repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
konstantint/PassportEye | passporteye/util/geometry.py | RotatedBox.extract_from_image | python | def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5):
rotate_by = (np.pi/2 - self.angle)*180/np.pi
img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True)
# The resizeable transform will shift the resulting image somewhat wrt original coordinates.
# When we cut out the box we will compensate for this shift.
shift_c, shift_r = self._compensate_rotation_shift(img, scale)
r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0)
r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r)
c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0)
c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c)
return img_rotated[r1:r2, c1:c2] | Extracts the contents of this box from a given image.
For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.
Returns an image with dimensions height*scale x width*scale.
Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")
:param img: a numpy ndarray suitable for image processing via skimage.
:param scale: the RotatedBox is scaled by this value before performing the extraction.
This is necessary when, for example, the location of a particular feature is determined using a smaller image,
yet then the corresponding area needs to be extracted from the original, larger image.
The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
:param margin_width: The margin that should be added to the width dimension of the box from each size.
This value is given wrt actual box dimensions (i.e. not scaled).
:param margin_height: The margin that should be added to the height dimension of the box from each side.
:return: a numpy ndarray, corresponding to the extracted region (aligned straight).
TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L119-L149 | [
"def _compensate_rotation_shift(self, img, scale):\n \"\"\"This is an auxiliary method used by extract_from_image.\n It is needed due to particular specifics of the skimage.transform.rotate implementation.\n Namely, when you use rotate(... , resize=True), the rotated image is rotated and shifted by certain... | class RotatedBox(object):
"""
RotatedBox represents a rectangular box centered at (cx,cy) with dimensions width x height,
rotated by angle radians counterclockwise.
>>> RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]])
RotatedBox(cx=1.0, cy=0.5, width=2.0, height=1.0, angle=0.0)
"""
def __init__(self, center, width, height, angle, points=None):
"""Creates a new RotatedBox.
:param points: This parameter may be used to indicate the set of points used to create the box.
"""
self.center = np.asfarray(center)
self.width = width
self.height = height
self.angle = angle
self.points = points
def __repr__(self):
return "RotatedBox(cx={0}, cy={1}, width={2}, height={3}, angle={4})".format(self.cx, self.cy, self.width, self.height, self.angle)
@property
def cx(self):
return self.center[0]
@property
def cy(self):
return self.center[1]
@property
def area(self):
return self.width * self.height
def approx_equal(self, center, width, height, angle, tol=1e-6):
"Method mainly useful for testing"
return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \
abs(self.height - height) < tol and abs(self.angle - angle) < tol
def rotated(self, rotation_center, angle):
"""Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
"""
rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
t = np.asfarray(rotation_center)
new_c = np.dot(rot.T, (self.center - t)) + t
return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
def as_poly(self, margin_width=0, margin_height=0):
"""Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
:param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
:param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
>>> RotatedBox([0, 0], 4, 2, 0).as_poly()
array([[-2., -1.],
[ 2., -1.],
[ 2., 1.],
[-2., 1.]])
>>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
array([[-0.707..., -2.121...],
[ 2.121..., 0.707...],
[ 0.707..., 2.121...],
[-2.121..., -0.707...]])
>>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
>>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
"""
v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)])
v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)])
c = np.array([self.cx, self.cy])
return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert])
def plot(self, mode='image', ax=None, **kwargs):
"""Visualize the box on a matplotlib plot.
:param mode: How should the box coordinates and angle be interpreted.
- mode `'image'` corresponds to the situation where x coordinate of the box
denotes the "row of an image" (ie. the Y coordinate of the plot, arranged downwards)
and y coordinate of the box corresponds to the "column of an image",
(ie X coordinate of the plot). In other words, box's x goes downwards and y - rightwards.
- mode `'math'` corresponds to the "mathematics" situation where box's x and y correspond to the X and Y axes of the plot.
:param ax: the matplotlib axis to draw on. If unspecified, the current axis is used.
:param kwargs: arguments passed to the matplotlib's `Polygon` patch object. By default, fill is set to False, color to red and lw to 2.
:return: The created Polygon object.
"""
ax = ax or plt.gca()
poly = self.as_poly()
if mode == 'image':
poly = poly[:,[1,0]]
kwargs.setdefault('fill', False)
kwargs.setdefault('color', 'r')
kwargs.setdefault('lw', 2)
p = patches.Polygon(poly, **kwargs)
ax.add_patch(p)
return p
def _compensate_rotation_shift(self, img, scale):
"""This is an auxiliary method used by extract_from_image.
It is needed due to particular specifics of the skimage.transform.rotate implementation.
Namely, when you use rotate(... , resize=True), the rotated image is rotated and shifted by certain amount.
Thus when we need to cut out the box from the image, we need to account for this shift.
We do this by repeating the computation from skimage.transform.rotate here.
TODO: This makes the code uncomfortably coupled to SKImage (e.g. this logic is appropriate for skimage 0.12.1, but not for 0.11,
and no one knows what happens in later versions). A solution would be to use skimage.transform.warp with custom settings, but we can think of it later.
"""
ctr = np.asarray([self.center[1]*scale, self.center[0]*scale])
tform1 = transform.SimilarityTransform(translation=ctr)
tform2 = transform.SimilarityTransform(rotation=np.pi/2 - self.angle)
tform3 = transform.SimilarityTransform(translation=-ctr)
tform = tform3 + tform2 + tform1
rows, cols = img.shape[0], img.shape[1]
corners = np.array([
[0, 0],
[0, rows - 1],
[cols - 1, rows - 1],
[cols - 1, 0]
])
corners = tform.inverse(corners)
minc = corners[:, 0].min()
minr = corners[:, 1].min()
maxc = corners[:, 0].max()
maxr = corners[:, 1].max()
# SKImage 0.11 version
out_rows = maxr - minr + 1
out_cols = maxc - minc + 1
# fit output image in new shape
return ((cols - out_cols) / 2., (rows - out_rows) / 2.)
@staticmethod
def from_points(points, box_type='bb'):
"""
Interpret a given point cloud as a RotatedBox, using PCA to determine the potential orientation (the longest component becomes width)
This is basically an approximate version of a min-area-rectangle algorithm.
TODO: Test whether using a true min-area-rectangle algorithm would be more precise or faster.
:param points: An n x 2 numpy array of coordinates.
:param box_type: The kind of method used to estimate the "box".
Possible values:
- `'bb'`, denoting the "bounding box" approach (min/max coordinates of the points correspond to box limits)
- `'mrz`, denoting a slightly modified technique, suited for MRZ zone detection from contour images.
Here the assumption is that the upper and lower bounds of the box are better estimated as the
10% and 90% quantile of the corresponding coordinates (rather than 0% and 100%, i.e. min and max).
This helps against accidental noise in the contour.
The `'mrz'` correction is only applied when there are at least 10 points in the set.
:returns: a RotatedBox, bounding the given set of points, oriented according to the principal components.
>>> RotatedBox.from_points([[0,0]])
RotatedBox(cx=0.0, cy=0.0, width=0.0, height=0.0, angle=0.0)
>>> assert RotatedBox.from_points([[0,0], [1,1], [2,2]]).approx_equal([1, 1], np.sqrt(8), 0, np.pi/4)
>>> assert RotatedBox.from_points([[0,0], [1,1], [0,1], [1,0]]).approx_equal([0.5, 0.5], 1, 1, 0.0) # The angle is rather arbitrary here
>>> assert RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]]).approx_equal([1, 0.5], 2, 1, 0)
>>> assert RotatedBox.from_points([[0,0], [2,4], [0,4], [2,0]]).approx_equal([1, 2], 4, 2, np.pi/2)
>>> assert RotatedBox.from_points([[0,0], [1,1.5], [2,0]]).approx_equal([1, 0.75], 2, 1.5, 0)
>>> assert RotatedBox.from_points([[0,0], [0,1], [1,1]]).approx_equal([0.25, 0.75], np.sqrt(2), np.sqrt(2)/2, np.pi/4)
"""
points = np.asfarray(points)
if points.shape[0] == 1:
return RotatedBox(points[0], width=0.0, height=0.0, angle=0.0, points=points)
m = PCA(2).fit(points)
# Find the angle
angle = (np.arctan2(m.components_[0,1], m.components_[0,0]) % np.pi)
if abs(angle - np.pi) < angle:
# Here the angle is always between -pi and pi
# If the principal component happened to be oriented so that the angle happens to be > pi/2 by absolute value,
# we flip the direction
angle = angle - np.pi if angle > 0 else angle + np.pi
points_transformed = m.transform(points)
ll = np.min(points_transformed, 0)
ur = np.max(points_transformed, 0)
wh = ur - ll
# Now compute and return the bounding box
if box_type == 'bb' or (box_type == 'mrz' and points.shape[0] < 10):
# We know that if we rotate the points around m.mean_, we get a box with bounds ur and ll
# The center of this box is (ur+ll)/2 + mean, which is not the same as the mean,
# hence to get the center of the original box we need to "unrotate" this box back.
return RotatedBox(np.dot(m.components_.T, (ll+ur)/2) + m.mean_, width=wh[0], height=wh[1], angle=angle, points=points)
elif box_type == 'mrz':
# When working with MRZ detection from contours, we may have minor "bumps" in the contour,
# that should be ignored at least along the long ("horizontal") side.
# To do that, we will use 10% and 90% quantiles as the bounds of the box instead of the max and min.
# We drop all points which lie beyond and simply repeat the estimation (now 'bb-style') without them.
h_coord = sorted(points_transformed[:,1])
n = len(h_coord)
bottom, top = h_coord[n/10], h_coord[n*9/10]
valid_points = np.logical_and(points_transformed[:,1]>=bottom, points_transformed[:,1]<=top)
rb = RotatedBox.from_points(points[valid_points, :], 'bb')
rb.points = points
return rb
else:
raise ValueError("Unknown parameter value: box_type=%s" % box_type)
|
konstantint/PassportEye | passporteye/util/geometry.py | RotatedBox._compensate_rotation_shift | python | def _compensate_rotation_shift(self, img, scale):
ctr = np.asarray([self.center[1]*scale, self.center[0]*scale])
tform1 = transform.SimilarityTransform(translation=ctr)
tform2 = transform.SimilarityTransform(rotation=np.pi/2 - self.angle)
tform3 = transform.SimilarityTransform(translation=-ctr)
tform = tform3 + tform2 + tform1
rows, cols = img.shape[0], img.shape[1]
corners = np.array([
[0, 0],
[0, rows - 1],
[cols - 1, rows - 1],
[cols - 1, 0]
])
corners = tform.inverse(corners)
minc = corners[:, 0].min()
minr = corners[:, 1].min()
maxc = corners[:, 0].max()
maxr = corners[:, 1].max()
# SKImage 0.11 version
out_rows = maxr - minr + 1
out_cols = maxc - minc + 1
# fit output image in new shape
return ((cols - out_cols) / 2., (rows - out_rows) / 2.) | This is an auxiliary method used by extract_from_image.
It is needed due to particular specifics of the skimage.transform.rotate implementation.
Namely, when you use rotate(... , resize=True), the rotated image is rotated and shifted by certain amount.
Thus when we need to cut out the box from the image, we need to account for this shift.
We do this by repeating the computation from skimage.transform.rotate here.
TODO: This makes the code uncomfortably coupled to SKImage (e.g. this logic is appropriate for skimage 0.12.1, but not for 0.11,
and no one knows what happens in later versions). A solution would be to use skimage.transform.warp with custom settings, but we can think of it later. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L151-L185 | null | class RotatedBox(object):
"""
RotatedBox represents a rectangular box centered at (cx,cy) with dimensions width x height,
rotated by angle radians counterclockwise.
>>> RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]])
RotatedBox(cx=1.0, cy=0.5, width=2.0, height=1.0, angle=0.0)
"""
def __init__(self, center, width, height, angle, points=None):
"""Creates a new RotatedBox.
:param points: This parameter may be used to indicate the set of points used to create the box.
"""
self.center = np.asfarray(center)
self.width = width
self.height = height
self.angle = angle
self.points = points
def __repr__(self):
return "RotatedBox(cx={0}, cy={1}, width={2}, height={3}, angle={4})".format(self.cx, self.cy, self.width, self.height, self.angle)
@property
def cx(self):
return self.center[0]
@property
def cy(self):
return self.center[1]
@property
def area(self):
return self.width * self.height
def approx_equal(self, center, width, height, angle, tol=1e-6):
"Method mainly useful for testing"
return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \
abs(self.height - height) < tol and abs(self.angle - angle) < tol
def rotated(self, rotation_center, angle):
"""Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
"""
rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
t = np.asfarray(rotation_center)
new_c = np.dot(rot.T, (self.center - t)) + t
return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
def as_poly(self, margin_width=0, margin_height=0):
"""Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
:param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
:param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
>>> RotatedBox([0, 0], 4, 2, 0).as_poly()
array([[-2., -1.],
[ 2., -1.],
[ 2., 1.],
[-2., 1.]])
>>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
array([[-0.707..., -2.121...],
[ 2.121..., 0.707...],
[ 0.707..., 2.121...],
[-2.121..., -0.707...]])
>>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
>>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
"""
v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)])
v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)])
c = np.array([self.cx, self.cy])
return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert])
def plot(self, mode='image', ax=None, **kwargs):
"""Visualize the box on a matplotlib plot.
:param mode: How should the box coordinates and angle be interpreted.
- mode `'image'` corresponds to the situation where x coordinate of the box
denotes the "row of an image" (ie. the Y coordinate of the plot, arranged downwards)
and y coordinate of the box corresponds to the "column of an image",
(ie X coordinate of the plot). In other words, box's x goes downwards and y - rightwards.
- mode `'math'` corresponds to the "mathematics" situation where box's x and y correspond to the X and Y axes of the plot.
:param ax: the matplotlib axis to draw on. If unspecified, the current axis is used.
:param kwargs: arguments passed to the matplotlib's `Polygon` patch object. By default, fill is set to False, color to red and lw to 2.
:return: The created Polygon object.
"""
ax = ax or plt.gca()
poly = self.as_poly()
if mode == 'image':
poly = poly[:,[1,0]]
kwargs.setdefault('fill', False)
kwargs.setdefault('color', 'r')
kwargs.setdefault('lw', 2)
p = patches.Polygon(poly, **kwargs)
ax.add_patch(p)
return p
def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5):
"""Extracts the contents of this box from a given image.
For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.
Returns an image with dimensions height*scale x width*scale.
Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")
:param img: a numpy ndarray suitable for image processing via skimage.
:param scale: the RotatedBox is scaled by this value before performing the extraction.
This is necessary when, for example, the location of a particular feature is determined using a smaller image,
yet then the corresponding area needs to be extracted from the original, larger image.
The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
:param margin_width: The margin that should be added to the width dimension of the box from each size.
This value is given wrt actual box dimensions (i.e. not scaled).
:param margin_height: The margin that should be added to the height dimension of the box from each side.
:return: a numpy ndarray, corresponding to the extracted region (aligned straight).
TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
"""
rotate_by = (np.pi/2 - self.angle)*180/np.pi
img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True)
# The resizeable transform will shift the resulting image somewhat wrt original coordinates.
# When we cut out the box we will compensate for this shift.
shift_c, shift_r = self._compensate_rotation_shift(img, scale)
r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0)
r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r)
c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0)
c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c)
return img_rotated[r1:r2, c1:c2]
@staticmethod
def from_points(points, box_type='bb'):
"""
Interpret a given point cloud as a RotatedBox, using PCA to determine the potential orientation (the longest component becomes width)
This is basically an approximate version of a min-area-rectangle algorithm.
TODO: Test whether using a true min-area-rectangle algorithm would be more precise or faster.
:param points: An n x 2 numpy array of coordinates.
:param box_type: The kind of method used to estimate the "box".
Possible values:
- `'bb'`, denoting the "bounding box" approach (min/max coordinates of the points correspond to box limits)
- `'mrz`, denoting a slightly modified technique, suited for MRZ zone detection from contour images.
Here the assumption is that the upper and lower bounds of the box are better estimated as the
10% and 90% quantile of the corresponding coordinates (rather than 0% and 100%, i.e. min and max).
This helps against accidental noise in the contour.
The `'mrz'` correction is only applied when there are at least 10 points in the set.
:returns: a RotatedBox, bounding the given set of points, oriented according to the principal components.
>>> RotatedBox.from_points([[0,0]])
RotatedBox(cx=0.0, cy=0.0, width=0.0, height=0.0, angle=0.0)
>>> assert RotatedBox.from_points([[0,0], [1,1], [2,2]]).approx_equal([1, 1], np.sqrt(8), 0, np.pi/4)
>>> assert RotatedBox.from_points([[0,0], [1,1], [0,1], [1,0]]).approx_equal([0.5, 0.5], 1, 1, 0.0) # The angle is rather arbitrary here
>>> assert RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]]).approx_equal([1, 0.5], 2, 1, 0)
>>> assert RotatedBox.from_points([[0,0], [2,4], [0,4], [2,0]]).approx_equal([1, 2], 4, 2, np.pi/2)
>>> assert RotatedBox.from_points([[0,0], [1,1.5], [2,0]]).approx_equal([1, 0.75], 2, 1.5, 0)
>>> assert RotatedBox.from_points([[0,0], [0,1], [1,1]]).approx_equal([0.25, 0.75], np.sqrt(2), np.sqrt(2)/2, np.pi/4)
"""
points = np.asfarray(points)
if points.shape[0] == 1:
return RotatedBox(points[0], width=0.0, height=0.0, angle=0.0, points=points)
m = PCA(2).fit(points)
# Find the angle
angle = (np.arctan2(m.components_[0,1], m.components_[0,0]) % np.pi)
if abs(angle - np.pi) < angle:
# Here the angle is always between -pi and pi
# If the principal component happened to be oriented so that the angle happens to be > pi/2 by absolute value,
# we flip the direction
angle = angle - np.pi if angle > 0 else angle + np.pi
points_transformed = m.transform(points)
ll = np.min(points_transformed, 0)
ur = np.max(points_transformed, 0)
wh = ur - ll
# Now compute and return the bounding box
if box_type == 'bb' or (box_type == 'mrz' and points.shape[0] < 10):
# We know that if we rotate the points around m.mean_, we get a box with bounds ur and ll
# The center of this box is (ur+ll)/2 + mean, which is not the same as the mean,
# hence to get the center of the original box we need to "unrotate" this box back.
return RotatedBox(np.dot(m.components_.T, (ll+ur)/2) + m.mean_, width=wh[0], height=wh[1], angle=angle, points=points)
elif box_type == 'mrz':
# When working with MRZ detection from contours, we may have minor "bumps" in the contour,
# that should be ignored at least along the long ("horizontal") side.
# To do that, we will use 10% and 90% quantiles as the bounds of the box instead of the max and min.
# We drop all points which lie beyond and simply repeat the estimation (now 'bb-style') without them.
h_coord = sorted(points_transformed[:,1])
n = len(h_coord)
bottom, top = h_coord[n/10], h_coord[n*9/10]
valid_points = np.logical_and(points_transformed[:,1]>=bottom, points_transformed[:,1]<=top)
rb = RotatedBox.from_points(points[valid_points, :], 'bb')
rb.points = points
return rb
else:
raise ValueError("Unknown parameter value: box_type=%s" % box_type)
|
konstantint/PassportEye | passporteye/util/geometry.py | RotatedBox.from_points | python | def from_points(points, box_type='bb'):
points = np.asfarray(points)
if points.shape[0] == 1:
return RotatedBox(points[0], width=0.0, height=0.0, angle=0.0, points=points)
m = PCA(2).fit(points)
# Find the angle
angle = (np.arctan2(m.components_[0,1], m.components_[0,0]) % np.pi)
if abs(angle - np.pi) < angle:
# Here the angle is always between -pi and pi
# If the principal component happened to be oriented so that the angle happens to be > pi/2 by absolute value,
# we flip the direction
angle = angle - np.pi if angle > 0 else angle + np.pi
points_transformed = m.transform(points)
ll = np.min(points_transformed, 0)
ur = np.max(points_transformed, 0)
wh = ur - ll
# Now compute and return the bounding box
if box_type == 'bb' or (box_type == 'mrz' and points.shape[0] < 10):
# We know that if we rotate the points around m.mean_, we get a box with bounds ur and ll
# The center of this box is (ur+ll)/2 + mean, which is not the same as the mean,
# hence to get the center of the original box we need to "unrotate" this box back.
return RotatedBox(np.dot(m.components_.T, (ll+ur)/2) + m.mean_, width=wh[0], height=wh[1], angle=angle, points=points)
elif box_type == 'mrz':
# When working with MRZ detection from contours, we may have minor "bumps" in the contour,
# that should be ignored at least along the long ("horizontal") side.
# To do that, we will use 10% and 90% quantiles as the bounds of the box instead of the max and min.
# We drop all points which lie beyond and simply repeat the estimation (now 'bb-style') without them.
h_coord = sorted(points_transformed[:,1])
n = len(h_coord)
bottom, top = h_coord[n/10], h_coord[n*9/10]
valid_points = np.logical_and(points_transformed[:,1]>=bottom, points_transformed[:,1]<=top)
rb = RotatedBox.from_points(points[valid_points, :], 'bb')
rb.points = points
return rb
else:
raise ValueError("Unknown parameter value: box_type=%s" % box_type) | Interpret a given point cloud as a RotatedBox, using PCA to determine the potential orientation (the longest component becomes width)
This is basically an approximate version of a min-area-rectangle algorithm.
TODO: Test whether using a true min-area-rectangle algorithm would be more precise or faster.
:param points: An n x 2 numpy array of coordinates.
:param box_type: The kind of method used to estimate the "box".
Possible values:
- `'bb'`, denoting the "bounding box" approach (min/max coordinates of the points correspond to box limits)
- `'mrz`, denoting a slightly modified technique, suited for MRZ zone detection from contour images.
Here the assumption is that the upper and lower bounds of the box are better estimated as the
10% and 90% quantile of the corresponding coordinates (rather than 0% and 100%, i.e. min and max).
This helps against accidental noise in the contour.
The `'mrz'` correction is only applied when there are at least 10 points in the set.
:returns: a RotatedBox, bounding the given set of points, oriented according to the principal components.
>>> RotatedBox.from_points([[0,0]])
RotatedBox(cx=0.0, cy=0.0, width=0.0, height=0.0, angle=0.0)
>>> assert RotatedBox.from_points([[0,0], [1,1], [2,2]]).approx_equal([1, 1], np.sqrt(8), 0, np.pi/4)
>>> assert RotatedBox.from_points([[0,0], [1,1], [0,1], [1,0]]).approx_equal([0.5, 0.5], 1, 1, 0.0) # The angle is rather arbitrary here
>>> assert RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]]).approx_equal([1, 0.5], 2, 1, 0)
>>> assert RotatedBox.from_points([[0,0], [2,4], [0,4], [2,0]]).approx_equal([1, 2], 4, 2, np.pi/2)
>>> assert RotatedBox.from_points([[0,0], [1,1.5], [2,0]]).approx_equal([1, 0.75], 2, 1.5, 0)
>>> assert RotatedBox.from_points([[0,0], [0,1], [1,1]]).approx_equal([0.25, 0.75], np.sqrt(2), np.sqrt(2)/2, np.pi/4) | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L189-L251 | [
"def from_points(points, box_type='bb'):\n \"\"\"\n Interpret a given point cloud as a RotatedBox, using PCA to determine the potential orientation (the longest component becomes width)\n This is basically an approximate version of a min-area-rectangle algorithm.\n TODO: Test whether using a true min-ar... | class RotatedBox(object):
"""
RotatedBox represents a rectangular box centered at (cx,cy) with dimensions width x height,
rotated by angle radians counterclockwise.
>>> RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]])
RotatedBox(cx=1.0, cy=0.5, width=2.0, height=1.0, angle=0.0)
"""
def __init__(self, center, width, height, angle, points=None):
"""Creates a new RotatedBox.
:param points: This parameter may be used to indicate the set of points used to create the box.
"""
self.center = np.asfarray(center)
self.width = width
self.height = height
self.angle = angle
self.points = points
def __repr__(self):
return "RotatedBox(cx={0}, cy={1}, width={2}, height={3}, angle={4})".format(self.cx, self.cy, self.width, self.height, self.angle)
@property
def cx(self):
return self.center[0]
@property
def cy(self):
return self.center[1]
@property
def area(self):
return self.width * self.height
def approx_equal(self, center, width, height, angle, tol=1e-6):
"Method mainly useful for testing"
return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \
abs(self.height - height) < tol and abs(self.angle - angle) < tol
def rotated(self, rotation_center, angle):
"""Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
"""
rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
t = np.asfarray(rotation_center)
new_c = np.dot(rot.T, (self.center - t)) + t
return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
def as_poly(self, margin_width=0, margin_height=0):
"""Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
:param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
:param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
>>> RotatedBox([0, 0], 4, 2, 0).as_poly()
array([[-2., -1.],
[ 2., -1.],
[ 2., 1.],
[-2., 1.]])
>>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
array([[-0.707..., -2.121...],
[ 2.121..., 0.707...],
[ 0.707..., 2.121...],
[-2.121..., -0.707...]])
>>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
>>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
"""
v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)])
v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)])
c = np.array([self.cx, self.cy])
return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert])
def plot(self, mode='image', ax=None, **kwargs):
"""Visualize the box on a matplotlib plot.
:param mode: How should the box coordinates and angle be interpreted.
- mode `'image'` corresponds to the situation where x coordinate of the box
denotes the "row of an image" (ie. the Y coordinate of the plot, arranged downwards)
and y coordinate of the box corresponds to the "column of an image",
(ie X coordinate of the plot). In other words, box's x goes downwards and y - rightwards.
- mode `'math'` corresponds to the "mathematics" situation where box's x and y correspond to the X and Y axes of the plot.
:param ax: the matplotlib axis to draw on. If unspecified, the current axis is used.
:param kwargs: arguments passed to the matplotlib's `Polygon` patch object. By default, fill is set to False, color to red and lw to 2.
:return: The created Polygon object.
"""
ax = ax or plt.gca()
poly = self.as_poly()
if mode == 'image':
poly = poly[:,[1,0]]
kwargs.setdefault('fill', False)
kwargs.setdefault('color', 'r')
kwargs.setdefault('lw', 2)
p = patches.Polygon(poly, **kwargs)
ax.add_patch(p)
return p
def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5):
"""Extracts the contents of this box from a given image.
For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.
Returns an image with dimensions height*scale x width*scale.
Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")
:param img: a numpy ndarray suitable for image processing via skimage.
:param scale: the RotatedBox is scaled by this value before performing the extraction.
This is necessary when, for example, the location of a particular feature is determined using a smaller image,
yet then the corresponding area needs to be extracted from the original, larger image.
The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
:param margin_width: The margin that should be added to the width dimension of the box from each size.
This value is given wrt actual box dimensions (i.e. not scaled).
:param margin_height: The margin that should be added to the height dimension of the box from each side.
:return: a numpy ndarray, corresponding to the extracted region (aligned straight).
TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
"""
rotate_by = (np.pi/2 - self.angle)*180/np.pi
img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True)
# The resizeable transform will shift the resulting image somewhat wrt original coordinates.
# When we cut out the box we will compensate for this shift.
shift_c, shift_r = self._compensate_rotation_shift(img, scale)
r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0)
r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r)
c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0)
c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c)
return img_rotated[r1:r2, c1:c2]
def _compensate_rotation_shift(self, img, scale):
"""This is an auxiliary method used by extract_from_image.
It is needed due to particular specifics of the skimage.transform.rotate implementation.
Namely, when you use rotate(... , resize=True), the rotated image is rotated and shifted by certain amount.
Thus when we need to cut out the box from the image, we need to account for this shift.
We do this by repeating the computation from skimage.transform.rotate here.
TODO: This makes the code uncomfortably coupled to SKImage (e.g. this logic is appropriate for skimage 0.12.1, but not for 0.11,
and no one knows what happens in later versions). A solution would be to use skimage.transform.warp with custom settings, but we can think of it later.
"""
ctr = np.asarray([self.center[1]*scale, self.center[0]*scale])
tform1 = transform.SimilarityTransform(translation=ctr)
tform2 = transform.SimilarityTransform(rotation=np.pi/2 - self.angle)
tform3 = transform.SimilarityTransform(translation=-ctr)
tform = tform3 + tform2 + tform1
rows, cols = img.shape[0], img.shape[1]
corners = np.array([
[0, 0],
[0, rows - 1],
[cols - 1, rows - 1],
[cols - 1, 0]
])
corners = tform.inverse(corners)
minc = corners[:, 0].min()
minr = corners[:, 1].min()
maxc = corners[:, 0].max()
maxr = corners[:, 1].max()
# SKImage 0.11 version
out_rows = maxr - minr + 1
out_cols = maxc - minc + 1
# fit output image in new shape
return ((cols - out_cols) / 2., (rows - out_rows) / 2.)
@staticmethod
|
konstantint/PassportEye | passporteye/mrz/image.py | read_mrz | python | def read_mrz(file, save_roi=False, extra_cmdline_params=''):
p = MRZPipeline(file, extra_cmdline_params)
mrz = p.result
if mrz is not None:
mrz.aux['text'] = p['text']
if save_roi:
mrz.aux['roi'] = p['roi']
return mrz | The main interface function to this module, encapsulating the recognition pipeline.
Given an image filename, runs MRZPipeline on it, returning the parsed MRZ object.
:param file: A filename or a stream to read the file data from.
:param save_roi: when this is True, the .aux['roi'] field will contain the Region of Interest where the MRZ was parsed from.
:param extra_cmdline_params:extra parameters to the ocr.py | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L328-L343 | null | '''
PassportEye::MRZ: Machine-readable zone extraction and parsing.
Image processing for MRZ extraction.
Author: Konstantin Tretyakov
License: MIT
'''
import io
import numpy as np
from skimage import transform, morphology, filters, measure
from skimage import io as skimage_io # So as not to clash with builtin io
from ..util.pdf import extract_first_jpeg_in_pdf
from ..util.pipeline import Pipeline
from ..util.geometry import RotatedBox
from ..util.ocr import ocr
from .text import MRZ
class Loader(object):
"""Loads `file` to `img`."""
__depends__ = []
__provides__ = ['img']
def __init__(self, file, as_gray=True, pdf_aware=True):
self.file = file
self.as_gray = as_gray
self.pdf_aware = pdf_aware
def _imread(self, file):
"""Proxy to skimage.io.imread with some fixes."""
# For now, we have to select the imageio plugin to read image from byte stream
# When ski-image v0.15 is released, imageio will be the default plugin, so this
# code can be simplified at that time. See issue report and pull request:
# https://github.com/scikit-image/scikit-image/issues/2889
# https://github.com/scikit-image/scikit-image/pull/3126
img = skimage_io.imread(file, as_gray=self.as_gray, plugin='imageio')
if img is not None and len(img.shape) != 2:
# The PIL plugin somewhy fails to load some images
img = skimage_io.imread(file, as_gray=self.as_gray, plugin='matplotlib')
return img
def __call__(self):
if isinstance(self.file, str):
if self.pdf_aware and self.file.lower().endswith('.pdf'):
with open(self.file, 'rb') as f:
img_data = extract_first_jpeg_in_pdf(f)
if img_data is None:
return None
return self._imread(img_data)
else:
return self._imread(self.file)
elif isinstance(self.file, (bytes, io.IOBase)):
return self._imread(self.file)
return None
class Scaler(object):
"""Scales `image` down to `img_scaled` so that its width is at most 250."""
__depends__ = ['img']
__provides__ = ['img_small', 'scale_factor']
def __init__(self, max_width=250):
self.max_width = max_width
def __call__(self, img):
scale_factor = self.max_width / float(img.shape[1])
if scale_factor <= 1:
img_small = transform.rescale(img, scale_factor, mode='constant', multichannel=False, anti_aliasing=True)
else:
scale_factor = 1.0
img_small = img
return img_small, scale_factor
class BooneTransform(object):
"""Processes `img_small` according to Hans Boone's method
(http://www.pyimagesearch.com/2015/11/30/detecting-machine-readable-zones-in-passport-images/)
Outputs a `img_binary` - a result of threshold_otsu(closing(sobel(black_tophat(img_small)))"""
__depends__ = ['img_small']
__provides__ = ['img_binary']
def __init__(self, square_size=5):
self.square_size = square_size
def __call__(self, img_small):
m = morphology.square(self.square_size)
img_th = morphology.black_tophat(img_small, m)
img_sob = abs(filters.sobel_v(img_th))
img_closed = morphology.closing(img_sob, m)
threshold = filters.threshold_otsu(img_closed)
return img_closed > threshold
class MRZBoxLocator(object):
"""Extracts putative MRZs as RotatedBox instances from the contours of `img_binary`"""
__depends__ = ['img_binary']
__provides__ = ['boxes']
def __init__(self, max_boxes=4, min_points_in_contour=50, min_area=500, min_box_aspect=5, angle_tol=0.1,
lineskip_tol=1.5, box_type='bb'):
self.max_boxes = max_boxes
self.min_points_in_contour = min_points_in_contour
self.min_area = min_area
self.min_box_aspect = min_box_aspect
self.angle_tol = angle_tol
self.lineskip_tol = lineskip_tol
self.box_type = box_type
def __call__(self, img_binary):
cs = measure.find_contours(img_binary, 0.5)
# Collect contours into RotatedBoxes
results = []
for c in cs:
# Now examine the bounding box. If it is too small, we ignore the contour
ll, ur = np.min(c, 0), np.max(c, 0)
wh = ur - ll
if wh[0] * wh[1] < self.min_area:
continue
# Finally, construct the rotatedbox. If its aspect ratio is too small, we ignore it
rb = RotatedBox.from_points(c, self.box_type)
if rb.height == 0 or rb.width / rb.height < self.min_box_aspect:
continue
# All tests fine, add to the list
results.append(rb)
# Next sort and leave only max_boxes largest boxes by area
results.sort(key=lambda x: -x.area)
return self._merge_boxes(results[0:self.max_boxes])
def _are_aligned_angles(self, b1, b2):
"Are two boxes aligned according to their angle?"
return abs(b1 - b2) <= self.angle_tol or abs(np.pi - abs(b1 - b2)) <= self.angle_tol
def _are_nearby_parallel_boxes(self, b1, b2):
"Are two boxes nearby, parallel, and similar in width?"
if not self._are_aligned_angles(b1.angle, b2.angle):
return False
# Otherwise pick the smaller angle and see whether the two boxes are close according to the "up" direction wrt that angle
angle = min(b1.angle, b2.angle)
return abs(np.dot(b1.center - b2.center, [-np.sin(angle), np.cos(angle)])) < self.lineskip_tol * (
b1.height + b2.height) and (b1.width > 0) and (b2.width > 0) and (0.5 < b1.width / b2.width < 2.0)
def _merge_any_two_boxes(self, box_list):
"""Given a list of boxes, finds two nearby parallel ones and merges them. Returns false if none found."""
n = len(box_list)
for i in range(n):
for j in range(i + 1, n):
if self._are_nearby_parallel_boxes(box_list[i], box_list[j]):
# Remove the two boxes from the list, add a new one
a, b = box_list[i], box_list[j]
merged_points = np.vstack([a.points, b.points])
merged_box = RotatedBox.from_points(merged_points, self.box_type)
if merged_box.width / merged_box.height >= self.min_box_aspect:
box_list.remove(a)
box_list.remove(b)
box_list.append(merged_box)
return True
return False
def _merge_boxes(self, box_list):
"""Mergest nearby parallel boxes in the given list."""
while self._merge_any_two_boxes(box_list):
pass
return box_list
class FindFirstValidMRZ(object):
"""Iterates over boxes found by MRZBoxLocator, passes them to BoxToMRZ, finds the first valid MRZ
or the best-scoring MRZ"""
__provides__ = ['box_idx', 'roi', 'text', 'mrz']
__depends__ = ['boxes', 'img', 'img_small', 'scale_factor', '__data__']
def __init__(self, use_original_image=True, extra_cmdline_params=''):
self.box_to_mrz = BoxToMRZ(use_original_image, extra_cmdline_params=extra_cmdline_params)
def __call__(self, boxes, img, img_small, scale_factor, data):
mrzs = []
data['__debug__mrz'] = []
for i, b in enumerate(boxes):
roi, text, mrz = self.box_to_mrz(b, img, img_small, scale_factor)
data['__debug__mrz'].append((roi, text, mrz))
if mrz.valid:
return i, roi, text, mrz
elif mrz.valid_score > 0:
mrzs.append((i, roi, text, mrz))
if not mrzs:
return None, None, None, None
else:
mrzs.sort(key=lambda x: x[3].valid_score)
return mrzs[-1]
class BoxToMRZ(object):
"""Extracts ROI from the image, corresponding to a box found by MRZBoxLocator, does OCR and MRZ parsing on this region."""
__provides__ = ['roi', 'text', 'mrz']
__depends__ = ['box', 'img', 'img_small', 'scale_factor']
def __init__(self, use_original_image=True, extra_cmdline_params=''):
"""
:param use_original_image: when True, the ROI is extracted from img, otherwise from img_small
"""
self.use_original_image = use_original_image
self.extra_cmdline_params = extra_cmdline_params
def __call__(self, box, img, img_small, scale_factor):
img = img if self.use_original_image else img_small
scale = 1.0 / scale_factor if self.use_original_image else 1.0
# If the box's angle is np.pi/2 +- 0.01, we shall round it to np.pi/2:
# this way image extraction is fast and introduces no distortions.
# and this may be more important than being perfectly straight
# similar for 0 angle
if abs(abs(box.angle) - np.pi / 2) <= 0.01:
box.angle = np.pi / 2
if abs(box.angle) <= 0.01:
box.angle = 0.0
roi = box.extract_from_image(img, scale)
text = ocr(roi, extra_cmdline_params=self.extra_cmdline_params)
if '>>' in text or ('>' in text and '<' not in text):
# Most probably we need to reverse the ROI
roi = roi[::-1, ::-1]
text = ocr(roi, extra_cmdline_params=self.extra_cmdline_params)
if not '<' in text:
# Assume this is unrecoverable and stop here (TODO: this may be premature, although it saves time on useless stuff)
return roi, text, MRZ.from_ocr(text)
mrz = MRZ.from_ocr(text)
mrz.aux['method'] = 'direct'
# Now try improving the result via hacks
if not mrz.valid:
text, mrz = self._try_larger_image(roi, text, mrz)
# Sometimes the filter used for enlargement is important!
if not mrz.valid:
text, mrz = self._try_larger_image(roi, text, mrz, 1)
if not mrz.valid:
text, mrz = self._try_black_tophat(roi, text, mrz)
return roi, text, mrz
def _try_larger_image(self, roi, cur_text, cur_mrz, filter_order=3):
"""Attempts to improve the OCR result by scaling the image. If the new mrz is better, returns it, otherwise returns
the old mrz."""
if roi.shape[1] <= 700:
scale_by = int(1050.0 / roi.shape[1] + 0.5)
roi_lg = transform.rescale(roi, scale_by, order=filter_order, mode='constant', multichannel=False,
anti_aliasing=True)
new_text = ocr(roi_lg, extra_cmdline_params=self.extra_cmdline_params)
new_mrz = MRZ.from_ocr(new_text)
new_mrz.aux['method'] = 'rescaled(%d)' % filter_order
if new_mrz.valid_score > cur_mrz.valid_score:
cur_mrz = new_mrz
cur_text = new_text
return cur_text, cur_mrz
def _try_black_tophat(self, roi, cur_text, cur_mrz):
roi_b = morphology.black_tophat(roi, morphology.disk(5))
# There are some examples where this line basically hangs for an undetermined amount of time.
new_text = ocr(roi_b, extra_cmdline_params=self.extra_cmdline_params)
new_mrz = MRZ.from_ocr(new_text)
if new_mrz.valid_score > cur_mrz.valid_score:
new_mrz.aux['method'] = 'black_tophat'
cur_text, cur_mrz = new_text, new_mrz
new_text, new_mrz = self._try_larger_image(roi_b, cur_text, cur_mrz)
if new_mrz.valid_score > cur_mrz.valid_score:
new_mrz.aux['method'] = 'black_tophat(rescaled(3))'
cur_text, cur_mrz = new_text, new_mrz
return cur_text, cur_mrz
class TryOtherMaxWidth(object):
"""
If mrz was not found so far in the current pipeline,
changes the max_width parameter of the scaler to 1000 and reruns the pipeline again.
"""
__provides__ = ['mrz_final']
__depends__ = ['mrz', '__pipeline__']
def __init__(self, other_max_width=1000):
self.other_max_width = other_max_width
def __call__(self, mrz, __pipeline__):
# We'll only try this if we see that img_binary.mean() is very small or img.mean() is very large (i.e. image is mostly white).
if mrz is None and (__pipeline__['img_binary'].mean() < 0.01 or __pipeline__['img'].mean() > 0.95):
__pipeline__.replace_component('scaler', Scaler(self.other_max_width))
new_mrz = __pipeline__['mrz']
if new_mrz is not None:
new_mrz.aux['method'] = new_mrz.aux['method'] + '|max_width(%d)' % self.other_max_width
mrz = new_mrz
return mrz
class MRZPipeline(Pipeline):
"""This is the "currently best-performing" pipeline for parsing MRZ from a given image file."""
def __init__(self, file, extra_cmdline_params=''):
super(MRZPipeline, self).__init__()
self.version = '1.0' # In principle we might have different pipelines in use, so possible backward compatibility is an issue
self.file = file
self.add_component('loader', Loader(file))
self.add_component('scaler', Scaler())
self.add_component('boone', BooneTransform())
self.add_component('box_locator', MRZBoxLocator())
self.add_component('mrz', FindFirstValidMRZ(extra_cmdline_params=extra_cmdline_params))
self.add_component('other_max_width', TryOtherMaxWidth())
@property
def result(self):
return self['mrz_final']
|
konstantint/PassportEye | passporteye/mrz/image.py | Loader._imread | python | def _imread(self, file):
# For now, we have to select the imageio plugin to read image from byte stream
# When ski-image v0.15 is released, imageio will be the default plugin, so this
# code can be simplified at that time. See issue report and pull request:
# https://github.com/scikit-image/scikit-image/issues/2889
# https://github.com/scikit-image/scikit-image/pull/3126
img = skimage_io.imread(file, as_gray=self.as_gray, plugin='imageio')
if img is not None and len(img.shape) != 2:
# The PIL plugin somewhy fails to load some images
img = skimage_io.imread(file, as_gray=self.as_gray, plugin='matplotlib')
return img | Proxy to skimage.io.imread with some fixes. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L30-L41 | null | class Loader(object):
"""Loads `file` to `img`."""
__depends__ = []
__provides__ = ['img']
def __init__(self, file, as_gray=True, pdf_aware=True):
self.file = file
self.as_gray = as_gray
self.pdf_aware = pdf_aware
def __call__(self):
if isinstance(self.file, str):
if self.pdf_aware and self.file.lower().endswith('.pdf'):
with open(self.file, 'rb') as f:
img_data = extract_first_jpeg_in_pdf(f)
if img_data is None:
return None
return self._imread(img_data)
else:
return self._imread(self.file)
elif isinstance(self.file, (bytes, io.IOBase)):
return self._imread(self.file)
return None
|
konstantint/PassportEye | passporteye/mrz/image.py | MRZBoxLocator._are_aligned_angles | python | def _are_aligned_angles(self, b1, b2):
"Are two boxes aligned according to their angle?"
return abs(b1 - b2) <= self.angle_tol or abs(np.pi - abs(b1 - b2)) <= self.angle_tol | Are two boxes aligned according to their angle? | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L136-L138 | null | class MRZBoxLocator(object):
"""Extracts putative MRZs as RotatedBox instances from the contours of `img_binary`"""
__depends__ = ['img_binary']
__provides__ = ['boxes']
def __init__(self, max_boxes=4, min_points_in_contour=50, min_area=500, min_box_aspect=5, angle_tol=0.1,
lineskip_tol=1.5, box_type='bb'):
self.max_boxes = max_boxes
self.min_points_in_contour = min_points_in_contour
self.min_area = min_area
self.min_box_aspect = min_box_aspect
self.angle_tol = angle_tol
self.lineskip_tol = lineskip_tol
self.box_type = box_type
def __call__(self, img_binary):
cs = measure.find_contours(img_binary, 0.5)
# Collect contours into RotatedBoxes
results = []
for c in cs:
# Now examine the bounding box. If it is too small, we ignore the contour
ll, ur = np.min(c, 0), np.max(c, 0)
wh = ur - ll
if wh[0] * wh[1] < self.min_area:
continue
# Finally, construct the rotatedbox. If its aspect ratio is too small, we ignore it
rb = RotatedBox.from_points(c, self.box_type)
if rb.height == 0 or rb.width / rb.height < self.min_box_aspect:
continue
# All tests fine, add to the list
results.append(rb)
# Next sort and leave only max_boxes largest boxes by area
results.sort(key=lambda x: -x.area)
return self._merge_boxes(results[0:self.max_boxes])
def _are_nearby_parallel_boxes(self, b1, b2):
"Are two boxes nearby, parallel, and similar in width?"
if not self._are_aligned_angles(b1.angle, b2.angle):
return False
# Otherwise pick the smaller angle and see whether the two boxes are close according to the "up" direction wrt that angle
angle = min(b1.angle, b2.angle)
return abs(np.dot(b1.center - b2.center, [-np.sin(angle), np.cos(angle)])) < self.lineskip_tol * (
b1.height + b2.height) and (b1.width > 0) and (b2.width > 0) and (0.5 < b1.width / b2.width < 2.0)
def _merge_any_two_boxes(self, box_list):
"""Given a list of boxes, finds two nearby parallel ones and merges them. Returns false if none found."""
n = len(box_list)
for i in range(n):
for j in range(i + 1, n):
if self._are_nearby_parallel_boxes(box_list[i], box_list[j]):
# Remove the two boxes from the list, add a new one
a, b = box_list[i], box_list[j]
merged_points = np.vstack([a.points, b.points])
merged_box = RotatedBox.from_points(merged_points, self.box_type)
if merged_box.width / merged_box.height >= self.min_box_aspect:
box_list.remove(a)
box_list.remove(b)
box_list.append(merged_box)
return True
return False
def _merge_boxes(self, box_list):
"""Mergest nearby parallel boxes in the given list."""
while self._merge_any_two_boxes(box_list):
pass
return box_list
|
konstantint/PassportEye | passporteye/mrz/image.py | MRZBoxLocator._are_nearby_parallel_boxes | python | def _are_nearby_parallel_boxes(self, b1, b2):
"Are two boxes nearby, parallel, and similar in width?"
if not self._are_aligned_angles(b1.angle, b2.angle):
return False
# Otherwise pick the smaller angle and see whether the two boxes are close according to the "up" direction wrt that angle
angle = min(b1.angle, b2.angle)
return abs(np.dot(b1.center - b2.center, [-np.sin(angle), np.cos(angle)])) < self.lineskip_tol * (
b1.height + b2.height) and (b1.width > 0) and (b2.width > 0) and (0.5 < b1.width / b2.width < 2.0) | Are two boxes nearby, parallel, and similar in width? | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L140-L147 | null | class MRZBoxLocator(object):
"""Extracts putative MRZs as RotatedBox instances from the contours of `img_binary`"""
__depends__ = ['img_binary']
__provides__ = ['boxes']
def __init__(self, max_boxes=4, min_points_in_contour=50, min_area=500, min_box_aspect=5, angle_tol=0.1,
lineskip_tol=1.5, box_type='bb'):
self.max_boxes = max_boxes
self.min_points_in_contour = min_points_in_contour
self.min_area = min_area
self.min_box_aspect = min_box_aspect
self.angle_tol = angle_tol
self.lineskip_tol = lineskip_tol
self.box_type = box_type
def __call__(self, img_binary):
cs = measure.find_contours(img_binary, 0.5)
# Collect contours into RotatedBoxes
results = []
for c in cs:
# Now examine the bounding box. If it is too small, we ignore the contour
ll, ur = np.min(c, 0), np.max(c, 0)
wh = ur - ll
if wh[0] * wh[1] < self.min_area:
continue
# Finally, construct the rotatedbox. If its aspect ratio is too small, we ignore it
rb = RotatedBox.from_points(c, self.box_type)
if rb.height == 0 or rb.width / rb.height < self.min_box_aspect:
continue
# All tests fine, add to the list
results.append(rb)
# Next sort and leave only max_boxes largest boxes by area
results.sort(key=lambda x: -x.area)
return self._merge_boxes(results[0:self.max_boxes])
def _are_aligned_angles(self, b1, b2):
"Are two boxes aligned according to their angle?"
return abs(b1 - b2) <= self.angle_tol or abs(np.pi - abs(b1 - b2)) <= self.angle_tol
def _merge_any_two_boxes(self, box_list):
"""Given a list of boxes, finds two nearby parallel ones and merges them. Returns false if none found."""
n = len(box_list)
for i in range(n):
for j in range(i + 1, n):
if self._are_nearby_parallel_boxes(box_list[i], box_list[j]):
# Remove the two boxes from the list, add a new one
a, b = box_list[i], box_list[j]
merged_points = np.vstack([a.points, b.points])
merged_box = RotatedBox.from_points(merged_points, self.box_type)
if merged_box.width / merged_box.height >= self.min_box_aspect:
box_list.remove(a)
box_list.remove(b)
box_list.append(merged_box)
return True
return False
def _merge_boxes(self, box_list):
"""Mergest nearby parallel boxes in the given list."""
while self._merge_any_two_boxes(box_list):
pass
return box_list
|
konstantint/PassportEye | passporteye/mrz/image.py | MRZBoxLocator._merge_any_two_boxes | python | def _merge_any_two_boxes(self, box_list):
n = len(box_list)
for i in range(n):
for j in range(i + 1, n):
if self._are_nearby_parallel_boxes(box_list[i], box_list[j]):
# Remove the two boxes from the list, add a new one
a, b = box_list[i], box_list[j]
merged_points = np.vstack([a.points, b.points])
merged_box = RotatedBox.from_points(merged_points, self.box_type)
if merged_box.width / merged_box.height >= self.min_box_aspect:
box_list.remove(a)
box_list.remove(b)
box_list.append(merged_box)
return True
return False | Given a list of boxes, finds two nearby parallel ones and merges them. Returns false if none found. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L149-L164 | [
"def from_points(points, box_type='bb'):\n \"\"\"\n Interpret a given point cloud as a RotatedBox, using PCA to determine the potential orientation (the longest component becomes width)\n This is basically an approximate version of a min-area-rectangle algorithm.\n TODO: Test whether using a true min-ar... | class MRZBoxLocator(object):
"""Extracts putative MRZs as RotatedBox instances from the contours of `img_binary`"""
__depends__ = ['img_binary']
__provides__ = ['boxes']
def __init__(self, max_boxes=4, min_points_in_contour=50, min_area=500, min_box_aspect=5, angle_tol=0.1,
lineskip_tol=1.5, box_type='bb'):
self.max_boxes = max_boxes
self.min_points_in_contour = min_points_in_contour
self.min_area = min_area
self.min_box_aspect = min_box_aspect
self.angle_tol = angle_tol
self.lineskip_tol = lineskip_tol
self.box_type = box_type
def __call__(self, img_binary):
cs = measure.find_contours(img_binary, 0.5)
# Collect contours into RotatedBoxes
results = []
for c in cs:
# Now examine the bounding box. If it is too small, we ignore the contour
ll, ur = np.min(c, 0), np.max(c, 0)
wh = ur - ll
if wh[0] * wh[1] < self.min_area:
continue
# Finally, construct the rotatedbox. If its aspect ratio is too small, we ignore it
rb = RotatedBox.from_points(c, self.box_type)
if rb.height == 0 or rb.width / rb.height < self.min_box_aspect:
continue
# All tests fine, add to the list
results.append(rb)
# Next sort and leave only max_boxes largest boxes by area
results.sort(key=lambda x: -x.area)
return self._merge_boxes(results[0:self.max_boxes])
def _are_aligned_angles(self, b1, b2):
"Are two boxes aligned according to their angle?"
return abs(b1 - b2) <= self.angle_tol or abs(np.pi - abs(b1 - b2)) <= self.angle_tol
def _are_nearby_parallel_boxes(self, b1, b2):
"Are two boxes nearby, parallel, and similar in width?"
if not self._are_aligned_angles(b1.angle, b2.angle):
return False
# Otherwise pick the smaller angle and see whether the two boxes are close according to the "up" direction wrt that angle
angle = min(b1.angle, b2.angle)
return abs(np.dot(b1.center - b2.center, [-np.sin(angle), np.cos(angle)])) < self.lineskip_tol * (
b1.height + b2.height) and (b1.width > 0) and (b2.width > 0) and (0.5 < b1.width / b2.width < 2.0)
def _merge_boxes(self, box_list):
"""Mergest nearby parallel boxes in the given list."""
while self._merge_any_two_boxes(box_list):
pass
return box_list
|
konstantint/PassportEye | passporteye/mrz/image.py | BoxToMRZ._try_larger_image | python | def _try_larger_image(self, roi, cur_text, cur_mrz, filter_order=3):
if roi.shape[1] <= 700:
scale_by = int(1050.0 / roi.shape[1] + 0.5)
roi_lg = transform.rescale(roi, scale_by, order=filter_order, mode='constant', multichannel=False,
anti_aliasing=True)
new_text = ocr(roi_lg, extra_cmdline_params=self.extra_cmdline_params)
new_mrz = MRZ.from_ocr(new_text)
new_mrz.aux['method'] = 'rescaled(%d)' % filter_order
if new_mrz.valid_score > cur_mrz.valid_score:
cur_mrz = new_mrz
cur_text = new_text
return cur_text, cur_mrz | Attempts to improve the OCR result by scaling the image. If the new mrz is better, returns it, otherwise returns
the old mrz. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L254-L267 | [
"def ocr(img, mrz_mode=True, extra_cmdline_params=''):\n \"\"\"Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image.\n\n This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL.\n\n In pr... | class BoxToMRZ(object):
"""Extracts ROI from the image, corresponding to a box found by MRZBoxLocator, does OCR and MRZ parsing on this region."""
__provides__ = ['roi', 'text', 'mrz']
__depends__ = ['box', 'img', 'img_small', 'scale_factor']
def __init__(self, use_original_image=True, extra_cmdline_params=''):
"""
:param use_original_image: when True, the ROI is extracted from img, otherwise from img_small
"""
self.use_original_image = use_original_image
self.extra_cmdline_params = extra_cmdline_params
def __call__(self, box, img, img_small, scale_factor):
img = img if self.use_original_image else img_small
scale = 1.0 / scale_factor if self.use_original_image else 1.0
# If the box's angle is np.pi/2 +- 0.01, we shall round it to np.pi/2:
# this way image extraction is fast and introduces no distortions.
# and this may be more important than being perfectly straight
# similar for 0 angle
if abs(abs(box.angle) - np.pi / 2) <= 0.01:
box.angle = np.pi / 2
if abs(box.angle) <= 0.01:
box.angle = 0.0
roi = box.extract_from_image(img, scale)
text = ocr(roi, extra_cmdline_params=self.extra_cmdline_params)
if '>>' in text or ('>' in text and '<' not in text):
# Most probably we need to reverse the ROI
roi = roi[::-1, ::-1]
text = ocr(roi, extra_cmdline_params=self.extra_cmdline_params)
if not '<' in text:
# Assume this is unrecoverable and stop here (TODO: this may be premature, although it saves time on useless stuff)
return roi, text, MRZ.from_ocr(text)
mrz = MRZ.from_ocr(text)
mrz.aux['method'] = 'direct'
# Now try improving the result via hacks
if not mrz.valid:
text, mrz = self._try_larger_image(roi, text, mrz)
# Sometimes the filter used for enlargement is important!
if not mrz.valid:
text, mrz = self._try_larger_image(roi, text, mrz, 1)
if not mrz.valid:
text, mrz = self._try_black_tophat(roi, text, mrz)
return roi, text, mrz
def _try_black_tophat(self, roi, cur_text, cur_mrz):
roi_b = morphology.black_tophat(roi, morphology.disk(5))
# There are some examples where this line basically hangs for an undetermined amount of time.
new_text = ocr(roi_b, extra_cmdline_params=self.extra_cmdline_params)
new_mrz = MRZ.from_ocr(new_text)
if new_mrz.valid_score > cur_mrz.valid_score:
new_mrz.aux['method'] = 'black_tophat'
cur_text, cur_mrz = new_text, new_mrz
new_text, new_mrz = self._try_larger_image(roi_b, cur_text, cur_mrz)
if new_mrz.valid_score > cur_mrz.valid_score:
new_mrz.aux['method'] = 'black_tophat(rescaled(3))'
cur_text, cur_mrz = new_text, new_mrz
return cur_text, cur_mrz
|
konstantint/PassportEye | passporteye/mrz/scripts.py | process_file | python | def process_file(params):
tic = time.time()
filename, save_roi, extra_params = params
result = read_mrz(filename, save_roi=save_roi, extra_cmdline_params=extra_params)
walltime = time.time() - tic
return (filename, result, walltime) | Processes a file and returns the parsed MRZ (or None if no candidate regions were even found).
The input argument is a list (filename, save_roi, extra_params).
(Because we need to use this function within imap_unordered) | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/scripts.py#L25-L36 | [
"def read_mrz(file, save_roi=False, extra_cmdline_params=''):\n \"\"\"The main interface function to this module, encapsulating the recognition pipeline.\n Given an image filename, runs MRZPipeline on it, returning the parsed MRZ object.\n\n :param file: A filename or a stream to read the file data from... | '''
PassportEye::MRZ: Machine-readable zone extraction and parsing.
Command-line scripts
Author: Konstantin Tretyakov
License: MIT
'''
import argparse
import glob
import json
import logging
import multiprocessing
import os
import shutil
import sys
import time
from collections import Counter
import pkg_resources
from skimage import io
from pytesseract.pytesseract import TesseractNotFoundError, TesseractError
import passporteye
from .image import read_mrz
def evaluate_mrz():
"""
A script for evaluating the current MRZ recognition pipeline by applying it to a list of files in a directory and reporting how well it went.
"""
parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the sample test data, reporting the quality summary.')
parser.add_argument('-j', '--jobs', default=1, type=int, help='Number of parallel jobs to run')
parser.add_argument('-dd', '--data-dir', default=pkg_resources.resource_filename('passporteye.mrz', 'testdata'),
help='Read files from this directory instead of the package test files')
parser.add_argument('-sd', '--success-dir', default=None,
help='Copy files with successful (nonzero score) extraction results to this directory')
parser.add_argument('-fd', '--fail-dir', default=None,
help='Copy files with unsuccessful (zero score) extraction resutls to this directory')
parser.add_argument('-rd', '--roi-dir', default=None,
help='Extract ROIs to this directory')
parser.add_argument('-l', '--limit', default=-1, type=int, help='Only process the first <limit> files in the directory.')
parser.add_argument('--legacy', action='store_true',
help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better '
'results. It is not the default option, because it will only work if '
'your Tesseract installation includes the legacy *.traineddata files. You can download them at '
'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016')
args = parser.parse_args()
files = sorted(glob.glob(os.path.join(args.data_dir, '*.*')))
if args.limit >= 0:
files = files[0:args.limit]
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("evaluate_mrz")
tic = time.time()
pool = multiprocessing.Pool(args.jobs)
log.info("Preparing computation for %d files from %s", len(files), args.data_dir)
log.info("Running %d workers", args.jobs)
results = []
save_roi = args.roi_dir is not None
for d in [args.success_dir, args.fail_dir, args.roi_dir]:
if d is not None and not os.path.isdir(d):
os.mkdir(d)
def valid_score(mrz_):
return 0 if mrz_ is None else mrz_.valid_score
def score_change_type(filename, mrz_):
try:
new_score = valid_score(mrz_)
old_score = int(os.path.basename(filename).split('_')[0])
schange = new_score - old_score
return '=' if schange == 0 else ('>' if schange > 0 else '<')
except Exception:
return '?'
method_stats = Counter()
extra_params = '--oem 0' if args.legacy else ''
for result in pool.imap_unordered(process_file, [(f, save_roi, extra_params) for f in files]):
filename, mrz_, walltime = result
results.append(result)
log.info("Processed %s in %0.2fs (score %d) [%s]", os.path.basename(filename), walltime, valid_score(mrz_), score_change_type(filename, mrz_))
log.debug("\t%s", mrz_)
vs = valid_score(mrz_)
if args.success_dir is not None and vs > 0:
shutil.copyfile(filename, os.path.join(args.success_dir, '%d_%s' % (vs, os.path.basename(filename))))
if args.fail_dir is not None and vs == 0:
shutil.copyfile(filename, os.path.join(args.fail_dir, '%d_%s' % (vs, os.path.basename(filename))))
if args.roi_dir is not None and mrz_ is not None and 'roi' in mrz_.aux:
roi_fn = '%d_roi_%s.png' % (vs, os.path.basename(filename))
io.imsave(os.path.join(args.roi_dir, roi_fn), mrz_.aux['roi'])
if vs > 0 and 'method' in mrz_.aux:
method_stats[mrz_.aux['method']] += 1
num_files = len(results)
score_changes = [score_change_type(fn, mrz_) for fn, mrz_, wt in results]
scores = [valid_score(mrz_) for fn, mrz_, wt in results]
num_perfect = scores.count(100)
num_invalid = scores.count(0)
total_score = sum(scores)
total_computation_walltime = sum([wt for fn, mrz_, wt in results])
total_walltime = time.time() - tic
log.info("Completed")
print("Walltime: %0.2fs" % total_walltime)
print("Compute walltime: %0.2fs" % total_computation_walltime)
print("Processed files: %d" % num_files)
print("Perfect parses: %d" % num_perfect)
print("Invalid parses: %d" % num_invalid)
print("Improved parses: %d" % len([x for x in score_changes if x == '>']))
print("Worsened parses: %d" % len([x for x in score_changes if x == '<']))
print("Total score: %d" % total_score)
print("Mean score: %0.2f" % (float(total_score)/num_files))
print("Mean compute time: %0.2fs" % (total_computation_walltime/num_files))
print("Methods used:")
for stat in method_stats.most_common():
print(" %s: %d" % stat)
def mrz():
"""
Command-line script for extracting MRZ from a given image
"""
parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the given image.')
parser.add_argument('filename')
parser.add_argument('--json', action='store_true', help='Produce JSON (rather than tabular) output')
parser.add_argument('--legacy', action='store_true',
help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better '
'results. It is not the default option, because it will only work if '
'your Tesseract installation includes the legacy *.traineddata files. You can download them at '
'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016')
parser.add_argument('-r', '--save-roi', default=None,
help='Output the region of the image that is detected to contain the MRZ to the given png file')
parser.add_argument('--version', action='version', version='PassportEye MRZ v%s' % passporteye.__version__)
args = parser.parse_args()
try:
extra_params = '--oem 0' if args.legacy else ''
filename, mrz_, walltime = process_file((args.filename, args.save_roi is not None, extra_params))
except TesseractNotFoundError:
sys.stderr.write("ERROR: The tesseract executable was not found.\n"
"Please, make sure Tesseract is installed and the appropriate directory is included "
"in your PATH environment variable.\n")
sys.exit(1)
except TesseractError as ex:
sys.stderr.write("ERROR: %s" % ex.message)
sys.exit(ex.status)
d = mrz_.to_dict() if mrz_ is not None else {'mrz_type': None, 'valid': False, 'valid_score': 0}
d['walltime'] = walltime
d['filename'] = filename
if args.save_roi is not None and mrz_ is not None and 'roi' in mrz_.aux:
io.imsave(args.save_roi, mrz_.aux['roi'])
if not args.json:
for k in d:
print("%s\t%s" % (k, str(d[k])))
else:
print(json.dumps(d, indent=2))
|
konstantint/PassportEye | passporteye/mrz/scripts.py | evaluate_mrz | python | def evaluate_mrz():
parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the sample test data, reporting the quality summary.')
parser.add_argument('-j', '--jobs', default=1, type=int, help='Number of parallel jobs to run')
parser.add_argument('-dd', '--data-dir', default=pkg_resources.resource_filename('passporteye.mrz', 'testdata'),
help='Read files from this directory instead of the package test files')
parser.add_argument('-sd', '--success-dir', default=None,
help='Copy files with successful (nonzero score) extraction results to this directory')
parser.add_argument('-fd', '--fail-dir', default=None,
help='Copy files with unsuccessful (zero score) extraction resutls to this directory')
parser.add_argument('-rd', '--roi-dir', default=None,
help='Extract ROIs to this directory')
parser.add_argument('-l', '--limit', default=-1, type=int, help='Only process the first <limit> files in the directory.')
parser.add_argument('--legacy', action='store_true',
help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better '
'results. It is not the default option, because it will only work if '
'your Tesseract installation includes the legacy *.traineddata files. You can download them at '
'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016')
args = parser.parse_args()
files = sorted(glob.glob(os.path.join(args.data_dir, '*.*')))
if args.limit >= 0:
files = files[0:args.limit]
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("evaluate_mrz")
tic = time.time()
pool = multiprocessing.Pool(args.jobs)
log.info("Preparing computation for %d files from %s", len(files), args.data_dir)
log.info("Running %d workers", args.jobs)
results = []
save_roi = args.roi_dir is not None
for d in [args.success_dir, args.fail_dir, args.roi_dir]:
if d is not None and not os.path.isdir(d):
os.mkdir(d)
def valid_score(mrz_):
return 0 if mrz_ is None else mrz_.valid_score
def score_change_type(filename, mrz_):
try:
new_score = valid_score(mrz_)
old_score = int(os.path.basename(filename).split('_')[0])
schange = new_score - old_score
return '=' if schange == 0 else ('>' if schange > 0 else '<')
except Exception:
return '?'
method_stats = Counter()
extra_params = '--oem 0' if args.legacy else ''
for result in pool.imap_unordered(process_file, [(f, save_roi, extra_params) for f in files]):
filename, mrz_, walltime = result
results.append(result)
log.info("Processed %s in %0.2fs (score %d) [%s]", os.path.basename(filename), walltime, valid_score(mrz_), score_change_type(filename, mrz_))
log.debug("\t%s", mrz_)
vs = valid_score(mrz_)
if args.success_dir is not None and vs > 0:
shutil.copyfile(filename, os.path.join(args.success_dir, '%d_%s' % (vs, os.path.basename(filename))))
if args.fail_dir is not None and vs == 0:
shutil.copyfile(filename, os.path.join(args.fail_dir, '%d_%s' % (vs, os.path.basename(filename))))
if args.roi_dir is not None and mrz_ is not None and 'roi' in mrz_.aux:
roi_fn = '%d_roi_%s.png' % (vs, os.path.basename(filename))
io.imsave(os.path.join(args.roi_dir, roi_fn), mrz_.aux['roi'])
if vs > 0 and 'method' in mrz_.aux:
method_stats[mrz_.aux['method']] += 1
num_files = len(results)
score_changes = [score_change_type(fn, mrz_) for fn, mrz_, wt in results]
scores = [valid_score(mrz_) for fn, mrz_, wt in results]
num_perfect = scores.count(100)
num_invalid = scores.count(0)
total_score = sum(scores)
total_computation_walltime = sum([wt for fn, mrz_, wt in results])
total_walltime = time.time() - tic
log.info("Completed")
print("Walltime: %0.2fs" % total_walltime)
print("Compute walltime: %0.2fs" % total_computation_walltime)
print("Processed files: %d" % num_files)
print("Perfect parses: %d" % num_perfect)
print("Invalid parses: %d" % num_invalid)
print("Improved parses: %d" % len([x for x in score_changes if x == '>']))
print("Worsened parses: %d" % len([x for x in score_changes if x == '<']))
print("Total score: %d" % total_score)
print("Mean score: %0.2f" % (float(total_score)/num_files))
print("Mean compute time: %0.2fs" % (total_computation_walltime/num_files))
print("Methods used:")
for stat in method_stats.most_common():
print(" %s: %d" % stat) | A script for evaluating the current MRZ recognition pipeline by applying it to a list of files in a directory and reporting how well it went. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/scripts.py#L39-L131 | [
"def valid_score(mrz_):\n return 0 if mrz_ is None else mrz_.valid_score\n",
"def score_change_type(filename, mrz_):\n try:\n new_score = valid_score(mrz_)\n old_score = int(os.path.basename(filename).split('_')[0])\n schange = new_score - old_score\n return '=' if schange == 0 e... | '''
PassportEye::MRZ: Machine-readable zone extraction and parsing.
Command-line scripts
Author: Konstantin Tretyakov
License: MIT
'''
import argparse
import glob
import json
import logging
import multiprocessing
import os
import shutil
import sys
import time
from collections import Counter
import pkg_resources
from skimage import io
from pytesseract.pytesseract import TesseractNotFoundError, TesseractError
import passporteye
from .image import read_mrz
def process_file(params):
    """
    Processes a file and returns the parsed MRZ (or None if no candidate regions were even found).

    The input argument is a list (filename, save_roi, extra_params).
    (Because we need to use this function within imap_unordered)
    """
    started = time.time()
    filename, save_roi, extra_params = params
    parsed = read_mrz(filename, save_roi=save_roi, extra_cmdline_params=extra_params)
    return (filename, parsed, time.time() - started)
def mrz():
    """
    Command-line script for extracting MRZ from a given image
    """
    parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the given image.')
    parser.add_argument('filename')
    parser.add_argument('--json', action='store_true', help='Produce JSON (rather than tabular) output')
    parser.add_argument('--legacy', action='store_true',
                        help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better '
                             'results. It is not the default option, because it will only work if '
                             'your Tesseract installation includes the legacy *.traineddata files. You can download them at '
                             'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016')
    parser.add_argument('-r', '--save-roi', default=None,
                        help='Output the region of the image that is detected to contain the MRZ to the given png file')
    parser.add_argument('--version', action='version', version='PassportEye MRZ v%s' % passporteye.__version__)
    args = parser.parse_args()
    try:
        extra_params = '--oem 0' if args.legacy else ''
        # save_roi is requested from the pipeline only when the user asked to write it out.
        filename, mrz_, walltime = process_file((args.filename, args.save_roi is not None, extra_params))
    except TesseractNotFoundError:
        # Tesseract binary missing from PATH: report and exit with a nonzero status.
        sys.stderr.write("ERROR: The tesseract executable was not found.\n"
                         "Please, make sure Tesseract is installed and the appropriate directory is included "
                         "in your PATH environment variable.\n")
        sys.exit(1)
    except TesseractError as ex:
        # Propagate Tesseract's own exit status to the shell.
        sys.stderr.write("ERROR: %s" % ex.message)
        sys.exit(ex.status)
    # Fall back to a stub record when no MRZ candidate was found at all.
    d = mrz_.to_dict() if mrz_ is not None else {'mrz_type': None, 'valid': False, 'valid_score': 0}
    d['walltime'] = walltime
    d['filename'] = filename
    if args.save_roi is not None and mrz_ is not None and 'roi' in mrz_.aux:
        io.imsave(args.save_roi, mrz_.aux['roi'])
    if not args.json:
        # Tabular output: one "key<TAB>value" line per field.
        for k in d:
            print("%s\t%s" % (k, str(d[k])))
    else:
        print(json.dumps(d, indent=2))
|
konstantint/PassportEye | passporteye/mrz/scripts.py | mrz | python | def mrz():
parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the given image.')
parser.add_argument('filename')
parser.add_argument('--json', action='store_true', help='Produce JSON (rather than tabular) output')
parser.add_argument('--legacy', action='store_true',
help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better '
'results. It is not the default option, because it will only work if '
'your Tesseract installation includes the legacy *.traineddata files. You can download them at '
'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016')
parser.add_argument('-r', '--save-roi', default=None,
help='Output the region of the image that is detected to contain the MRZ to the given png file')
parser.add_argument('--version', action='version', version='PassportEye MRZ v%s' % passporteye.__version__)
args = parser.parse_args()
try:
extra_params = '--oem 0' if args.legacy else ''
filename, mrz_, walltime = process_file((args.filename, args.save_roi is not None, extra_params))
except TesseractNotFoundError:
sys.stderr.write("ERROR: The tesseract executable was not found.\n"
"Please, make sure Tesseract is installed and the appropriate directory is included "
"in your PATH environment variable.\n")
sys.exit(1)
except TesseractError as ex:
sys.stderr.write("ERROR: %s" % ex.message)
sys.exit(ex.status)
d = mrz_.to_dict() if mrz_ is not None else {'mrz_type': None, 'valid': False, 'valid_score': 0}
d['walltime'] = walltime
d['filename'] = filename
if args.save_roi is not None and mrz_ is not None and 'roi' in mrz_.aux:
io.imsave(args.save_roi, mrz_.aux['roi'])
if not args.json:
for k in d:
print("%s\t%s" % (k, str(d[k])))
else:
print(json.dumps(d, indent=2)) | Command-line script for extracting MRZ from a given image | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/scripts.py#L134-L174 | [
"def process_file(params):\n \"\"\"\n Processes a file and returns the parsed MRZ (or None if no candidate regions were even found).\n\n The input argument is a list (filename, save_roi, extra_params).\n (Because we need to use this function within imap_unordered)\n \"\"\"\n tic = time.time()\n ... | '''
PassportEye::MRZ: Machine-readable zone extraction and parsing.
Command-line scripts
Author: Konstantin Tretyakov
License: MIT
'''
import argparse
import glob
import json
import logging
import multiprocessing
import os
import shutil
import sys
import time
from collections import Counter
import pkg_resources
from skimage import io
from pytesseract.pytesseract import TesseractNotFoundError, TesseractError
import passporteye
from .image import read_mrz
def process_file(params):
"""
Processes a file and returns the parsed MRZ (or None if no candidate regions were even found).
The input argument is a list (filename, save_roi, extra_params).
(Because we need to use this function within imap_unordered)
"""
tic = time.time()
filename, save_roi, extra_params = params
result = read_mrz(filename, save_roi=save_roi, extra_cmdline_params=extra_params)
walltime = time.time() - tic
return (filename, result, walltime)
def evaluate_mrz():
    """
    A script for evaluating the current MRZ recognition pipeline by applying it to a list of files in a directory and reporting how well it went.
    """
    parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the sample test data, reporting the quality summary.')
    parser.add_argument('-j', '--jobs', default=1, type=int, help='Number of parallel jobs to run')
    parser.add_argument('-dd', '--data-dir', default=pkg_resources.resource_filename('passporteye.mrz', 'testdata'),
                        help='Read files from this directory instead of the package test files')
    parser.add_argument('-sd', '--success-dir', default=None,
                        help='Copy files with successful (nonzero score) extraction results to this directory')
    parser.add_argument('-fd', '--fail-dir', default=None,
                        help='Copy files with unsuccessful (zero score) extraction resutls to this directory')
    parser.add_argument('-rd', '--roi-dir', default=None,
                        help='Extract ROIs to this directory')
    parser.add_argument('-l', '--limit', default=-1, type=int, help='Only process the first <limit> files in the directory.')
    parser.add_argument('--legacy', action='store_true',
                        help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better '
                             'results. It is not the default option, because it will only work if '
                             'your Tesseract installation includes the legacy *.traineddata files. You can download them at '
                             'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016')
    args = parser.parse_args()
    files = sorted(glob.glob(os.path.join(args.data_dir, '*.*')))
    if args.limit >= 0:
        files = files[0:args.limit]
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("evaluate_mrz")
    tic = time.time()
    pool = multiprocessing.Pool(args.jobs)
    log.info("Preparing computation for %d files from %s", len(files), args.data_dir)
    log.info("Running %d workers", args.jobs)
    results = []
    save_roi = args.roi_dir is not None
    # Create any requested output directories up front.
    for d in [args.success_dir, args.fail_dir, args.roi_dir]:
        if d is not None and not os.path.isdir(d):
            os.mkdir(d)

    def valid_score(mrz_):
        # Score of a parse; None (no candidate region found) counts as 0.
        return 0 if mrz_ is None else mrz_.valid_score

    def score_change_type(filename, mrz_):
        # Compare the new score against the old one, which is expected to be
        # encoded as a numeric prefix of the filename ("<score>_name.ext");
        # '?' when the filename carries no such prefix.
        try:
            new_score = valid_score(mrz_)
            old_score = int(os.path.basename(filename).split('_')[0])
            schange = new_score - old_score
            return '=' if schange == 0 else ('>' if schange > 0 else '<')
        except Exception:
            return '?'
    method_stats = Counter()
    extra_params = '--oem 0' if args.legacy else ''
    # imap_unordered yields each result as soon as its worker finishes.
    for result in pool.imap_unordered(process_file, [(f, save_roi, extra_params) for f in files]):
        filename, mrz_, walltime = result
        results.append(result)
        log.info("Processed %s in %0.2fs (score %d) [%s]", os.path.basename(filename), walltime, valid_score(mrz_), score_change_type(filename, mrz_))
        log.debug("\t%s", mrz_)
        vs = valid_score(mrz_)
        # Sort input images into success/fail directories, prefixing the new score.
        if args.success_dir is not None and vs > 0:
            shutil.copyfile(filename, os.path.join(args.success_dir, '%d_%s' % (vs, os.path.basename(filename))))
        if args.fail_dir is not None and vs == 0:
            shutil.copyfile(filename, os.path.join(args.fail_dir, '%d_%s' % (vs, os.path.basename(filename))))
        if args.roi_dir is not None and mrz_ is not None and 'roi' in mrz_.aux:
            roi_fn = '%d_roi_%s.png' % (vs, os.path.basename(filename))
            io.imsave(os.path.join(args.roi_dir, roi_fn), mrz_.aux['roi'])
        if vs > 0 and 'method' in mrz_.aux:
            method_stats[mrz_.aux['method']] += 1
    # Aggregate statistics over all processed files.
    num_files = len(results)
    score_changes = [score_change_type(fn, mrz_) for fn, mrz_, wt in results]
    scores = [valid_score(mrz_) for fn, mrz_, wt in results]
    num_perfect = scores.count(100)
    num_invalid = scores.count(0)
    total_score = sum(scores)
    total_computation_walltime = sum([wt for fn, mrz_, wt in results])
    total_walltime = time.time() - tic
    log.info("Completed")
    print("Walltime: %0.2fs" % total_walltime)
    print("Compute walltime: %0.2fs" % total_computation_walltime)
    print("Processed files: %d" % num_files)
    print("Perfect parses: %d" % num_perfect)
    print("Invalid parses: %d" % num_invalid)
    print("Improved parses: %d" % len([x for x in score_changes if x == '>']))
    print("Worsened parses: %d" % len([x for x in score_changes if x == '<']))
    print("Total score: %d" % total_score)
    print("Mean score: %0.2f" % (float(total_score)/num_files))
    print("Mean compute time: %0.2fs" % (total_computation_walltime/num_files))
    print("Methods used:")
    for stat in method_stats.most_common():
        print(" %s: %d" % stat)
def mrz():
"""
Command-line script for extracting MRZ from a given image
"""
parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the given image.')
parser.add_argument('filename')
parser.add_argument('--json', action='store_true', help='Produce JSON (rather than tabular) output')
parser.add_argument('--legacy', action='store_true',
help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better '
'results. It is not the default option, because it will only work if '
'your Tesseract installation includes the legacy *.traineddata files. You can download them at '
'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016')
parser.add_argument('-r', '--save-roi', default=None,
help='Output the region of the image that is detected to contain the MRZ to the given png file')
parser.add_argument('--version', action='version', version='PassportEye MRZ v%s' % passporteye.__version__)
args = parser.parse_args()
try:
extra_params = '--oem 0' if args.legacy else ''
filename, mrz_, walltime = process_file((args.filename, args.save_roi is not None, extra_params))
except TesseractNotFoundError:
sys.stderr.write("ERROR: The tesseract executable was not found.\n"
"Please, make sure Tesseract is installed and the appropriate directory is included "
"in your PATH environment variable.\n")
sys.exit(1)
except TesseractError as ex:
sys.stderr.write("ERROR: %s" % ex.message)
sys.exit(ex.status)
d = mrz_.to_dict() if mrz_ is not None else {'mrz_type': None, 'valid': False, 'valid_score': 0}
d['walltime'] = walltime
d['filename'] = filename
if args.save_roi is not None and mrz_ is not None and 'roi' in mrz_.aux:
io.imsave(args.save_roi, mrz_.aux['roi'])
if not args.json:
for k in d:
print("%s\t%s" % (k, str(d[k])))
else:
print(json.dumps(d, indent=2))
|
maximtrp/scikit-posthocs | scikit_posthocs/_outliers.py | outliers_iqr | python | def outliers_iqr(x, ret='filtered', coef = 1.5):
x = np.asarray(x)
q1, q3 = np.percentile(x, [25, 75])
iqr = q3 - q1
ll = q1 - iqr * coef
ul = q3 + iqr * coef
if ret == 'indices':
return np.where((x >= ll) & (x <= ul))[0]
elif ret == 'outliers':
return x[(x < ll) | (x > ul)]
elif ret == 'outliers_indices':
return np.where((x < ll) | (x > ul))[0]
else:
return x[(x >= ll) & (x <= ul)] | Simple detection of potential outliers based on interquartile range (IQR).
Data that lie within the lower and upper limits are considered
non-outliers. The lower limit is the number that lies 1.5 IQRs below
(coefficient may be changed with an argument, see Parameters)
the first quartile; the upper limit is the number that lies 1.5 IQRs
above the third quartile.
Parameters
----------
x : array_like or ndarray, 1d
An array, any object exposing the array interface, containing
p values.
ret : str, optional
Specifies object to be returned. Available options are:
'filtered' : return a filtered array (default)
'outliers' : return outliers
'indices' : return indices of non-outliers
'outliers_indices' : return indices of outliers
coef : float, optional
Coefficient by which IQR is multiplied. Default is 1.5.
Returns
-------
Numpy array where 0 is False (not significant), 1 is True (significant),
and -1 is for diagonal elements.
Examples
--------
>>> x = np.array([4,5,6,10,12,4,3,1,2,3,23,5,3])
>>> outliers_iqr(x, ret = 'outliers')
array([12, 23]) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_outliers.py#L6-L60 | null | # -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import t
def outliers_grubbs(x, hypo = False, alpha = 0.05):
    """Two-sided Grubbs' test for a single outlier.

    The null hypothesis is that the data contain no outliers. The most
    deviant observation (largest |x - mean|) is tested against the Grubbs
    critical value derived from Student's t distribution.

    Parameters
    ----------
    x : array_like or ndarray, 1d
        Data to test for an outlier.
    hypo : bool, optional
        If True, return the boolean test outcome (True means the null
        hypothesis is rejected). If False (default), return the array with
        the outlier removed (or unchanged when none is detected).
    alpha : float, optional
        Significance level, 0.05 by default.

    Returns
    -------
    Numpy array (hypo=False) or bool (hypo=True).
    """
    deviations = np.abs(x - np.mean(x))
    suspect = np.argmax(deviations)
    G = deviations[suspect] / np.std(x, ddof=1)
    N = len(x)
    # Grubbs critical value at significance level alpha (two-sided).
    t_crit = t.ppf(1 - alpha / (2 * N), N - 2)
    threshold = (N - 1) / np.sqrt(N) * np.sqrt(t_crit ** 2 / (N - 2 + t_crit ** 2))
    reject = G > threshold
    if hypo:
        return reject
    return np.delete(x, suspect) if reject else x
def outliers_tietjen(x, k, hypo = False, alpha = 0.05):
    """Tietjen-Moore test for k outliers in an approximately normal sample.

    Generalizes Grubbs' test to several outliers at once (and reduces to it
    for k = 1). The null hypothesis is that the sample contains no outliers.
    The critical value is obtained by Monte-Carlo simulation (10000 standard
    normal samples of the same size), so results very close to the decision
    boundary may vary between runs.

    Parameters
    ----------
    x : ndarray, 1d
        Data to test.
    k : int
        Number of potential outliers; both tails are examined.
    hypo : bool, optional
        If True, return the boolean test outcome (True means the null
        hypothesis is rejected). If False (default), return the array with
        the k most deviant observations removed (or unchanged when the test
        does not reject).
    alpha : float, optional
        Significance level, 0.05 by default.

    Returns
    -------
    Numpy array (hypo=False) or bool (hypo=True).
    """
    n = x.size

    def _test_statistic(sample, n_out):
        # E statistic: residual sum of squares after dropping the n_out most
        # deviant points, relative to the full sum of squares.
        centered = np.abs(sample - sample.mean())
        ordered = sample[centered.argsort()]
        kept = ordered[:-n_out]
        return np.sum((kept - kept.mean()) ** 2) / np.sum((ordered - sample.mean()) ** 2)

    observed = _test_statistic(x, k)
    # Null distribution of E via simulation from the standard normal.
    simulated = np.zeros(10000)
    for i in np.arange(10000):
        simulated[i] = _test_statistic(np.random.normal(size=n), k)
    critical = np.percentile(simulated, alpha * 100)
    reject = observed < critical
    if hypo:
        return reject
    if reject:
        # Indices of the k largest deviations from the mean.
        worst = np.argpartition(np.abs(x - x.mean()), -k)[-k:]
        return np.delete(x, worst)
    return x
def outliers_gesd(data, outliers = 5, report = False, alpha=0.05):
    """
    The generalized ESD (Extreme Studentized Deviate) test is used
    to detect one or more outliers in a univariate data set that follows
    an approximately normal distribution [1]_.

    Parameters
    ----------
    data : array_like or ndarray, 1d
        An array, any object exposing the array interface, containing
        data to test for outliers.
    outliers : int, optional
        Number of potential outliers to test for. Test is two-tailed, i.e.
        maximum and minimum values are checked for potential outliers.
    report : bool, optional
        Specifies whether to return a summary table of the test.
        Available options are:
        1) True - print a summary table (and return None)
        2) False - return the array with outliers removed. (default)
    alpha : float, optional
        Significance level for a hypothesis test. Default is 0.05.

    Returns
    -------
    Numpy array with outliers removed if report is False, otherwise None.

    Notes
    -----
    .. [1] Rosner, Bernard (May 1983), Percentage Points for a Generalized
        ESD Many-Outlier Procedure, Technometrics, 25(2), pp. 165-172.

    Examples
    --------
    >>> data = np.array([-0.25, 0.68, 0.94, 1.15, 1.2, 1.26, 1.26, 1.34, 1.38, 1.43, 1.49, 1.49, 1.55, 1.56, 1.58, 1.65, 1.69, 1.7, 1.76, 1.77, 1.81, 1.91, 1.94, 1.96, 1.99, 2.06, 2.09, 2.1, 2.14, 2.15, 2.23, 2.24, 2.26, 2.35, 2.37, 2.4, 2.47, 2.54, 2.62, 2.64, 2.9, 2.92, 2.92, 2.93, 3.21, 3.26, 3.3, 3.59, 3.68, 4.3, 4.64, 5.34, 5.42, 6.01])
    >>> outliers_gesd(data, 5)
    array([-0.25, 0.68, 0.94, 1.15, 1.2 , 1.26, 1.26, 1.34, 1.38,
           1.43, 1.49, 1.49, 1.55, 1.56, 1.58, 1.65, 1.69, 1.7 ,
           1.76, 1.77, 1.81, 1.91, 1.94, 1.96, 1.99, 2.06, 2.09,
           2.1 , 2.14, 2.15, 2.23, 2.24, 2.26, 2.35, 2.37, 2.4 ,
           2.47, 2.54, 2.62, 2.64, 2.9 , 2.92, 2.92, 2.93, 3.21,
           3.26, 3.3 , 3.59, 3.68, 4.3 , 4.64])
    """
    # FIX: dtype=np.float was removed from NumPy (1.24); the builtin float is equivalent.
    Rs, ls = np.zeros(outliers, dtype=float), np.zeros(outliers, dtype=float)
    ms = []
    data = np.sort(np.array(data))
    data_proc = np.copy(data)
    n = data_proc.size
    # data_proc stays sorted throughout: we only ever delete an element that
    # maximizes |x - mean|, and in a sorted array that maximum is attained at
    # one of the two ends. left/right track which slots of the *original*
    # sorted array those ends correspond to, so the indices recorded in `ms`
    # remain valid for the final deletion from `data`. (Recording the raw
    # argmax positions of the shrinking array would be wrong as soon as an
    # element had been removed from the front: later indices would shift.)
    left, right = 0, n - 1
    for i in np.arange(outliers):
        abs_d = np.abs(data_proc - np.mean(data_proc))
        worst = np.argmax(abs_d)
        # R-value (test statistic) for iteration i
        Rs[i] = abs_d[worst] / np.std(data_proc, ddof=1)
        # Map the removed position back to an index in the original sorted array.
        if worst == 0:
            orig_idx = left
            left += 1
        else:
            orig_idx = right
            right -= 1
        # Cumulative list of indices removed up to and including iteration i.
        ms.append((ms[-1] if ms else []) + [orig_idx])
        # Critical value (lambda_i) calculation
        p = 1 - alpha / (2 * (n - i))
        df = n - i - 2
        t_ppr = t.ppf(p, df)
        ls[i] = ((n - i - 1) * t_ppr) / np.sqrt((n - i - 2 + t_ppr ** 2) * (n - i))
        # Remove the observation that maximizes |xi − xmean|
        data_proc = np.delete(data_proc, worst)
    if report:
        report = ["H0: no outliers in the data",
                  "Ha: up to " + str(outliers) + " outliers in the data",
                  "Significance level: α = " + str(alpha),
                  "Reject H0 if Ri > Critical Value (λi)", "",
                  "Summary Table for Two-Tailed Test",
                  "---------------------------------------",
                  " Exact Test Critical",
                  " Number of Statistic Value, λi",
                  "Outliers, i Value, Ri 5 %",
                  "---------------------------------------"]
        for i, (r, l) in enumerate(zip(Rs, ls)):
            report.append('{: >11s}'.format(str(i+1)) + \
                          '{: >15s}'.format(str(np.round(r, 3))) + \
                          '{: >13s}'.format(str(np.round(l, 3))) + (" *" if r > l else ""))
        print("\n".join(report))
    else:
        # The number of outliers is the largest i with Ri > lambda_i;
        # remove exactly the observations deleted in iterations 1..i.
        if any(Rs > ls):
            data = np.delete(data, ms[np.max(np.where(Rs > ls))])
        return data
|
maximtrp/scikit-posthocs | scikit_posthocs/_outliers.py | outliers_grubbs | python | def outliers_grubbs(x, hypo = False, alpha = 0.05):
val = np.max(np.abs(x - np.mean(x)))
ind = np.argmax(np.abs(x - np.mean(x)))
G = val / np.std(x, ddof=1)
N = len(x)
result = G > (N - 1)/np.sqrt(N) * np.sqrt((t.ppf(1-alpha/(2*N), N-2) ** 2) / (N - 2 + t.ppf(1-alpha/(2*N), N-2) ** 2 ))
if hypo:
return result
else:
if result:
return np.delete(x, ind)
else:
return x | Grubbs' Test for Outliers [1]_. This is the two-sided version of the test.
The null hypothesis implies that there are no outliers in the data set.
Parameters
----------
x : array_like or ndarray, 1d
An array, any object exposing the array interface, containing
data to test for an outlier in.
hypo : bool, optional
Specifies whether to return a bool value of a hypothesis test result.
Returns True when we can reject the null hypothesis. Otherwise, False.
Available options are:
1) True - return a hypothesis test result
2) False - return a filtered array without an outlier (default)
alpha : float, optional
Significance level for a hypothesis test. Default is 0.05.
Returns
-------
Numpy array if hypo is False or a bool value of a hypothesis test result.
Notes
-----
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h1.htm
Examples
--------
>>> x = np.array([199.31,199.53,200.19,200.82,201.92,201.95,202.18,245.57])
>>> ph.outliers_grubbs(x)
array([ 199.31, 199.53, 200.19, 200.82, 201.92, 201.95, 202.18]) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_outliers.py#L62-L113 | null | # -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import t
def outliers_iqr(x, ret='filtered', coef=1.5):
    """
    Simple detection of potential outliers based on interquartile range (IQR).

    Data that lie within the lower and upper limits are considered
    non-outliers. The lower limit is the number that lies `coef` IQRs below
    the first quartile; the upper limit is the number that lies `coef` IQRs
    above the third quartile.

    Parameters
    ----------
    x : array_like or ndarray, 1d
        An array, any object exposing the array interface, containing
        data to test for outliers.

    ret : str, optional
        Specifies object to be returned. Available options are:
            'filtered' : return a filtered array (default)
            'outliers' : return outliers
            'indices' : return indices of non-outliers
            'outliers_indices' : return indices of outliers
        Any other value falls back to the 'filtered' behaviour.

    coef : float, optional
        Coefficient by which IQR is multiplied. Default is 1.5.

    Returns
    -------
    Numpy array of filtered values, outliers, or their indices,
    depending on the `ret` argument.

    Examples
    --------
    >>> x = np.array([4,5,6,10,12,4,3,1,2,3,23,5,3])
    >>> outliers_iqr(x, ret = 'outliers')
    array([12, 23])
    """
    x = np.asarray(x)
    q1, q3 = np.percentile(x, [25, 75])
    iqr = q3 - q1
    ll = q1 - iqr * coef  # lower limit
    ul = q3 + iqr * coef  # upper limit

    if ret == 'indices':
        return np.where((x >= ll) & (x <= ul))[0]
    elif ret == 'outliers':
        return x[(x < ll) | (x > ul)]
    elif ret == 'outliers_indices':
        return np.where((x < ll) | (x > ul))[0]
    else:
        return x[(x >= ll) & (x <= ul)]
def outliers_tietjen(x, k, hypo=False, alpha=0.05):
    """
    Tietjen-Moore test [1]_ to detect multiple outliers in a univariate
    data set that follows an approximately normal distribution.
    The Tietjen-Moore test [2]_ is a generalization of the Grubbs' test to
    the case of multiple outliers. If testing for a single outlier,
    the Tietjen-Moore test is equivalent to the Grubbs' test.
    The null hypothesis implies that there are no outliers in the data set.

    Parameters
    ----------
    x : array_like or ndarray, 1d
        An array, any object exposing the array interface, containing
        data to test for an outlier in.

    k : int
        Number of potential outliers to test for. Function tests for
        outliers in both tails.

    hypo : bool, optional
        Specifies whether to return a bool value of a hypothesis test result.
        Returns True when we can reject the null hypothesis. Otherwise, False.
        Available options are:
        1) True - return a hypothesis test result
        2) False - return a filtered array without outliers (default)

    alpha : float, optional
        Significance level for a hypothesis test. Default is 0.05.

    Returns
    -------
    Numpy array if hypo is False or a bool value of a hypothesis test result.

    Notes
    -----
    The critical value is obtained by Monte Carlo simulation (10000 draws
    from the standard normal distribution), so results very close to the
    significance boundary may vary slightly between calls.

    .. [1] Tietjen and Moore (August 1972), Some Grubbs-Type Statistics
        for the Detection of Outliers, Technometrics, 14(3), pp. 583-597.
    .. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h2.htm

    Examples
    --------
    >>> x = np.array([-1.40, -0.44, -0.30, -0.24, -0.22, -0.13, -0.05, 0.06,
    0.10, 0.18, 0.20, 0.39, 0.48, 0.63, 1.01])
    >>> outliers_tietjen(x, 2)
    array([-0.44, -0.3 , -0.24, -0.22, -0.13, -0.05, 0.06, 0.1 , 0.18,
    0.2 , 0.39, 0.48, 0.63])
    """
    # Accept any array_like input, as documented (a plain list previously
    # failed on `x.size` / `x.mean()`).
    x = np.asarray(x)
    n = x.size

    def tietjen(x_, k_):
        # E statistic: residual sum of squares after dropping the k_ most
        # extreme points, relative to the total sum of squares.
        x_mean = x_.mean()
        r = np.abs(x_ - x_mean)
        z = x_[r.argsort()]
        E = np.sum((z[:-k_] - z[:-k_].mean()) ** 2) / np.sum((z - x_mean) ** 2)
        return E

    e_x = tietjen(x, k)

    # Simulate the null distribution of E to obtain the critical value.
    e_norm = np.zeros(10000)
    for i in np.arange(10000):
        norm = np.random.normal(size=n)
        e_norm[i] = tietjen(norm, k)

    cv = np.percentile(e_norm, alpha * 100)
    result = e_x < cv

    if hypo:
        return result
    elif result:
        # Drop the k observations farthest from the mean.
        ind = np.argpartition(np.abs(x - x.mean()), -k)[-k:]
        return np.delete(x, ind)
    else:
        return x
def outliers_gesd(data, outliers=5, report=False, alpha=0.05):
    """
    The generalized ESD (Extreme Studentized Deviate) test is used
    to detect one or more outliers in a univariate data set that follows
    an approximately normal distribution [1]_.

    Parameters
    ----------
    data : array_like or ndarray, 1d
        An array, any object exposing the array interface, containing
        data to test for outliers.

    outliers : int, optional
        Number of potential outliers to test for. Test is two-tailed, i.e.
        maximum and minimum values are checked for potential outliers.

    report : bool, optional
        Specifies whether to print a summary table of the test.
        Available options are:
        1) True - print a summary table and return None
        2) False - return the array with outliers removed (default)

    alpha : float, optional
        Significance level for a hypothesis test. Default is 0.05.

    Returns
    -------
    Numpy array with outliers removed if `report` is False; otherwise
    the summary table is printed and None is returned.

    Notes
    -----
    .. [1] Rosner, Bernard (May 1983), Percentage Points for a Generalized
        ESD Many-Outlier Procedure,Technometrics, 25(2), pp. 165-172.

    Examples
    --------
    >>> data = np.array([-0.25, 0.68, 0.94, 1.15, 1.2, 1.26, 1.26, 1.34, 1.38, 1.43, 1.49, 1.49, 1.55, 1.56, 1.58, 1.65, 1.69, 1.7, 1.76, 1.77, 1.81, 1.91, 1.94, 1.96, 1.99, 2.06, 2.09, 2.1, 2.14, 2.15, 2.23, 2.24, 2.26, 2.35, 2.37, 2.4, 2.47, 2.54, 2.62, 2.64, 2.9, 2.92, 2.92, 2.93, 3.21, 3.26, 3.3, 3.59, 3.68, 4.3, 4.64, 5.34, 5.42, 6.01])
    >>> outliers_gesd(data, 5)
    array([-0.25, 0.68, 0.94, 1.15, 1.2 , 1.26, 1.26, 1.34, 1.38,
    1.43, 1.49, 1.49, 1.55, 1.56, 1.58, 1.65, 1.69, 1.7 ,
    1.76, 1.77, 1.81, 1.91, 1.94, 1.96, 1.99, 2.06, 2.09,
    2.1 , 2.14, 2.15, 2.23, 2.24, 2.26, 2.35, 2.37, 2.4 ,
    2.47, 2.54, 2.62, 2.64, 2.9 , 2.92, 2.92, 2.93, 3.21,
    3.26, 3.3 , 3.59, 3.68, 4.3 , 4.64])
    """
    # Test statistics (Ri) and critical values (lambda i) for each step.
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    rs = np.zeros(outliers, dtype=float)
    ls = np.zeros(outliers, dtype=float)
    ms = []  # cumulative lists of masked (candidate outlier) indices

    data = np.sort(np.array(data))
    data_proc = np.copy(data)
    n = data_proc.size

    for i in np.arange(outliers):

        abs_d = np.abs(data_proc - np.mean(data_proc))

        # R-value: largest absolute deviation scaled by the sample SD
        rs[i] = np.max(abs_d) / np.std(data_proc, ddof=1)

        # Masked values accumulated so far.
        # NOTE(review): these indices are positions in successively reduced
        # arrays, yet they are later applied to the full sorted array. That
        # mapping is exact when extremes are removed from the upper end;
        # verify behaviour when lower-end values are removed first.
        lms = ms[-1] if ms else []
        ms.append(lms + [np.argmax(abs_d)])

        # Critical value (lambda) for step i
        p = 1 - alpha / (2 * (n - i))
        df = n - i - 2
        t_ppr = t.ppf(p, df)
        ls[i] = ((n - i - 1) * t_ppr) / np.sqrt((n - i - 2 + t_ppr ** 2) * (n - i))

        # Remove the observation that maximizes |xi − xmean|
        data_proc = np.delete(data_proc, np.argmax(abs_d))

    if report:
        # Build and print the summary table instead of returning data.
        table = ["H0: no outliers in the data",
                 "Ha: up to " + str(outliers) + " outliers in the data",
                 "Significance level: α = " + str(alpha),
                 "Reject H0 if Ri > Critical Value (λi)", "",
                 "Summary Table for Two-Tailed Test",
                 "---------------------------------------",
                 " Exact Test Critical",
                 " Number of Statistic Value, λi",
                 "Outliers, i Value, Ri 5 %",
                 "---------------------------------------"]

        for i, (stat, crit) in enumerate(zip(rs, ls)):
            table.append('{: >11s}'.format(str(i + 1)) +
                         '{: >15s}'.format(str(np.round(stat, 3))) +
                         '{: >13s}'.format(str(np.round(crit, 3))) +
                         (" *" if stat > crit else ""))

        print("\n".join(table))
    else:
        # Remove the masked values for which the test statistic exceeds
        # the critical value and return the result.
        if any(rs > ls):
            data = np.delete(data, ms[np.max(np.where(rs > ls))])
        return data
|
maximtrp/scikit-posthocs | scikit_posthocs/_outliers.py | outliers_tietjen | python | def outliers_tietjen(x, k, hypo = False, alpha = 0.05):
n = x.size
def tietjen(x_, k_):
x_mean = x_.mean()
r = np.abs(x_ - x_mean)
z = x_[r.argsort()]
E = np.sum((z[:-k_] - z[:-k_].mean()) ** 2) / np.sum((z - x_mean) ** 2)
return E
E_x = tietjen(x, k)
E_norm = np.zeros(10000)
for i in np.arange(10000):
norm = np.random.normal(size=n)
E_norm[i] = tietjen(norm, k)
CV = np.percentile(E_norm, alpha * 100)
result = E_x < CV
if hypo:
return result
else:
if result:
ind = np.argpartition(np.abs(x - x.mean()), -k)[-k:]
return np.delete(x, ind)
else:
return x | Tietjen-Moore test [1]_ to detect multiple outliers in a univariate
data set that follows an approximately normal distribution.
The Tietjen-Moore test [2]_ is a generalization of the Grubbs' test to
the case of multiple outliers. If testing for a single outlier,
the Tietjen-Moore test is equivalent to the Grubbs' test.
The null hypothesis implies that there are no outliers in the data set.
Parameters
----------
x : array_like or ndarray, 1d
An array, any object exposing the array interface, containing
data to test for an outlier in.
k : int
Number of potential outliers to test for. Function tests for
outliers in both tails.
hypo : bool, optional
Specifies whether to return a bool value of a hypothesis test result.
Returns True when we can reject the null hypothesis. Otherwise, False.
Available options are:
1) True - return a hypothesis test result
2) False - return a filtered array without outliers (default)
alpha : float, optional
Significance level for a hypothesis test. Default is 0.05.
Returns
-------
Numpy array if hypo is False or a bool value of a hypothesis test result.
Notes
-----
.. [1] Tietjen and Moore (August 1972), Some Grubbs-Type Statistics
for the Detection of Outliers, Technometrics, 14(3), pp. 583-597.
.. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h2.htm
Examples
--------
>>> x = np.array([-1.40, -0.44, -0.30, -0.24, -0.22, -0.13, -0.05, 0.06,
0.10, 0.18, 0.20, 0.39, 0.48, 0.63, 1.01])
>>> outliers_tietjen(x, 2)
array([-0.44, -0.3 , -0.24, -0.22, -0.13, -0.05, 0.06, 0.1 , 0.18,
0.2 , 0.39, 0.48, 0.63]) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_outliers.py#L115-L191 | [
"def tietjen(x_, k_):\n x_mean = x_.mean()\n r = np.abs(x_ - x_mean)\n z = x_[r.argsort()]\n E = np.sum((z[:-k_] - z[:-k_].mean()) ** 2) / np.sum((z - x_mean) ** 2)\n return E\n"
] | # -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import t
def outliers_iqr(x, ret='filtered', coef = 1.5):
"""
Simple detection of potential outliers based on interquartile range (IQR).
Data that lie within the lower and upper limits are considered
non-outliers. The lower limit is the number that lies 1.5 IQRs below
(coefficient may be changed with an argument, see Parameters)
the first quartile; the upper limit is the number that lies 1.5 IQRs
above the third quartile.
Parameters
----------
x : array_like or ndarray, 1d
An array, any object exposing the array interface, containing
p values.
ret : str, optional
Specifies object to be returned. Available options are:
'filtered' : return a filtered array (default)
'outliers' : return outliers
'indices' : return indices of non-outliers
'outliers_indices' : return indices of outliers
coef : float, optional
Coefficient by which IQR is multiplied. Default is 1.5.
Returns
-------
Numpy array where 0 is False (not significant), 1 is True (significant),
and -1 is for diagonal elements.
Examples
--------
>>> x = np.array([4,5,6,10,12,4,3,1,2,3,23,5,3])
>>> outliers_iqr(x, ret = 'outliers')
array([12, 23])
"""
x = np.asarray(x)
q1, q3 = np.percentile(x, [25, 75])
iqr = q3 - q1
ll = q1 - iqr * coef
ul = q3 + iqr * coef
if ret == 'indices':
return np.where((x >= ll) & (x <= ul))[0]
elif ret == 'outliers':
return x[(x < ll) | (x > ul)]
elif ret == 'outliers_indices':
return np.where((x < ll) | (x > ul))[0]
else:
return x[(x >= ll) & (x <= ul)]
def outliers_grubbs(x, hypo=False, alpha=0.05):
    """
    Grubbs' Test for Outliers [1]_. This is the two-sided version of the test.
    The null hypothesis implies that there are no outliers in the data set.

    Parameters
    ----------
    x : array_like or ndarray, 1d
        An array, any object exposing the array interface, containing
        data to test for an outlier in.

    hypo : bool, optional
        Specifies whether to return a bool value of a hypothesis test result.
        Returns True when we can reject the null hypothesis. Otherwise, False.
        Available options are:
        1) True - return a hypothesis test result
        2) False - return a filtered array without an outlier (default)

    alpha : float, optional
        Significance level for a hypothesis test. Default is 0.05.

    Returns
    -------
    Numpy array if hypo is False or a bool value of a hypothesis test result.

    Notes
    -----
    .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h1.htm

    Examples
    --------
    >>> x = np.array([199.31,199.53,200.19,200.82,201.92,201.95,202.18,245.57])
    >>> ph.outliers_grubbs(x)
    array([ 199.31, 199.53, 200.19, 200.82, 201.92, 201.95, 202.18])
    """
    # Accept any array_like input, as documented.
    x = np.asarray(x)

    # Deviations from the mean; computed once (previously duplicated for
    # the max value and the argmax index).
    abs_dev = np.abs(x - np.mean(x))
    ind = np.argmax(abs_dev)
    G = abs_dev[ind] / np.std(x, ddof=1)

    N = len(x)
    # Two-sided critical value of the Grubbs statistic; t.ppf evaluated
    # once (previously called twice with identical arguments).
    t_crit = t.ppf(1 - alpha / (2 * N), N - 2)
    g_crit = (N - 1) / np.sqrt(N) * np.sqrt(t_crit ** 2 / (N - 2 + t_crit ** 2))
    result = G > g_crit

    if hypo:
        return result
    elif result:
        return np.delete(x, ind)
    else:
        return x
def outliers_gesd(data, outliers = 5, report = False, alpha=0.05):
"""
The generalized (Extreme Studentized Deviate) ESD test is used
to detect one or more outliers in a univariate data set that follows
an approximately normal distribution [1]_.
Parameters
----------
data : array_like or ndarray, 1d
An array, any object exposing the array interface, containing
data to test for outliers.
outliers : int, optional
Number of potential outliers to test for. Test is two-tailed, i.e.
maximum and minimum values are checked for potential outliers.
report : bool, optional
Specifies whether to return a summary table of the test.
Available options are:
1) True - return a summary table
2) False - return the array with outliers removed. (default)
alpha : float, optional
Significance level for a hypothesis test. Default is 0.05.
Returns
-------
Numpy array if hypo is False or a bool value of a hypothesis test result.
Notes
-----
.. [1] Rosner, Bernard (May 1983), Percentage Points for a Generalized
ESD Many-Outlier Procedure,Technometrics, 25(2), pp. 165-172.
Examples
--------
>>> data = np.array([-0.25, 0.68, 0.94, 1.15, 1.2, 1.26, 1.26, 1.34, 1.38, 1.43, 1.49, 1.49, 1.55, 1.56, 1.58, 1.65, 1.69, 1.7, 1.76, 1.77, 1.81, 1.91, 1.94, 1.96, 1.99, 2.06, 2.09, 2.1, 2.14, 2.15, 2.23, 2.24, 2.26, 2.35, 2.37, 2.4, 2.47, 2.54, 2.62, 2.64, 2.9, 2.92, 2.92, 2.93, 3.21, 3.26, 3.3, 3.59, 3.68, 4.3, 4.64, 5.34, 5.42, 6.01])
>>> outliers_gesd(data, 5)
array([-0.25, 0.68, 0.94, 1.15, 1.2 , 1.26, 1.26, 1.34, 1.38,
1.43, 1.49, 1.49, 1.55, 1.56, 1.58, 1.65, 1.69, 1.7 ,
1.76, 1.77, 1.81, 1.91, 1.94, 1.96, 1.99, 2.06, 2.09,
2.1 , 2.14, 2.15, 2.23, 2.24, 2.26, 2.35, 2.37, 2.4 ,
2.47, 2.54, 2.62, 2.64, 2.9 , 2.92, 2.92, 2.93, 3.21,
3.26, 3.3 , 3.59, 3.68, 4.3 , 4.64])
>>> outliers_gesd(data, outliers = 5, report = True)
H0: no outliers in the data
Ha: up to 5 outliers in the data
Significance level: α = 0.05
Reject H0 if Ri > Critical Value (λi)
Summary Table for Two-Tailed Test
---------------------------------------
Exact Test Critical
Number of Statistic Value, λi
Outliers, i Value, Ri 5 %
---------------------------------------
1 3.119 3.159
2 2.943 3.151
3 3.179 3.144 *
4 2.81 3.136
5 2.816 3.128
"""
Rs, ls = np.zeros(outliers, dtype = np.float), np.zeros(outliers, dtype = np.float)
ms = []
data = np.sort(np.array(data))
data_proc = np.copy(data)
n = data_proc.size
mean = np.mean(data_proc)
for i in np.arange(outliers):
abs_d = np.abs(data_proc - np.mean(data_proc))
# R-value calculation
R = np.max(abs_d) / np.std(data_proc, ddof=1)
Rs[i] = R
# Masked values
lms = ms[-1] if len(ms) > 0 else []
ms.append(lms + [np.argmax(abs_d)])
# Lambdas calculation
p = 1 - alpha / (2 * (n - i))
df = n - i - 2
t_ppr = t.ppf(p, df)
lambd = ((n - i - 1) * t_ppr) / np.sqrt((n - i - 2 + t_ppr**2) * (n - i))
ls[i] = lambd
# Remove the observation that maximizes |xi − xmean|
data_proc = np.delete(data_proc, np.argmax(abs_d))
if report:
report = ["H0: no outliers in the data",
"Ha: up to " + str(outliers) + " outliers in the data",
"Significance level: α = " + str(alpha),
"Reject H0 if Ri > Critical Value (λi)", "",
"Summary Table for Two-Tailed Test",
"---------------------------------------",
" Exact Test Critical",
" Number of Statistic Value, λi",
"Outliers, i Value, Ri 5 %",
"---------------------------------------"]
for i, (r, l) in enumerate(zip(Rs, ls)):
report.append('{: >11s}'.format(str(i+1)) + \
'{: >15s}'.format(str(np.round(r, 3))) + \
'{: >13s}'.format(str(np.round(l, 3))) + (" *" if r > l else ""))
print("\n".join(report))
else:
# Remove masked values
# for which the test statistic is greater
# than the critical value and return the result
if any(Rs > ls):
data = np.delete(data, ms[np.max(np.where(Rs > ls))])
return data
|
maximtrp/scikit-posthocs | scikit_posthocs/_outliers.py | outliers_gesd | python | def outliers_gesd(data, outliers = 5, report = False, alpha=0.05):
Rs, ls = np.zeros(outliers, dtype = np.float), np.zeros(outliers, dtype = np.float)
ms = []
data = np.sort(np.array(data))
data_proc = np.copy(data)
n = data_proc.size
mean = np.mean(data_proc)
for i in np.arange(outliers):
abs_d = np.abs(data_proc - np.mean(data_proc))
# R-value calculation
R = np.max(abs_d) / np.std(data_proc, ddof=1)
Rs[i] = R
# Masked values
lms = ms[-1] if len(ms) > 0 else []
ms.append(lms + [np.argmax(abs_d)])
# Lambdas calculation
p = 1 - alpha / (2 * (n - i))
df = n - i - 2
t_ppr = t.ppf(p, df)
lambd = ((n - i - 1) * t_ppr) / np.sqrt((n - i - 2 + t_ppr**2) * (n - i))
ls[i] = lambd
# Remove the observation that maximizes |xi − xmean|
data_proc = np.delete(data_proc, np.argmax(abs_d))
if report:
report = ["H0: no outliers in the data",
"Ha: up to " + str(outliers) + " outliers in the data",
"Significance level: α = " + str(alpha),
"Reject H0 if Ri > Critical Value (λi)", "",
"Summary Table for Two-Tailed Test",
"---------------------------------------",
" Exact Test Critical",
" Number of Statistic Value, λi",
"Outliers, i Value, Ri 5 %",
"---------------------------------------"]
for i, (r, l) in enumerate(zip(Rs, ls)):
report.append('{: >11s}'.format(str(i+1)) + \
'{: >15s}'.format(str(np.round(r, 3))) + \
'{: >13s}'.format(str(np.round(l, 3))) + (" *" if r > l else ""))
print("\n".join(report))
else:
# Remove masked values
# for which the test statistic is greater
# than the critical value and return the result
if any(Rs > ls):
data = np.delete(data, ms[np.max(np.where(Rs > ls))])
return data | The generalized (Extreme Studentized Deviate) ESD test is used
to detect one or more outliers in a univariate data set that follows
an approximately normal distribution [1]_.
Parameters
----------
data : array_like or ndarray, 1d
An array, any object exposing the array interface, containing
data to test for outliers.
outliers : int, optional
Number of potential outliers to test for. Test is two-tailed, i.e.
maximum and minimum values are checked for potential outliers.
report : bool, optional
Specifies whether to return a summary table of the test.
Available options are:
1) True - return a summary table
2) False - return the array with outliers removed. (default)
alpha : float, optional
Significance level for a hypothesis test. Default is 0.05.
Returns
-------
Numpy array if hypo is False or a bool value of a hypothesis test result.
Notes
-----
.. [1] Rosner, Bernard (May 1983), Percentage Points for a Generalized
ESD Many-Outlier Procedure,Technometrics, 25(2), pp. 165-172.
Examples
--------
>>> data = np.array([-0.25, 0.68, 0.94, 1.15, 1.2, 1.26, 1.26, 1.34, 1.38, 1.43, 1.49, 1.49, 1.55, 1.56, 1.58, 1.65, 1.69, 1.7, 1.76, 1.77, 1.81, 1.91, 1.94, 1.96, 1.99, 2.06, 2.09, 2.1, 2.14, 2.15, 2.23, 2.24, 2.26, 2.35, 2.37, 2.4, 2.47, 2.54, 2.62, 2.64, 2.9, 2.92, 2.92, 2.93, 3.21, 3.26, 3.3, 3.59, 3.68, 4.3, 4.64, 5.34, 5.42, 6.01])
>>> outliers_gesd(data, 5)
array([-0.25, 0.68, 0.94, 1.15, 1.2 , 1.26, 1.26, 1.34, 1.38,
1.43, 1.49, 1.49, 1.55, 1.56, 1.58, 1.65, 1.69, 1.7 ,
1.76, 1.77, 1.81, 1.91, 1.94, 1.96, 1.99, 2.06, 2.09,
2.1 , 2.14, 2.15, 2.23, 2.24, 2.26, 2.35, 2.37, 2.4 ,
2.47, 2.54, 2.62, 2.64, 2.9 , 2.92, 2.92, 2.93, 3.21,
3.26, 3.3 , 3.59, 3.68, 4.3 , 4.64])
>>> outliers_gesd(data, outliers = 5, report = True)
H0: no outliers in the data
Ha: up to 5 outliers in the data
Significance level: α = 0.05
Reject H0 if Ri > Critical Value (λi)
Summary Table for Two-Tailed Test
---------------------------------------
Exact Test Critical
Number of Statistic Value, λi
Outliers, i Value, Ri 5 %
---------------------------------------
1 3.119 3.159
2 2.943 3.151
3 3.179 3.144 *
4 2.81 3.136
5 2.816 3.128 | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_outliers.py#L193-L318 | null | # -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import t
def outliers_iqr(x, ret='filtered', coef = 1.5):
"""
Simple detection of potential outliers based on interquartile range (IQR).
Data that lie within the lower and upper limits are considered
non-outliers. The lower limit is the number that lies 1.5 IQRs below
(coefficient may be changed with an argument, see Parameters)
the first quartile; the upper limit is the number that lies 1.5 IQRs
above the third quartile.
Parameters
----------
x : array_like or ndarray, 1d
An array, any object exposing the array interface, containing
p values.
ret : str, optional
Specifies object to be returned. Available options are:
'filtered' : return a filtered array (default)
'outliers' : return outliers
'indices' : return indices of non-outliers
'outliers_indices' : return indices of outliers
coef : float, optional
Coefficient by which IQR is multiplied. Default is 1.5.
Returns
-------
Numpy array where 0 is False (not significant), 1 is True (significant),
and -1 is for diagonal elements.
Examples
--------
>>> x = np.array([4,5,6,10,12,4,3,1,2,3,23,5,3])
>>> outliers_iqr(x, ret = 'outliers')
array([12, 23])
"""
x = np.asarray(x)
q1, q3 = np.percentile(x, [25, 75])
iqr = q3 - q1
ll = q1 - iqr * coef
ul = q3 + iqr * coef
if ret == 'indices':
return np.where((x >= ll) & (x <= ul))[0]
elif ret == 'outliers':
return x[(x < ll) | (x > ul)]
elif ret == 'outliers_indices':
return np.where((x < ll) | (x > ul))[0]
else:
return x[(x >= ll) & (x <= ul)]
def outliers_grubbs(x, hypo = False, alpha = 0.05):
"""
Grubbs' Test for Outliers [1]_. This is the two-sided version of the test.
The null hypothesis implies that there are no outliers in the data set.
Parameters
----------
x : array_like or ndarray, 1d
An array, any object exposing the array interface, containing
data to test for an outlier in.
hypo : bool, optional
Specifies whether to return a bool value of a hypothesis test result.
Returns True when we can reject the null hypothesis. Otherwise, False.
Available options are:
1) True - return a hypothesis test result
2) False - return a filtered array without an outlier (default)
alpha : float, optional
Significance level for a hypothesis test. Default is 0.05.
Returns
-------
Numpy array if hypo is False or a bool value of a hypothesis test result.
Notes
-----
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h1.htm
Examples
--------
>>> x = np.array([199.31,199.53,200.19,200.82,201.92,201.95,202.18,245.57])
>>> ph.outliers_grubbs(x)
array([ 199.31, 199.53, 200.19, 200.82, 201.92, 201.95, 202.18])
"""
val = np.max(np.abs(x - np.mean(x)))
ind = np.argmax(np.abs(x - np.mean(x)))
G = val / np.std(x, ddof=1)
N = len(x)
result = G > (N - 1)/np.sqrt(N) * np.sqrt((t.ppf(1-alpha/(2*N), N-2) ** 2) / (N - 2 + t.ppf(1-alpha/(2*N), N-2) ** 2 ))
if hypo:
return result
else:
if result:
return np.delete(x, ind)
else:
return x
def outliers_tietjen(x, k, hypo = False, alpha = 0.05):
"""
Tietjen-Moore test [1]_ to detect multiple outliers in a univariate
data set that follows an approximately normal distribution.
The Tietjen-Moore test [2]_ is a generalization of the Grubbs' test to
the case of multiple outliers. If testing for a single outlier,
the Tietjen-Moore test is equivalent to the Grubbs' test.
The null hypothesis implies that there are no outliers in the data set.
Parameters
----------
x : array_like or ndarray, 1d
An array, any object exposing the array interface, containing
data to test for an outlier in.
k : int
Number of potential outliers to test for. Function tests for
outliers in both tails.
hypo : bool, optional
Specifies whether to return a bool value of a hypothesis test result.
Returns True when we can reject the null hypothesis. Otherwise, False.
Available options are:
1) True - return a hypothesis test result
2) False - return a filtered array without outliers (default)
alpha : float, optional
Significance level for a hypothesis test. Default is 0.05.
Returns
-------
Numpy array if hypo is False or a bool value of a hypothesis test result.
Notes
-----
.. [1] Tietjen and Moore (August 1972), Some Grubbs-Type Statistics
for the Detection of Outliers, Technometrics, 14(3), pp. 583-597.
.. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h2.htm
Examples
--------
>>> x = np.array([-1.40, -0.44, -0.30, -0.24, -0.22, -0.13, -0.05, 0.06,
0.10, 0.18, 0.20, 0.39, 0.48, 0.63, 1.01])
>>> outliers_tietjen(x, 2)
array([-0.44, -0.3 , -0.24, -0.22, -0.13, -0.05, 0.06, 0.1 , 0.18,
0.2 , 0.39, 0.48, 0.63])
"""
n = x.size
def tietjen(x_, k_):
x_mean = x_.mean()
r = np.abs(x_ - x_mean)
z = x_[r.argsort()]
E = np.sum((z[:-k_] - z[:-k_].mean()) ** 2) / np.sum((z - x_mean) ** 2)
return E
E_x = tietjen(x, k)
E_norm = np.zeros(10000)
for i in np.arange(10000):
norm = np.random.normal(size=n)
E_norm[i] = tietjen(norm, k)
CV = np.percentile(E_norm, alpha * 100)
result = E_x < CV
if hypo:
return result
else:
if result:
ind = np.argpartition(np.abs(x - x.mean()), -k)[-k:]
return np.delete(x, ind)
else:
return x
|
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.

    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.

    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).

    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Ragged input: a sequence of groups of possibly different lengths.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i + 1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):

        # Column ids not defined -- trying to infer.
        # Column index 0 is a legitimate value, so compare against None
        # instead of truthiness (`all([val_id, group_id])` wrongly treated
        # column 0 as "not specified").
        if val_id is None or group_id is None:
            if np.argmax(a.shape):
                a = a.T

            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]

            # np.asscalar() was removed in NumPy 1.23; .item() is the
            # documented replacement.
            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')

            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}

        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
Inferrence algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors. | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L11-L106 | null | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper: convert block-design input into a melted (long)
    DataFrame with standardized column names 'y', 'groups' and 'blocks'.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Input data. A wide layout (blocks as rows, groups as columns) is
        assumed unless `melted` is True.

    y_col, group_col, block_col : str or int, optional
        Identifiers of the response, group and block columns for melted
        input (column names for a DataFrame, column indices for an array).

    melted : bool, optional
        Whether the input is already in long (melted) format.

    Returns
    -------
    Tuple of (DataFrame, 'y', 'groups', 'blocks').
    '''
    if isinstance(a, DataFrame) and not melted:
        # Wide DataFrame: melt columns into groups, index into blocks.
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
    elif melted:
        # Already long format: pick out the three columns by name/index.
        # Arrays are wrapped in a DataFrame first, so that integer column
        # ids select columns (indexing an ndarray like `a[group_col]`
        # selects ROWS, which was the previous, broken behaviour; the old
        # `x.columns[...] = ...` fallback also raised, as a pandas Index
        # is immutable).
        if not isinstance(a, DataFrame):
            a = DataFrame(np.array(a))
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})
    else:
        # Wide array-like: same treatment as a wide DataFrame.
        arr = np.array(a)
        x = DataFrame(arr, index=np.arange(arr.shape[0]), columns=np.arange(arr.shape[1]))
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction are employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    def compare_conover(i, j):
        # Conover-Iman t statistic: difference of mean ranks scaled by the
        # pooled rank variance S2 and the correction factor D derived from
        # the tie-corrected Kruskal-Wallis H statistic.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        D = (n - 1. - H_cor) / (n - x_len)
        t_value = diff / np.sqrt(S2 * B * D)
        # Two-sided p value from Student's t with N - k degrees of freedom.
        p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
        return p_value

    # Normalize input to long format (helper defined earlier in this module).
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of sorting lexically.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)              # total number of observations (N)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size  # number of groups (k)
    x_lens = x.groupby(_group_col)[_val_col].count()

    # Rank all observations jointly; ties receive average ranks.
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    x_ranks_sum = x.groupby(_group_col)['ranks'].sum()

    # Tie correction factor: 1 - sum(t^3 - t) / (N^3 - N), where t is the
    # multiplicity of each tied rank value.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    # Kruskal-Wallis H statistic, corrected for ties.
    H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
    H_cor = H / x_ties

    if x_ties == 1:
        # No ties: the variance of the joint ranks has a closed form.
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))

    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    # Fill the upper triangle with pairwise p values.
    combs = it.combinations(range(x_len), 2)
    for i, j in combs:
        vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])

    if p_adjust:
        # Adjust for multiple comparisons over the upper triangle only.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror into the lower triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    def compare_dunn(i, j):
        # Dunn's z statistic: difference of mean ranks over its standard
        # error; A is the null rank variance term, reduced by the tie
        # correction x_ties.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        z_value = diff / np.sqrt((A - x_ties) * B)
        # Two-sided p value from the standard normal distribution.
        p_value = 2. * ss.norm.sf(np.abs(z_value))
        return p_value

    # Normalize input to long format (helper defined earlier in this module).
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of sorting lexically.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)              # total number of observations (N)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size  # number of groups (k)
    x_lens = x.groupby(_group_col)[_val_col].count()

    # Rank all observations jointly; ties receive average ranks.
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()

    # Tie correction term sum(t^3 - t) / (12 (N - 1)), where t is the
    # multiplicity of each tied rank value (Glantz 2012).
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = tie_sum / (12. * (n - 1))

    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    # Fill the upper triangle with pairwise p values.
    for i,j in combs:
        vs[i, j] = compare_dunn(x_groups_unique[i], x_groups_unique[j])

    if p_adjust:
        # Adjust for multiple comparisons over the upper triangle only.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror into the lower triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Raises
    ------
    ValueError
        If `dist` is neither 'chi' nor 'tukey'.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    def compare_stats_chi(i, j):
        # Chi-squared form of the Nemenyi statistic (before tie correction).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi

    def compare_stats_tukey(i, j):
        # Studentized-range form of the Nemenyi statistic.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q

    # Normalize input to long format (helper defined earlier in this module).
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of sorting lexically.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)              # total number of observations (N)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size  # number of groups (k)
    x_lens = x.groupby(_group_col)[_val_col].count()

    # Rank all observations jointly; ties receive average ranks.
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()

    # Tie correction factor: 1 - sum(t^3 - t) / (N^3 - N), where t is the
    # multiplicity of each tied rank value.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    if dist == 'chi':
        # Chi-squared p values with k - 1 degrees of freedom, tie-corrected.
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
    elif dist == 'tukey':
        # Studentized range p values (psturng expects q * sqrt(2)).
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
    else:
        # Previously an unknown `dist` silently returned a matrix of zeros;
        # fail loudly instead.
        raise ValueError("dist must be either 'chi' or 'tukey'")

    # Mirror into the lower triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc if
    significant results of the Friedman's test are obtained. The statistics
    refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
    [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via Friedman's
    test. The consequent post hoc pairwise multiple comparison test
    according to Nemenyi is conducted with this function.
    This function does not test for ties.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Nemenyi q statistic: difference of mean Friedman ranks over the
        # null standard error sqrt(k(k+1)/(6n)).
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    # Normalize input to a melted block-design DataFrame (module helper).
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    # NOTE(review): the `sort` argument is currently ignored -- the code
    # below is commented out.
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[_group_col].unique()
    k = groups.size                   # number of groups (treatments)
    n = x[_block_col].unique().size   # number of blocks

    # Rank observations within each block, then average ranks per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].mean()

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    # Fill the upper triangle with q statistics.
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # psturng expects q * sqrt(2); convert statistics to p values.
    vs *= np.sqrt(2.)
    vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)

    # Mirror into the lower triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover's t statistic for the difference of group rank sums.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval

    def compare_tukey(i, j):
        # Studentized-range version used for the 'single-step' adjustment.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval

    # Normalize input to a melted block-design DataFrame (module helper).
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    # NOTE(review): the `sort` argument is currently ignored -- the code
    # below is commented out.
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size                   # number of groups (treatments)
    n = x[_block_col].unique().size   # number of blocks

    # Rank observations within each block, then sum ranks per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()

    m = 1  # number of replicates per block/group cell (unreplicated design)
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # Conover's T2: sum of squared deviations of group rank sums from their
    # null expectation n*m*(m*k+1)/2, scaled by S2. The previous expression
    # used np.sum(R) -- the constant total rank sum -- so the statistic did
    # not react to between-group differences at all.
    T2 = 1. / S2 * np.sum((R - n * m * (m * k + 1.) / 2.) ** 2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    if p_adjust == 'single-step':
        # Tukey-type single-step adjustment built into the statistic.
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror into the lower triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test

    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.

    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
                      [110,112,123,130,145],
                      [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    # Normalize input to long format (helper defined earlier in this module).
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    # NOTE(review): the `sort` argument is currently ignored -- the code
    # below is commented out.
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    # Joint ranks over all observations; mean rank and count per group.
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size   # number of groups
    n = x.shape[0]    # total number of observations
    # Null standard deviation of the joint ranks.
    sigma = np.sqrt(n * (n + 1) / 12.)
    # NOTE(review): `df` is assigned but never used below.
    df = np.inf

    def compare(m, u):
        # Standardized rank-mean differences between group u and each group
        # index in the ordered subset m.
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)

    stat = np.zeros((k, k))
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            # Maximum standardized difference over the ordered subset is
            # stored in the LOWER triangle (row j, column i).
            stat[j, i] = np.max(tmp)

    stat[stat < 0] = 0
    # Convert statistics to studentized-range p values.
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # NOTE(review): the statistics live in the lower triangle, yet this line
    # copies the transposed (upper-triangle) entries over them -- confirm the
    # mirror direction is intended.
    p_values[tri_lower] = p_values.T[tri_lower]
    # NOTE(review): `p_adjust` is documented but the adjustment below is
    # commented out, so the argument is currently ignored.
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic: difference of mean Friedman ranks over the null
        # standard error sqrt(k(k+1)/(6n)).
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    # Normalize input to a melted block-design DataFrame (module helper).
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size                  # number of groups (treatments)
    n = x[block_col].unique().size   # number of blocks

    # Rank observations within each block, then average ranks per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided normal p values, clipped at 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        # Adjust for multiple comparisons over the upper triangle only.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror into the lower triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # q statistic: difference of mean Friedman ranks over the null
        # standard error sqrt(k(k+1)/(6n)).
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    # Normalize input to a melted block-design DataFrame (module helper).
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size                  # number of groups (treatments)
    n = x[block_col].unique().size   # number of blocks

    # Rank observations within each block, then average ranks per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Squared statistics follow a chi-square distribution with k - 1 df.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    # Mirror into the lower triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover's t statistic for the difference of group rank sums.
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df = df)
        return pval

    # Normalize input to a melted block-design DataFrame (module helper).
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original group/block order instead of sorting lexically.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)                  # number of treatments
    b = x[block_col].unique().size   # number of blocks
    r = b                            # replications per treatment (complete design)
    k = t                            # treatments per block (complete design)

    # Rank observations within each block, then sum ranks per group.
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()

    # Durbin/Conover intermediate quantities (Conover 1999).
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k -1))))
    df = b * k - b - t + 1           # degrees of freedom for the t distribution

    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented equivalent.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        # Adjust for multiple comparisons over the upper triangle only.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror into the lower triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = x[_group_col].unique()
    k = groups.size
    # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    for i, j in combs:
        # Approximate significance level is the third element of the result.
        # Pass `midrank` through: it was accepted but silently ignored before.
        vs[i, j] = ss.anderson_ksamp(
            [x.loc[x[_group_col] == groups[i], _val_col],
             x.loc[x[_group_col] == groups[j], _val_col]],
            midrank=midrank)[2]
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    # Mirror the upper triangle into the lower one and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # Two-sided p value from the t distribution with (b-1)(k-1) df.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Two-sided p value from the standard normal distribution.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks and block weights (range ranks).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # Use block_col rather than the hard-coded 'blocks' so melted input with
    # custom column names works; also avoid shadowing `x` inside the lambda.
    x['s'] = x.apply(lambda row: row['rr'] * q[row[block_col]], axis=1)
    x['w'] = x.apply(lambda row: row['r'] * q[row[block_col]], axis=1)

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.
    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.
    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    n_perm : int, optional
        Number of permutations used to estimate the p value when `p` is
        unknown. Default is 100.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # __convert_to_df returns (DataFrame, val_col_name, group_col_name);
    # assigning the whole tuple to `x` (as before) broke every later access.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    k = x[_group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Number of pairs (ri, rj) with rj > ri (Mann-Whitney count).
        return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))

    def _ustat(Rij, g, k):
        # Matrix of pairwise Mann-Whitney counts between all group levels.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A(p) statistic for a candidate peak p.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p + 1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null expectation of A(p).
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2) / 4

    def _var_at(p, n):
        # Null variance of A(p).
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -
               np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Peak unknown: take the maximum standardized A(p) over all p and
        # estimate its p value with a permutation test.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)
        p = A == stat
        est = None

        mt = []
        for i in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))
        mt = np.array(mt)
        # Fraction of permutation maxima exceeding the observed statistic;
        # `.size` was missing before, which yielded an array instead of a float.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.
    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size
    # Normal scores: inverse-normal transform of the overall ranks.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    param = k - 1
    A = aj / nj

    # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_stats(i, j):
        # Two-sided t test on the difference of mean normal scores.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # Keep the groups in a plain list: wrapping ragged slices with
        # np.array() creates an object array, which raises in NumPy >= 1.24.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = np.array(a)
        # Drop NaNs per group; groups may end up with different lengths.
        x_grouped = [np.asarray(row)[~np.isnan(row)] for row in x]
        x_lens = np.asarray([len(g) for g in x_grouped])
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_pooled(i, j):
        # t test using a common (pooled) standard deviation for all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # Pass the user-supplied alpha through (it was hard-coded to 0.05 before,
    # making the parameter a no-op).
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # np.str / np.int aliases were removed in NumPy 1.24; builtins are equivalent.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)
    vs = np.zeros((groups_len, groups_len), dtype=int)

    # The summary table's last column ('reject') says whether H0 was rejected.
    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Whether to get the p-value for the one-sided hypothesis
        ('less' or 'greater') or for the two-sided hypothesis ('two-sided').
        Defaults to 'two-sided'.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    groups = np.unique(df[_group_col])
    n_groups = groups.size
    pvals = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    def _values_of(group):
        # Dependent-variable values belonging to one group level.
        return df.loc[df[_group_col] == group, _val_col]

    # Fill the upper triangle with pairwise Mann-Whitney p values.
    for left, right in it.combinations(range(n_groups), 2):
        pvals[left, right] = ss.mannwhitneyu(_values_of(groups[left]),
                                             _values_of(groups[right]),
                                             use_continuity=use_continuity,
                                             alternative=alternative)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal with -1.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
    version of the paired T-test for use with non-parametric ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": Pratt treatment, includes zero-differences in the ranking
        process (more conservative)
        "wilcox": Wilcox treatment, discards all zero-differences
        "zsplit": Zero rank split, just like Pratt, but spliting the zero rank
        between positive and negative ones
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the z-statistic.
        Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    # Rows are deliberately left in their original order (sorting is disabled).

    groups = np.unique(df[_group_col])
    n_groups = groups.size
    pvals = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    def _values_of(group):
        # Dependent-variable values belonging to one group level.
        return df.loc[df[_group_col] == group, _val_col]

    # Fill the upper triangle with pairwise Wilcoxon signed-rank p values.
    for left, right in it.combinations(range(n_groups), 2):
        pvals[left, right] = ss.wilcoxon(_values_of(groups[left]),
                                         _values_of(groups[right]),
                                         zero_method=zero_method,
                                         correction=correction)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal with -1.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # Scheffe F statistic for the pair (i, j).
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    combs = it.combinations(range(groups.size), 2)

    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])
    vs[tri_lower] = vs.T[tri_lower]

    # Convert F statistics to p values with (k-1, n-k) degrees of freedom.
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    # Per-group sample sizes, means, and variances.
    ni = x_grouped.count()
    xi = x_grouped.mean()
    si = x_grouped.var()

    def compare(i, j):
        # Two-sample t statistic with unpooled (per-group) variances.
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch-Satterthwaite approximation of the degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # BUG FIX: the original referenced an undefined name `s2i`
            # (NameError) and called any() with four positional arguments
            # (TypeError: any() takes exactly one iterable), so this branch
            # always crashed at runtime.
            ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
            ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
            ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
            if not any([ok1, ok2, ok3, ok4]):
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # `np.float` was removed from NumPy (1.24); the builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the raw p values, clipped to 1.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col = None, group_col = None, sort = False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    # Per-group sample sizes, means, and variances.
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized-range statistic q for the pair (i, j).
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # BUG FIX: `np.float` was removed from NumPy (1.24); use the builtin float.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # p values from the studentized range distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. Unlike
    Kruskal-rank based all-pairs procedures, the DSCF test extends the
    U-test by re-ranking the data for every pairwise comparison [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    n = x.groupby(_group_col)[_val_col].count()
    k = groups.size

    def tie_term(ranks):
        # Correction term for tied ranks: sum((t^3 - t) / 12) over tie groups.
        counts = ranks.value_counts().values
        return np.sum((counts ** 3 - counts) / 12.)

    def pairwise_stat(gi, gj):
        # Re-rank only the observations belonging to the pair (gi, gj).
        ni = n.loc[gi]
        nj = n.loc[gj]
        pair = x.loc[(x[_group_col] == gi) | (x[_group_col] == gj)].copy()
        pair['ranks'] = pair.loc[:, _val_col].rank()
        rank_sums = pair.groupby(_group_col)['ranks'].sum().loc[[gi, gj]]
        # Mann-Whitney U statistics for both directions; take the smaller one.
        u_stats = np.array([nj * ni + (nj * (nj + 1) / 2),
                            nj * ni + (ni * (ni + 1) / 2)]) - rank_sums
        total = ni + nj
        variance = (nj * ni / (total * (total - 1.))) * ((total ** 3 - total) / 12. - tie_term(pair['ranks']))
        return np.sqrt(2.) * (np.min(u_stats) - nj * ni / 2.) / np.sqrt(variance)

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for a_idx, b_idx in it.combinations(range(k), 2):
        vs[a_idx, b_idx] = pairwise_stat(groups[a_idx], groups[b_idx])

    # Refer the statistics to the studentized range distribution.
    vs[upper] = psturng(np.abs(vs[upper]), k, np.inf)
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
|
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction are employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    def compare_conover(i, j):
        # Conover-Iman t statistic for the pair (i, j).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        D = (n - 1. - H_cor) / (n - x_len)
        t_value = diff / np.sqrt(S2 * B * D)
        p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
        return p_value

    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()

    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    x_ranks_sum = x.groupby(_group_col)['ranks'].sum()

    # Tie correction factor according to Conover.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    # Kruskal-Wallis H statistic, tie-corrected.
    H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
    H_cor = H / x_ties

    if x_ties == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))

    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(x_len), 2):
        vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
(Conover's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by `group_col` or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction are employed according to Conover [1]_.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_conover(x, p_adjust = 'holm') | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L148-L262 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):

    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.
    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.
    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Ragged input: list of groups. Melt into long format with 1-based
        # group labels.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):

        # cols ids not defined
        # trying to infer
        if not(all([val_id, group_id])):

            if np.argmax(a.shape):
                a = a.T

            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]

            # BUG FIX: np.asscalar() was removed from NumPy (>= 1.23); compare
            # the two column cardinalities directly instead. The column with
            # fewer unique values is taken as the grouping column.
            if ax[0] != ax[1]:
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')

            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}

        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted (long
    format) DataFrame with fixed column names 'y', 'groups' and 'blocks'.

    Returns the tuple (x, 'y', 'groups', 'blocks').
    '''
    if isinstance(a, DataFrame) and not melted:
        # Wide block-design matrix: rows are blocks, columns are groups.
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    elif melted:
        # Already melted input (DataFrame or array-like indexable by the
        # given column keys): just re-label the three columns.
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})

    elif not isinstance(a, DataFrame):
        # Non-melted array-like: build a wide DataFrame and melt it.
        # NOTE: the original also had an `else` branch here assigning to
        # `x.columns[...]` — that code was unreachable (the `elif melted`
        # branch above already handles all melted inputs) and would have
        # raised a TypeError anyway since a pandas Index is immutable,
        # so it has been removed.
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    return x, 'y', 'groups', 'blocks'
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)
    uniq_groups = np.unique(x[_group_col])
    k = uniq_groups.size
    group_sizes = x.groupby(_group_col)[_val_col].count()

    # Rank the pooled observations and average ranks within each group.
    x['ranks'] = x[_val_col].rank()
    mean_ranks = x.groupby(_group_col)['ranks'].mean()

    # Tie correction term according to Glantz (2012).
    counts = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(counts[counts != 1] ** 3 - counts[counts != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    tie_corr = tie_sum / (12. * (n - 1))

    def z_test(gi, gj):
        # Dunn z statistic and two-sided normal p value for a group pair.
        mean_diff = np.abs(mean_ranks.loc[gi] - mean_ranks.loc[gj])
        variance_part = n * (n + 1.) / 12. - tie_corr
        size_part = 1. / group_sizes.loc[gi] + 1. / group_sizes.loc[gj]
        z_value = mean_diff / np.sqrt(variance_part * size_part)
        return 2. * ss.norm.sf(np.abs(z_value))

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = z_test(uniq_groups[gi], uniq_groups[gj])

    if p_adjust:
        vs[upper] = multipletests(vs[upper], method = p_adjust)[1]

    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=uniq_groups, columns=uniq_groups)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):

    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction will be employed according to Glantz (2012).
    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    # Chi-squared variant of the pairwise statistic (Sachs 1997).
    def compare_stats_chi(i, j):
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi

    # Studentized-range (Tukey) variant of the pairwise statistic.
    def compare_stats_tukey(i, j):
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q

    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve input order of groups by making the column ordered-categorical.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    # Totals, group labels/sizes, and pooled ranks.
    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()

    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()

    # ties
    # Tie correction factor in (0, 1]: 1 means no ties.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    if dist == 'chi':
        # Tie-corrected chi-squared statistics -> upper-tail chi2 p values.
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties

        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)

    elif dist == 'tukey':
        # q statistics scaled by sqrt(2) -> studentized range p values.
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)

        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)

    # Mirror the upper triangle; diagonal marker is -1 (self-comparison).
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):

    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc if
    significant results of the Friedman's test are obtained. The statistics
    refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
    [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via Friedman's
    test. The consequent post hoc pairwise multiple comparison test
    according to Nemenyi is conducted with this function.
    This function does not test for ties.
    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.
    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    # Studentized-range q statistic for a pair of groups, based on mean
    # within-block ranks R (k groups, n blocks).
    def compare_stats(i, j):
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[_group_col].unique()
    k = groups.size
    n = x[_block_col].unique().size

    # Rank observations within each block, then average ranks per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].mean()

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Scale by sqrt(2) and refer to the studentized range distribution.
    vs *= np.sqrt(2.)
    vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
    # Mirror the upper triangle; diagonal marker is -1 (self-comparison).
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Student-t based p value for the pair of groups (i, j).
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df=(m * n * k - k - n + 1))
        return pval

    def compare_tukey(i, j):
        # Studentized-range (Tukey) based p value, used with 'single-step'.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval

    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size
    n = x[_block_col].unique().size

    # Friedman-type ranking within each block; R holds per-group rank sums.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()
    m = 1  # replicates per cell (unreplicated block design)
    S2 = m / (m * k - 1.) * (A1 - m * k * n * (m * k + 1.)**2. / 4.)
    # Friedman statistic T2: sum of SQUARED deviations of the rank sums from
    # their expectation m*n*(m*k + 1)/2, scaled by S2 (Conover 1999).
    # BUG FIX: previously this summed the raw rank sums (np.sum(R)) instead
    # of the squared deviations, yielding a wrong T2 and hence wrong p values.
    T2 = 1. / S2 * np.sum((R - m * n * (m * k + 1.) / 2.) ** 2.)
    A = S2 * (2. * n * (m * k - 1.)) / (m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    if p_adjust == 'single-step':
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test
    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.
    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
        [110,112,123,130,145],
        [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    # Normalize input to a (DataFrame, value-column, group-column) triple.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    # Overall ranks of the pooled sample; Ri = mean rank per group,
    # ni = per-group sample size.
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    # Standard deviation of pooled ranks (no tie correction applied here).
    sigma = np.sqrt(n * (n + 1) / 12.)
    df = np.inf

    def compare(m, u):
        # Standardized differences between group u and every group index in m.
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)

    stat = np.zeros((k, k))
    # For each ordered pair i < j, the NPM statistic is the maximum
    # standardized difference over all intermediate groups i..j-1.
    # Note: the computed statistics are stored in the LOWER triangle
    # (row j, column i with j > i).
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            stat[j, i] = np.max(tmp)
    # Negative statistics are clipped to zero before the range distribution.
    stat[stat < 0] = 0
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # NOTE(review): this copies the UPPER triangle (where stat was 0) over
    # the lower triangle that holds the computed statistics — the mirror
    # direction looks inverted; confirm against the reference implementation.
    p_values[tri_lower] = p_values.T[tri_lower]
    # NOTE(review): p_adjust is accepted but currently not applied — the
    # adjustment step below is commented out.
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic for the pair of groups (i, j) based on mean-rank
        # differences (Siegel & Castellan 1988).
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Friedman-type ranking within each block; R = mean rank per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided p values from the standard normal distribution.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    Pandas DataFrame containing p values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Standardized mean-rank difference for the pair of groups (i, j).
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Friedman-type ranking within each block; R = mean rank per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Squared statistics are referred to the chi-square distribution with
    # k - 1 degrees of freedom (Miller 1996).
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    Pandas DataFrame containing p values.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Student-t based p value for the pair of groups (i, j).
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original group/block order instead of lexicographic order.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)             # number of treatments
    b = x[block_col].unique().size  # number of blocks
    r = b                       # replications per treatment (complete design)
    k = t                       # treatments per block (complete design)

    # Rank within each block, then sum the ranks per treatment.
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.
    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of lexicographic order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size

    # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        # anderson_ksamp returns (statistic, critical_values, significance);
        # [2] is the approximate significance level.
        # BUG FIX: the `midrank` argument was accepted but never forwarded
        # to scipy before.
        vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col],
                                      x.loc[x[_group_col] == groups[j], _val_col]],
                                     midrank=midrank)[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    Pandas DataFrame containing p values.
    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # Student-t based p value on weighted-rank sums S (Conover 1999).
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Normal-approximation p value on scaled weighted-rank sums W.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original group/block order instead of lexicographic order.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks r, and block weights q = rank of each block's range.
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # Weighted (centered and raw) ranks per observation.
    # BUG FIX: the block column name was hard-coded as 'blocks', which broke
    # melted DataFrame input with custom column names — use block_col instead.
    x['s'] = x.apply(lambda row: row['rr'] * q[row[block_col]], axis=1)
    x['w'] = x.apply(lambda row: row['r'] * q[row[block_col]], axis=1)
    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.
    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.
    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.
    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.
    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # NOTE: p_adjust is accepted for API consistency with the other post hoc
    # functions but is not used by this test.
    # BUG FIX: __convert_to_df returns a (DataFrame, val_col, group_col)
    # tuple (see the other posthoc functions); previously the tuple itself
    # was assigned to `x`, breaking every subsequent column access.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of lexicographic order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    if p:
        # Validate the a-priori peak index.
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    # Pooled ranks and per-group sample sizes.
    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Mann-Whitney count: number of pairs where a rank in Rj exceeds one in Ri.
        return np.sum(Ri.apply(lambda z: Rj[Rj > z].size))

    def _ustat(Rij, g, k):
        # Matrix of pairwise Mann-Whitney counts between all group levels.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe statistic A_p for peak index p: increasing up to the
        # peak, decreasing after it.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        # Total sample size up to and including the peak.
        return np.sum(n[:p + 1])

    def _n2(p, n):
        # Total sample size from the peak onwards.
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null expectation of A_p (Mack & Wolfe 1981).
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2) / 4

    def _var_at(p, n):
        # Null variance of A_p (Mack & Wolfe 1981).
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -
               np.sum(n**2 * (2 * n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p]**2 * N) / 72.
        return var

    if p:
        # Known peak: asymptotic normal p value. Ties are not corrected for.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: maximize the standardized statistic over all peak
        # candidates and estimate the p value by permutation.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)
        p = A == stat
        est = None

        mt = []
        for i in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # BUG FIX: the permutation p value is the FRACTION of permuted
        # statistics exceeding the observed one; the previous code divided
        # the exceeding values themselves, returning an array.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See
        `statsmodels.sandbox.stats.multicomp` for details. Available methods
        are: 'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.

    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size
    r = ss.rankdata(x[_val_col])
    # Van der Waerden normal scores: inverse-normal transform of the scaled ranks.
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    # FIX: `np.float` was removed in NumPy 1.24; the builtin `float` is the
    # documented replacement and is what the alias always pointed to.
    vs = np.zeros((k, k), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_stats(i, j):
        # Two-sided t test on the difference of mean normal scores,
        # following Conover & Iman (1979).
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    p_adjust : str, optional
        Method for adjusting p values. See
        `statsmodels.sandbox.stats.multicomp` for details. Available methods
        are: 'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)

        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # FIX: groups may have unequal sizes, so keep them as a plain list.
        # Building a ragged ndarray with np.array() raises ValueError on
        # NumPy >= 1.24 (implicit object arrays were removed).
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = a
        # Drop NaN padding from each group; lengths may differ afterwards,
        # hence a list instead of a (ragged) ndarray — see FIX above.
        x_grouped = []
        for row in x:
            row = np.asarray(row, dtype=float)
            x_grouped.append(row[~np.isnan(row)])
        x_lens = np.asarray([len(g) for g in x_grouped])

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # FIX: `np.float` was removed in NumPy 1.24; use the builtin `float`.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(x_len, 1)
    tri_lower = np.tril_indices(x_len, -1)

    def compare_pooled(i, j):
        # t test using a variance estimate pooled across *all* groups,
        # with the total degrees of freedom.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)

        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # BUG FIX: `alpha` was previously hard-coded to 0.05, silently ignoring
    # the caller's argument.
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # FIX: `np.str` / `np.int` aliases were removed in NumPy 1.24;
    # the builtins are the documented replacements.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)
    vs = np.zeros((groups_len, groups_len), dtype=int)

    # Summary table rows (after the header) are assumed to be laid out as
    # (group1, group2, meandiff, lower, upper, reject) -- NOTE(review):
    # confirm index 5 is still the `reject` column for the installed
    # statsmodels version.
    for row in result.summary()[1:]:
        a0 = str(row[0])
        a1 = str(row[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(row[5]) == 'True' else 0

    # Keep the upper triangle, mark the diagonal, and mirror symmetrically.
    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]

    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Whether to get the p-value for the one-sided hypothesis
        ('less' or 'greater') or for the two-sided hypothesis ('two-sided').
        Defaults to 'two-sided'.
    p_adjust : str, optional
        Method for adjusting p values. See
        `statsmodels.sandbox.stats.multicomp` for details. Available methods
        are: 'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)

    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    groups = np.unique(df[_group_col])
    k = groups.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    # One U test per unordered pair of groups; fill the upper triangle.
    for i, j in it.combinations(range(k), 2):
        gi = df.loc[df[_group_col] == groups[i], _val_col]
        gj = df.loc[df[_group_col] == groups[j], _val_col]
        pvals[i, j] = ss.mannwhitneyu(gi, gj,
                                      use_continuity=use_continuity,
                                      alternative=alternative)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Symmetrize and mark the diagonal with the sentinel value -1.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
    version of the paired T-test for use with non-parametric ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": Pratt treatment, includes zero-differences in the ranking
        process (more conservative)
        "wilcox": Wilcox treatment, discards all zero-differences
        "zsplit": Zero rank split, just like Pratt, but spliting the zero rank
        between positive and negative ones
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the z-statistic.
        Default is False.
    p_adjust : str, optional
        Method for adjusting p values. See
        `statsmodels.sandbox.stats.multicomp` for details. Available methods
        are: 'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)

    # NOTE(review): rows are deliberately NOT sorted by value here --
    # presumably because the signed-rank test is paired, so reordering
    # values would break the pairing between groups; confirm before
    # re-enabling any sort.
    groups = np.unique(df[_group_col])
    k = groups.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    # One paired signed-rank test per unordered pair of groups.
    for i, j in it.combinations(range(k), 2):
        gi = df.loc[df[_group_col] == groups[i], _val_col]
        gj = df.loc[df[_group_col] == groups[j], _val_col]
        pvals[i, j] = ss.wilcoxon(gi, gj,
                                  zero_method=zero_method,
                                  correction=correction)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Symmetrize and mark the diagonal with the sentinel value -1.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # Scheffe F statistic for the pair (i, j).
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # FIX: `np.float` was removed in NumPy 1.24; use the builtin `float`.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Symmetrize the F statistics, then convert them all to p values at once.
    vs[tri_lower] = vs.T[tri_lower]
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    xi = x_grouped.mean()
    si = x_grouped.var()

    def compare(i, j):
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch-Satterthwaite approximate degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # BUG FIX: this branch previously referenced an undefined name
            # `s2i` (NameError) and called `any()` with four positional
            # arguments (TypeError); it could never have run successfully.
            ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
            ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
            ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
            if not any((ok1, ok2, ok3, ok4)):
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # FIX: `np.float` was removed in NumPy 1.24; use the builtin `float`.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the raw p values, then clip at 1.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col = None, group_col = None, sort = False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic q for the pair (i, j).
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # FIX: `np.float` was removed in NumPy 1.24; use the builtin `float`.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # p values from the studentized range distribution (psturng).
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed to
    the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
    test is basically an extension of the U-test as re-ranking is conducted for
    each pairwise test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)

    df.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(df[_group_col])
    counts = df.groupby(_group_col)[_val_col].count()
    k = groups.size

    def tie_correction(ranks):
        # Standard rank tie-correction term: sum of (t^3 - t) / 12 over
        # the multiplicity t of each tied rank value.
        reps = ranks.value_counts().values
        return np.sum((reps ** 3 - reps) / 12.)

    def pairwise_stat(gi, gj):
        # Re-rank only the two groups under comparison (DSCF re-ranking).
        ni = counts.loc[gi]
        nj = counts.loc[gj]
        pair = df.loc[(df[_group_col] == gi) | (df[_group_col] == gj)].copy()
        pair['ranks'] = pair.loc[:, _val_col].rank()
        rank_sums = pair.groupby(_group_col)['ranks'].sum().loc[[gi, gj]]

        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - rank_sums
        u_min = np.min(u)

        m = ni + nj
        var = (nj * ni / (m * (m - 1.))) * ((m ** 3 - m) / 12. - tie_correction(pair['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)

    stats = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        stats[i, j] = pairwise_stat(groups[i], groups[j])

    # Convert the statistics to p values via the studentized range
    # distribution, then symmetrize and mark the diagonal.
    stats[upper] = psturng(np.abs(stats[upper]), k, np.inf)
    stats[lower] = stats.T[lower]
    np.fill_diagonal(stats, -1)
    return DataFrame(stats, index=groups, columns=groups)
|
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_dunn | python | def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
'''
def compare_dunn(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
A = n * (n + 1.) / 12.
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
z_value = diff / np.sqrt((A - x_ties) * B)
p_value = 2. * ss.norm.sf(np.abs(z_value))
return p_value
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
n = len(x.index)
x_groups_unique = np.unique(x[_group_col])
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = tie_sum / (12. * (n - 1))
vs = np.zeros((x_len, x_len))
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i,j in combs:
vs[i, j] = compare_dunn(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique) | Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm') | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L265-L371 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Must be specified if `a` is a pandas
        DataFrame object. Defaults to 'vals' otherwise.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame object. Defaults to 'groups' otherwise.
    val_id : int, optional
        Index of a column that contains dependent variable values. Should be
        specified if a NumPy ndarray is used as an input. It will be inferred
        from data, if not specified.
    group_id : int, optional
        Index of a column that contains independent variable values. Should be
        specified if a NumPy ndarray is used as an input. It will be inferred
        from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values
        and `group_col` column contains categorical values.
    val_col : str
        Name of the column that contains dependent variable values.
    group_col : str
        Name of the column that contains independent variable values.

    Notes
    -----
    Inference of `val_id` and `group_id` is based only on the number of unique
    values per column, so it is better to specify them explicitly.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'
    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col
    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Ragged input: each element of `a` is one group's values.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
    elif isinstance(a, np.ndarray):
        # Column ids not defined -- try to infer them.
        # BUGFIX: compare against None explicitly; `all([val_id, group_id])`
        # treated a legitimate column index 0 as "not specified".
        if val_id is None or group_id is None:
            if np.argmax(a.shape):
                a = a.T
            # The column with fewer unique values is assumed to be the
            # grouping column.
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
            # BUGFIX: np.asscalar was removed in NumPy >= 1.23; ndarray.item()
            # is the documented replacement.
            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}
        # Order column names by their positional index before labelling.
        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted
    DataFrame with columns 'y', 'groups', and 'blocks'.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Block design matrix (rows are blocks, columns are groups) or already
        melted data.
    y_col, group_col, block_col : str or int, optional
        Column names (or, for melted array input, indices) of the response,
        grouping, and blocking variables. Required when ``melted`` is True.
    melted : bool, optional
        Whether `a` is already in melted (long) format.

    Returns
    -------
    tuple of (DataFrame, 'y', 'groups', 'blocks').
    '''
    if isinstance(a, DataFrame) and not melted:
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
    elif melted:
        # Melted input of any type: pick the three columns by key/index.
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})
    else:
        # Non-DataFrame, unmelted block-design matrix.
        # BUGFIX: the original code had an unreachable `else` branch here that
        # assigned into x.columns element-wise (a TypeError, since a pandas
        # Index is immutable); the melted case is already handled above, so
        # the dead broken branch is removed.
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Conover's post hoc all-pairs test of mean rank sums. Typically applied
    after a significant Kruskal-Wallis one-way analysis of variance by
    ranks [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional; groups may have different
        lengths.
    val_col : str, optional
        Name of a DataFrame column with dependent variable values. Must be
        given if `a` is a pandas DataFrame.
    group_col : str, optional
        Name of a DataFrame column with grouping variable values. Must be
        given if `a` is a pandas DataFrame.
    p_adjust : str, optional
        P value adjustment method, passed to
        `statsmodels.sandbox.stats.multicomp.multipletests`. One of:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Whether to sort the DataFrame by `group_col`. Recommended unless the
        data are sorted manually.

    Returns
    -------
    result : pandas DataFrame
        Matrix of p values (diagonal entries are set to -1).

    Notes
    -----
    A tie correction is employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons
        procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    df, _val, _group = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group] = Categorical(df[_group], categories=df[_group].unique(), ordered=True)
    df.sort_values(by=[_group, _val], ascending=True, inplace=True)

    n = len(df.index)
    groups = np.unique(df[_group])
    k = groups.size
    sizes = df.groupby(_group)[_val].count()

    # Rank the pooled sample and aggregate per group.
    df['ranks'] = df[_val].rank()
    avg_ranks = df.groupby(_group)['ranks'].mean()
    sum_ranks = df.groupby(_group)['ranks'].sum()

    # Tie correction factor (Conover).
    counts = df.groupby('ranks').count()[_val].values
    tie_sum = np.sum(counts[counts != 1] ** 3 - counts[counts != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    correction = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    # Tie-corrected Kruskal-Wallis statistic.
    H = (12. / (n * (n + 1.))) * np.sum(sum_ranks**2 / sizes) - 3. * (n + 1.)
    H_cor = H / correction

    if correction == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(df['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))

    def _pair_pvalue(gi, gj):
        # Two-sided p value from the t distribution with n - k df.
        diff = np.abs(avg_ranks.loc[gi] - avg_ranks.loc[gj])
        B = (1. / sizes.loc[gi] + 1. / sizes.loc[gj])
        D = (n - 1. - H_cor) / (n - k)
        t_value = diff / np.sqrt(S2 * B * D)
        return 2. * ss.t.sf(np.abs(t_value), df=n - k)

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    for i, j in it.combinations(range(k), 2):
        vs[i, j] = _pair_pvalue(groups[i], groups[j])
    if p_adjust:
        vs[upper] = multipletests(vs[upper], method=p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal.
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional; groups may have different
        lengths.
    val_col : str, optional
        Name of a DataFrame column with dependent variable values. Must be
        given if `a` is a pandas DataFrame.
    group_col : str, optional
        Name of a DataFrame column with grouping variable values. Must be
        given if `a` is a pandas DataFrame.
    dist : str, optional
        Method for determining the p value. Either "chi" (chi-squared,
        default) or "tukey" (studentized range).
    sort : bool, optional
        Whether to sort the DataFrame by `group_col`. Recommended unless the
        data are sorted manually.

    Returns
    -------
    result : pandas DataFrame
        Matrix of p values (diagonal entries are set to -1).

    Raises
    ------
    ValueError
        If `dist` is neither "chi" nor "tukey".

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    # BUGFIX: validate `dist` up front. Previously any unrecognized value
    # silently produced a matrix of zeros with -1 on the diagonal.
    if dist not in ('chi', 'tukey'):
        raise ValueError("dist should be either 'chi' or 'tukey'")

    def compare_stats_chi(i, j):
        # Chi-squared distributed statistic for one pair of groups.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi

    def compare_stats_tukey(i, j):
        # Studentized-range distributed statistic for one pair of groups.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q

    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()

    # Pooled-sample ranks and per-group mean ranks.
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()

    # Tie correction factor (Glantz 2012).
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 'chi':
        for i, j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
    else:  # dist == 'tukey'
        for i, j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)

    # Mirror the upper triangle and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Nemenyi all-pairs post hoc test for unreplicated blocked data, usually
    run after a significant Friedman test. P values come from the upper
    quantiles of the studentized range (Tukey) distribution [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is False (default), `a` is a block design
        matrix: rows are blocks, columns are groups, and the col arguments
        are not needed. If `melted` is True, y_col, block_col and group_col
        must identify the columns (indices for an array, names for a
        DataFrame) holding the respective variables.
    y_col : str or int
        Column with y data. Required for melted input.
    block_col : str or int
        Column with blocking factor values. Required for melted input.
    group_col : str or int
        Column with treatment (group) factor values. Required for melted
        input.
    melted : bool, optional
        Whether data are given in melted (long) format.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        Matrix of p values (diagonal entries are set to -1).

    Notes
    -----
    This function does not test for ties.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    data, _y, _group, _block = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    data.sort_values(by=[_group, _block], ascending=True, inplace=True)
    data.dropna(inplace=True)

    group_names = data[_group].unique()
    k = group_names.size
    n = data[_block].unique().size

    # Rank within each block, then average the ranks per group.
    data['mat'] = data.groupby(_block)[_y].rank()
    avg_ranks = data.groupby(_group)['mat'].mean()

    # q statistic for every pair, already scaled by sqrt(2) for psturng.
    denom = np.sqrt(k * (k + 1.) / (6. * n))
    pvals = np.zeros((k, k))
    for i in range(k - 1):
        for j in range(i + 1, k):
            diff = np.abs(avg_ranks[group_names[i]] - avg_ranks[group_names[j]])
            pvals[i, j] = diff / denom * np.sqrt(2.)

    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    pvals[upper] = psturng(pvals[upper], k, np.inf)
    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=group_names, columns=group_names)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is False (default), `a` is a block design
        matrix: rows are blocks, columns are groups, and the col arguments
        are not needed. If `melted` is True, y_col, block_col and group_col
        must identify the columns (indices for an array, names for a
        DataFrame) holding the respective variables.
    y_col : str or int
        Column with y data. Required for melted input.
    block_col : str or int
        Column with blocking factor values. Required for melted input.
    group_col : str or int
        Column with treatment (group) factor values. Required for melted
        input.
    melted : bool, optional
        Whether data are given in melted (long) format.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        P value adjustment method, passed to
        `statsmodels.sandbox.stats.multicomp.multipletests`. One of:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky', or
        'single-step' (uses the Tukey distribution for multiple comparisons).

    Returns
    -------
    result : pandas DataFrame
        Matrix of p values (diagonal entries are set to -1).

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons
        procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. Edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Two-sided p value from the t distribution.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df=(m*n*k - k - n + 1))
        return pval

    def compare_tukey(i, j):
        # P value from the studentized range distribution ('single-step').
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval

    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size
    n = x[_block_col].unique().size

    # Rank within each block; R holds the per-group rank sums.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()

    m = 1  # number of replicates per cell (unreplicated design)
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # BUGFIX: T2 is the sum of SQUARED DEVIATIONS of the rank sums from their
    # expectation n*m*(m*k+1)/2 (Conover 1999). The previous code computed
    # np.sum(R) - n*m*((m*k+1)/2)**2, which is not that statistic.
    T2 = 1. / S2 * np.sum((R - n * m * (m * k + 1.) / 2.) ** 2.)
    A = S2 * (2. * n * (m * k - 1.)) / (m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if p_adjust == 'single-step':
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
        NOTE(review): currently has no effect -- the sorting branch below is
        commented out.
    p_adjust : str, optional
        Method for adjusting p values (see
        `statsmodels.sandbox.stats.multicomp`).
        NOTE(review): currently has no effect -- the adjustment branch below
        is commented out; p values are returned unadjusted.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test.

    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures
        for detecting differences in simply ordered means. Comput. Statist.
        Data Anal. 48, 291--306.

    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
        [110,112,123,130,145],
        [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    # Pooled-sample ranks: per-group mean ranks (Ri) and group sizes (ni).
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    # sigma^2 = n(n+1)/12 is the variance of the pooled ranks.
    sigma = np.sqrt(n * (n + 1) / 12.)
    # NOTE(review): `df` is assigned but never used below.
    df = np.inf
    def compare(m, u):
        # Standardized differences between group u and every group index in m.
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)
    stat = np.zeros((k, k))
    # For each ordered pair (i, j), i < j, the test statistic is the maximum
    # standardized difference between group j and any group i..j-1
    # (the "simply ordered" alternative of Nashimoto & Wright).
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            stat[j, i] = np.max(tmp)
    # Negative statistics carry no evidence against the ordered null.
    stat[stat < 0] = 0
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # Mirror the lower triangle into a symmetric matrix.
    p_values[tri_lower] = p_values.T[tri_lower]
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated
    Blocked Data. See authors' paper for additional information [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is False (default), `a` is a block design
        matrix: rows are blocks, columns are groups, and the col arguments
        are not needed. If `melted` is True, y_col, block_col and group_col
        must identify the columns (indices for an array, names for a
        DataFrame) holding the respective variables.
    y_col : str or int
        Column with y data. Required for melted input.
    block_col : str or int
        Column with blocking factor values. Required for melted input.
    group_col : str or int
        Column with treatment (group) factor values. Required for melted
        input.
    melted : bool, optional
        Whether data are given in melted (long) format.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        P value adjustment method, passed to
        `statsmodels.sandbox.stats.multicomp.multipletests`. One of:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        Matrix of p values (diagonal entries are set to -1).

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Siegel and Castellan's
    test can be performed on Friedman-type ranked data.

    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics
        for the Behavioral Sciences. 2nd ed. New York: McGraw-Hill.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Normal-approximation z statistic for one pair of groups.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Rank within each block, then average the ranks per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided p values from the standard normal, clipped at 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.

    The p-values are computed from the chi-square distribution [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is set to False (default), `a` is a typical
        matrix of block design, i.e. rows are blocks, and columns are groups.
        In this case you do not need to specify col arguments.
        If `melted` is set to True, y_col, block_col and group_col must
        specify the columns (indices for an array, names for a DataFrame)
        containing elements of the corresponding type.
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springer.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Standardized difference of mean Friedman ranks.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Friedman-type ranking: rank within each block, then average per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Squared statistic is chi-square distributed with k - 1 df.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD).
    See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is set to False (default), `a` is a typical
        matrix of block design, i.e. rows are blocks, and columns are groups.
        In this case you do not need to specify col arguments.
        If `melted` is set to True, y_col, block_col and group_col must
        specify the columns (indices for an array, names for a DataFrame)
        containing elements of the corresponding type.
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Two-sided t test on the difference of within-block rank sums.
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)
    b = x[block_col].unique().size
    # For a complete block design each treatment appears r = b times and each
    # block contains k = t treatments.
    r = b
    k = t

    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True
        (default), the midrank test applicable to continuous and discrete
        populations is performed. If False, the right side empirical
        distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        # anderson_ksamp returns (statistic, critical_values, significance);
        # index 2 is the approximate significance level (p value).
        vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col],
                                      x.loc[x[_group_col] == groups[j], _val_col]])[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is set to False (default), `a` is a typical
        matrix of block design, i.e. rows are blocks, and columns are groups.
        In this case you do not need to specify col arguments.
        If `melted` is set to True, y_col, block_col and group_col must
        specify the columns (indices for an array, names for a DataFrame)
        containing elements of the corresponding type.
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # t approximation on weighted rank sums S.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Normal approximation on scaled weighted rank sums W.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks and block weights (ranked block ranges).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # BUG FIX: the block column was hard-coded as 'blocks', which broke
    # melted input with a custom block_col name. Use block_col instead.
    x['s'] = x.apply(lambda row: row['rr'] * q[row[block_col]], axis=1)
    x['w'] = x.apply(lambda row: row['r'] * q[row[block_col]], axis=1)

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decreasing treatment
    effects [1]_, [2]_.

    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    n_perm : int, optional
        Number of permutations used to estimate the p value when `p` is
        unknown. Default is 100.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Present for API compatibility; not used by this test.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # BUG FIX: __convert_to_df returns (df, val_col, group_col); the original
    # bound the whole tuple to a single name.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Number of (i, j) pairs with Rj > Ri (Mann-Whitney count).
        return np.sum(Ri.apply(lambda z: Rj[Rj > z].size))

    def _ustat(Rij, g, k):
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A_p statistic: sum of U counts rising up to the peak p
        # and falling after it.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p + 1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1 ** 2 + N2 ** 2 - np.sum(n ** 2) - n.iloc[p] ** 2) / 4

    def _var_at(p, n):
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1 ** 3 + N2 ** 3) + 3 * (N1 ** 2 + N2 ** 2) -
               np.sum(n ** 2 * (2 * n + 3)) - n.iloc[p] ** 2 * (2 * n.iloc[p] + 3) +
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: asymptotic normal approximation.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: take the max standardized A_p and estimate the p value
        # by permutation.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # BUG FIX: the original returned an array (mt[mt > stat] / n_perm);
        # the permutation p value is the proportion of permuted maxima that
        # exceed the observed statistic.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.

    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size

    # Normal scores: inverse-normal transform of the overall ranks.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_stats(i, j):
        # Two-sided t test on the difference of mean normal scores.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # BUG FIX: np.array() on a ragged list of groups raises on modern
        # NumPy; a plain list of per-group slices is all we need.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = a
        # BUG FIX: np.array(a) fails for unequal-length groups (including the
        # docstring example) on NumPy >= 1.24; convert each row separately
        # and drop NaNs.
        x_grouped = [np.asarray(row, dtype=float) for row in x]
        x_grouped = [row[~np.isnan(row)] for row in x_grouped]
        x_lens = np.asarray([len(row) for row in x_grouped])

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_pooled(i, j):
        # t test with a pooled standard deviation common to all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # BUG FIX: alpha was hard-coded to 0.05, silently ignoring the caller's
    # significance level.
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # np.str / np.int were removed in NumPy 1.24; use the builtins instead.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)

    vs = np.zeros((groups_len, groups_len), dtype=int)

    # Walk the summary table (skipping the header row) and mark each pair
    # as significant (1) or not (0).
    for row in result.summary()[1:]:
        group_a = str(row[0])
        group_b = str(row[1])
        a_idx = np.where(groups == group_a)[0][0]
        b_idx = np.where(groups == group_b)[0][0]
        vs[a_idx, b_idx] = 1 if str(row[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]

    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Whether to get the p-value for the one-sided hypothesis
        ('less' or 'greater') or for the two-sided hypothesis ('two-sided').
        Defaults to 'two-sided'.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    groups = np.unique(df[_group_col])
    n_groups = groups.size

    pvals = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    # Run the Mann-Whitney U test for every unordered pair of groups and
    # store the p value in the upper triangle.
    for i, j in it.combinations(range(n_groups), 2):
        sample_i = df.loc[df[_group_col] == groups[i], _val_col]
        sample_j = df.loc[df[_group_col] == groups[j], _val_col]
        pvals[i, j] = ss.mannwhitneyu(sample_i, sample_j,
                                      use_continuity=use_continuity,
                                      alternative=alternative)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
    version of the paired T-test for use with non-parametric ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        Treatment of zero-differences in the ranking process, passed through
        to `scipy.stats.wilcoxon`:
        "pratt" keeps them (more conservative), "wilcox" discards them,
        "zsplit" splits the zero rank between positive and negative ones.
    correction : bool, optional
        If True, apply the continuity correction in `scipy.stats.wilcoxon`.
        Default is False.
    p_adjust : str, optional
        Method for adjusting p values, as accepted by
        `statsmodels.sandbox.stats.multicomp.multipletests`:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    # Without explicit sorting, preserve the groups' order of appearance.
    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)

    groups = np.unique(df[_group_col])
    k = groups.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    # Run the signed-rank test for every unordered pair of groups; store the
    # raw p value in the upper triangle.
    for gi, gj in it.combinations(range(k), 2):
        sample_i = df.loc[df[_group_col] == groups[gi], _val_col]
        sample_j = df.loc[df[_group_col] == groups[gj], _val_col]
        pvals[gi, gj] = ss.wilcoxon(sample_i, sample_j,
                                    zero_method=zero_method,
                                    correction=correction)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Not used by this test (p values come directly from the
        F-distribution); kept for interface consistency with the other
        posthoc functions.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()  # per-group sample sizes
    n = ni.sum()            # total number of observations
    xi = x_grouped.mean()   # group means
    si = x_grouped.var()    # group variances
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # F statistic for the pairwise contrast between groups i and j.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Mirror the statistics, convert to p values and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()  # per-group sample sizes
    xi = x_grouped.mean()   # group means
    si = x_grouped.var()    # group variances

    def compare(i, j):
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch-Satterthwaite approximation of the degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # FIX: the original referenced an undefined name `s2i` (NameError)
            # and called any() with four positional args (TypeError).
            size_ratio = ni[i] / ni[j]
            se_ratio = (si[i] / ni[i]) / (si[j] / ni[j])
            ok1 = (9./10. <= size_ratio) and (size_ratio <= 10./9.)
            ok2 = (9./10. <= se_ratio) and (se_ratio <= 10./9.)
            ok3 = (4./5. <= size_ratio) and (size_ratio <= 5./4.) and (1./2. <= se_ratio) and (se_ratio <= 2.)
            ok4 = (2./3. <= size_ratio) and (size_ratio <= 3./2.) and (3./4. <= se_ratio) and (se_ratio <= 4./3.)
            if not any([ok1, ok2, ok3, ok4]):
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # FIX: np.float was removed in NumPy 1.24; use the builtin float.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the raw p values, then mirror and clip.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col = None, group_col = None, sort = False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()  # per-group sample sizes
    n = ni.sum()            # total number of observations
    xi = x_grouped.mean()   # group means
    si = x_grouped.var()    # group variances
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic for the pair (i, j).
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Convert the q statistics to p values via the studentized range
    # distribution, then mirror and mark the diagonal.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed to
    the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
    test is basically an extension of the U-test as re-ranking is conducted for
    each pairwise test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep the groups in their order of appearance.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    n = x_grouped.count()  # per-group sample sizes, indexed by group label
    k = groups.size        # number of groups

    def get_ties(x):
        # Tie correction term: sum of (t**3 - t) / 12 over the tied-rank counts.
        t = x.value_counts().values
        c = np.sum((t ** 3 - t) / 12.)
        return c

    def compare(i, j):
        # Re-rank only the observations of groups i and j (pairwise
        # re-ranking is what distinguishes DSCF from Kruskal-rank methods),
        # then compute the standardized two-sample U statistic.
        ni = n.loc[i]
        nj = n.loc[j]
        x_raw = x.loc[(x[_group_col] == i) | (x[_group_col] == j)].copy()
        x_raw['ranks'] = x_raw.loc[:, _val_col].rank()
        r = x_raw.groupby(_group_col)['ranks'].sum().loc[[i, j]]
        # U statistics for both orderings of the pair.
        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - r
        u_min = np.min(u)
        s = ni + nj
        # Variance of U with tie correction.
        var = (nj*ni/(s*(s - 1.))) * ((s**3 - s)/12. - get_ties(x_raw['ranks']))
        p = np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)
        return p

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    combs = it.combinations(range(k), 2)

    # Upper triangle holds the standardized statistics, converted to p values
    # via the studentized range distribution with infinite df.
    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), k, np.inf)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
|
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    def compare_stats_chi(i, j):
        # Chi-squared statistic for the mean-rank difference of groups i, j.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi

    def compare_stats_tukey(i, j):
        # Studentized-range statistic for the mean-rank difference.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q

    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
        x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()

    # Rank all observations jointly; the test compares per-group mean ranks.
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()

    # Tie correction factor according to Glantz (2012).
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 'chi':
        for i, j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
    elif dist == 'tukey':
        for i, j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
(Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
dist : str, optional
Method for determining the p value. The default distribution is "chi"
(chi-squared), else "tukey" (studentized range).
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
Pages: 395-397, 662-664.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_nemenyi(x) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L374-L480 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.
    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.
    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Sequence of (possibly ragged) groups: flatten values and label each
        # observation with its 1-based group number.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):
        # FIX: compare against None so that an explicit column index of 0 is
        # honored (the old `not all([val_id, group_id])` treated 0 as missing).
        if val_id is None or group_id is None:
            # Try to infer which column holds groups: orient the array with
            # columns as variables, then assume the column with fewer unique
            # values is the grouping column.
            if np.argmax(a.shape):
                a = a.T
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]

            # FIX: np.asscalar was removed in NumPy 1.23; ndarray.item() is
            # the documented replacement.
            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')

            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}

        # Order the column names by their positional index in the array.
        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    # Hidden helper: normalize block-design input (wide DataFrame, melted
    # mapping, or 2D array) into a melted DataFrame with the fixed column
    # names 'y', 'groups' and 'blocks', returned along with those names.
    if isinstance(a, DataFrame) and not melted:
        # Wide DataFrame: rows are blocks, columns are groups -> melt it.
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    elif melted:
        # Already-melted input: pull the three columns by the given keys.
        # NOTE(review): for a 2D ndarray, `a[group_col]` with an integer key
        # selects a ROW, not a column — presumably melted ndarrays are expected
        # to be passed column-wise or as DataFrames; verify against callers.
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})

    elif not isinstance(a, DataFrame):
        # Non-DataFrame, non-melted input: coerce to a 2D DataFrame first.
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))

        if not melted:
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
            x.columns.name = group_col
            x.index.name = block_col
            x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

        else:
            # NOTE(review): this branch is unreachable — any melted input is
            # consumed by the `elif melted:` branch above. If it were reached,
            # item assignment on a pandas Index (`x.columns[...] = ...`) would
            # raise TypeError; a rename(columns=...) would be needed instead.
            x.columns[group_col] = 'groups'
            x.columns[block_col] = 'blocks'
            x.columns[y_col] = 'y'
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'

    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction are employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    def compare_conover(i, j):
        # T statistic for the mean-rank difference of groups i and j,
        # using the tie-corrected Kruskal-Wallis H in the variance term.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        D = (n - 1. - H_cor) / (n - x_len)
        t_value = diff / np.sqrt(S2 * B * D)
        p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
        return p_value

    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep the groups in their order of appearance, then sort rows.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
        x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()

    # Rank all observations jointly; per-group mean ranks are compared.
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    x_ranks_sum = x.groupby(_group_col)['ranks'].sum()

    # Tie correction factor: 1 - sum(t^3 - t) / (n^3 - n).
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    # Kruskal-Wallis H statistic, tie-corrected.
    H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
    H_cor = H / x_ties

    # S2 is the variance of the ranks; the simple form applies with no ties.
    if x_ties == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))

    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    combs = it.combinations(range(x_len), 2)

    # Raw p values go into the upper triangle, then are optionally adjusted,
    # mirrored into the lower triangle, and the diagonal is marked with -1.
    for i, j in combs:
        vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values, as accepted by
        `statsmodels.sandbox.stats.multicomp.multipletests`:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in order of appearance and sort the rows accordingly.
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
        df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(df.index)
    groups = np.unique(df[_group_col])
    k = groups.size
    counts = df.groupby(_group_col)[_val_col].count()

    # Joint ranking of all observations; the test compares mean ranks.
    df['ranks'] = df[_val_col].rank()
    mean_ranks = df.groupby(_group_col)['ranks'].mean()

    # Tie correction term according to Glantz (2012).
    rank_counts = df.groupby('ranks').count()[_val_col].values
    tied = rank_counts[rank_counts != 1]
    tie_sum = np.sum(tied ** 3 - tied)
    tie_sum = 0 if not tie_sum else tie_sum
    tie_term = tie_sum / (12. * (n - 1))

    def _pval(gi, gj):
        # Two-sided p value of the standardized mean-rank difference.
        diff = np.abs(mean_ranks.loc[gi] - mean_ranks.loc[gj])
        variance = (n * (n + 1.) / 12. - tie_term) * (1. / counts.loc[gi] + 1. / counts.loc[gj])
        z_value = diff / np.sqrt(variance)
        return 2. * ss.norm.sf(np.abs(z_value))

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        pvals[i, j] = _pval(groups[i], groups[j])

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method = p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Nemenyi all-pairs comparisons for unreplicated blocked data.

    Usually run post hoc after a significant Friedman test. The pairwise
    statistics refer to upper quantiles of the studentized range (Tukey)
    distribution [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        If `melted` is False (default), `a` is a block-design matrix:
        rows are blocks and columns are groups, and no column arguments
        are needed. If `melted` is True, `a` is in long format and
        y_col, block_col and group_col identify the relevant columns
        (indices for arrays, names for DataFrames).
    y_col : str or int
        Column containing the y data (melted input only).
    block_col : str or int
        Column containing the blocking factor (melted input only).
    group_col : str or int
        Column containing the treatment (group) factor (melted input only).
    melted : bool, optional
        Whether `a` is supplied in melted (long) format.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        Matrix of pairwise p values; the diagonal is filled with -1.

    Notes
    -----
    This function does not test for ties.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    data, _y, _grp, _blk = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    data.sort_values(by=[_grp, _blk], ascending=True, inplace=True)
    data.dropna(inplace=True)

    groups = data[_grp].unique()
    k = groups.size
    n = data[_blk].unique().size

    # Friedman-type ranking: rank the observations within every block.
    data['mat'] = data.groupby(_blk)[_y].rank()
    avg_rank = data.groupby(_grp)['mat'].mean()

    # Common denominator of the studentized-range statistic.
    denom = np.sqrt(k * (k + 1.) / (6. * n))

    pvals = np.zeros((k, k))
    iu = np.triu_indices(k, 1)
    il = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        # |mean rank difference| / denom, scaled by sqrt(2) for psturng.
        pvals[i, j] = np.abs(avg_rank[groups[i]] - avg_rank[groups[j]]) / denom * np.sqrt(2.)

    # Convert statistics (upper triangle) to p values, then mirror them.
    pvals[iu] = psturng(pvals[iu], k, np.inf)
    pvals[il] = pvals.T[il]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Two-sided p value from the Student t distribution for one pair
        # of groups (used for all p_adjust methods except 'single-step').
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval
    def compare_tukey(i, j):
        # p value from the studentized range (Tukey) distribution, used
        # when p_adjust == 'single-step'.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = np.unique(x[_group_col])
    # k groups, n blocks; m = 1 replicate per cell (unreplicated design).
    k = groups.size
    n = x[_block_col].unique().size
    # Friedman-type ranking: rank observations within each block, then
    # sum the ranks per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    # A1: sum of squared within-block ranks; S2/T2/A/B follow Conover's
    # formulas for the pairwise t statistic denominator.
    A1 = (x['mat'] ** 2).sum()
    m = 1
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # NOTE(review): this uses np.sum(R) (sum of rank sums), not a sum of
    # squared rank sums — cross-check this T2 expression against
    # Conover (1999) before relying on it.
    T2 = 1 / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    # Fill the upper triangle with pairwise p values; 'single-step' uses
    # the Tukey distribution and skips the multipletests adjustment.
    if p_adjust == 'single-step':
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle into the lower; mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test
    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.
    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
        [110,112,123,130,145],
        [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    # Rank all observations jointly; Ri = mean rank per group, ni = group sizes.
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    # Standard deviation of the joint ranks under the null hypothesis.
    sigma = np.sqrt(n * (n + 1) / 12.)
    # NOTE(review): df is assigned but never used below — psturng is
    # called with np.inf directly.
    df = np.inf
    def compare(m, u):
        # Standardized mean-rank differences between group u and every
        # group index in m (groups below u in the assumed ordering).
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)
    # For each pair (i, j), take the maximum standardized difference over
    # all intermediate groups; statistics are stored in the LOWER triangle.
    stat = np.zeros((k, k))
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            stat[j, i] = np.max(tmp)
    stat[stat < 0] = 0
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # NOTE(review): the computed statistics live in the lower triangle
    # (stat[j, i] above), yet this copies the UPPER triangle (psturng of
    # zeroed entries) over the lower one — verify the symmetrization
    # direction against the reference implementation.
    p_values[tri_lower] = p_values.T[tri_lower]
    # NOTE(review): p_adjust is accepted and documented, but the
    # adjustment below is commented out, so the parameter currently has
    # no effect — confirm whether this is intentional.
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups; no
        col arguments are needed. If `melted` is True, y_col, block_col
        and group_col must specify the column indices (arrays) or names
        (DataFrames) of the corresponding data.
    y_col : str or int
        Name of the column that contains y data (melted input only).
    block_col : str or int
        Name of the column that contains blocking factor values.
    group_col : str or int
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh',
        'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values; the diagonal is filled with -1.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic for a pair of groups (Siegel & Castellan, 1988).
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Friedman-type ranking: rank observations within each block, then
    # average the ranks per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUG FIX: the np.float alias was removed in NumPy 1.24 and raised an
    # AttributeError; the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided p values from the standard normal distribution, clipped to 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower; mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.

    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups; no
        col arguments are needed. If `melted` is True, y_col, block_col
        and group_col must specify the column indices (arrays) or names
        (DataFrames) of the corresponding data.
    y_col : str or int
        Name of the column that contains y data (melted input only).
    block_col : str or int
        Name of the column that contains blocking factor values.
    group_col : str or int
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values (-1 on the diagonal).

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Standardized mean-rank difference for a pair of groups.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Friedman-type ranking: rank observations within each block, then
    # average the ranks per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUG FIX: the np.float alias was removed in NumPy 1.24 and raised an
    # AttributeError; the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Squared statistics follow a chi-square distribution with k - 1 df.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    # Mirror the upper triangle into the lower; mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups; no
        col arguments are needed. If `melted` is True, y_col, block_col
        and group_col must specify the column indices (arrays) or names
        (DataFrames) of the corresponding data.
    y_col : str or int
        Name of the column that contains y data (melted input only).
    block_col : str or int
        Name of the column that contains blocking factor values.
    group_col : str or int
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh',
        'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    Pandas DataFrame containing p values (-1 on the diagonal).

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Two-sided p value from the Student t distribution for a pair
        # of groups (Conover & Iman, 1979).
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    if not sort:
        # Preserve the order of first appearance of groups and blocks
        # instead of sorting them lexicographically.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)                    # number of treatments
    b = x[block_col].unique().size     # number of blocks
    r = b                              # replications per treatment
    k = t                              # treatments per block

    # Rank observations within each block; Rj = rank sum per group.
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()

    # Intermediate quantities of the Durbin/Conover t statistic.
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # BUG FIX: the np.float alias was removed in NumPy 1.24 and raised an
    # AttributeError; the builtin float is the documented replacement.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower; mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh',
        'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values (-1 on the diagonal).

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve the order of first appearance of groups instead of
        # sorting them lexicographically.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size

    # BUG FIX: the np.float alias was removed in NumPy 1.24 and raised an
    # AttributeError; the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        # anderson_ksamp returns (statistic, critical values, significance
        # level); index 2 is the approximate p value. NOTE(review):
        # scipy caps this value to a limited range — see scipy docs.
        vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col],
                                      x.loc[x[_group_col] == groups[j], _val_col]])[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower; mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups; no
        col arguments are needed. If `melted` is True, y_col, block_col
        and group_col must specify the column indices (arrays) or names
        (DataFrames) of the corresponding data.
    y_col : str or int, optional
        Name of the column that contains y data (melted input only).
    block_col : str or int
        Name of the column that contains blocking factor values.
    group_col : str or int
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh',
        'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    Pandas DataFrame containing p values (-1 on the diagonal).

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # Two-sided p value from the Student t distribution.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Two-sided p value from the standard normal distribution.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    if not sort:
        # Preserve the order of first appearance of groups and blocks
        # instead of sorting them lexicographically.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks of y, and block weights q: ranks of the blocks'
    # sample ranges (Quade's weighting).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # BUG FIX: the original indexed the hard-coded column name 'blocks',
    # which raised a KeyError for melted DataFrame input with a custom
    # block column name; use the resolved block_col instead. q is a
    # Series indexed by block labels, so q[row[block_col]] picks the
    # weight of the row's block.
    x['s'] = x.apply(lambda row: row['rr'] * q[row[block_col]], axis=1)
    x['w'] = x.apply(lambda row: row['r'] * q[row[block_col]], axis=1)

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # BUG FIX: the np.float alias was removed in NumPy 1.24 and raised an
    # AttributeError; the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower; mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decreasing treatment
    effects [1]_, [2]_.

    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale.
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical).
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None
        (peak unknown; a permutation test is used).
    n_perm : int, optional
        Number of permutations used when the peak is unknown. Default is 100.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Accepted for API consistency with the other posthoc functions but
        currently unused (this test returns a single global p value).

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # __convert_to_df returns (DataFrame, val_col, group_col) — it must be
    # unpacked just like in every other posthoc function in this module.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    # Validate the user-supplied peak; `p is not None` (rather than a truthiness
    # test) makes sure an invalid p == 0 is also rejected.
    if p is not None:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Number of pairs (a in Ri, b in Rj) with b > a (Mann-Whitney count).
        return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))

    def _ustat(Rij, g, k):
        # Pairwise Mann-Whitney U counts between all group pairs.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A(p) statistic: increasing part up to the peak plus
        # decreasing part after it.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p + 1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null mean of A(p); see Mack & Wolfe (1981).
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2) / 4

    def _var_at(p, n):
        # Null variance of A(p); see Mack & Wolfe (1981).
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -\
               np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +\
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p is not None:
        # Known peak: normal approximation of the standardized A(p).
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: maximize the standardized A(p) over all candidate
        # peaks and obtain the p value by a permutation test.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # Proportion of permutation maxima exceeding the observed statistic
        # (the original divided the exceeding *values* instead of their count).
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods: 'bonferroni', 'sidak', 'holm-sidak', 'holm',
        'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh',
        'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.

    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size
    r = ss.rankdata(x[_val_col])
    # Normal scores: quantiles of the standard normal at rank/(n+1).
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    # Global van der Waerden statistic (used in the pooled error term below).
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    # Use the builtin `float`: the `np.float` alias was removed in NumPy 1.24.
    vs = np.zeros((k, k), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_stats(i, j):
        # Two-sided t test on the difference of mean normal scores.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods: 'bonferroni', 'sidak', 'holm-sidak', 'holm',
        'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh',
        'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)

        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # Keep per-group samples in a plain list: groups may have unequal
        # sizes, and NumPy >= 1.24 refuses to build ragged ndarrays.
        x_grouped = [x[val_col].iloc[j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = a
        # Drop NaNs within each group; again use a list since the cleaned
        # groups may end up with different lengths.
        x_grouped = [np.asarray(g, dtype=float) for g in a]
        x_grouped = [g[~np.isnan(g)] for g in x_grouped]
        x_lens = np.asarray([len(g) for g in x_grouped])

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # `np.float` alias was removed in NumPy 1.24; the builtin is equivalent.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_pooled(i, j):
        # t test using a single pooled SD across all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)

        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # Pass the user-supplied significance level through; the original
    # hard-coded alpha=0.05 and silently ignored the argument.
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # Builtin `str`/`int`: the `np.str`/`np.int` aliases were removed in
    # NumPy 1.24.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)

    vs = np.zeros((groups_len, groups_len), dtype=int)

    # Walk the summary table rows (skipping the header) and mark each
    # significant pair with 1.
    # NOTE(review): assumes the 'reject' flag sits at column index 5 of the
    # summary row — verify against the installed statsmodels version.
    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    # Mirror the upper triangle into the lower one for a symmetric matrix.
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]

    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of the DataFrame column holding the dependent (response)
        variable. Required when `a` is a pandas DataFrame.
    group_col : str, optional
        Name of the DataFrame column holding the grouping (predictor)
        variable. Required when `a` is a pandas DataFrame.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Hypothesis direction passed to `scipy.stats.mannwhitneyu`.
        Defaults to 'two-sided'.
    p_adjust : str, optional
        P value adjustment method; see statsmodels.sandbox.stats.multicomp.
        Available methods: 'bonferroni', 'sidak', 'holm-sidak', 'holm',
        'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh',
        'fdr_tsbky'.
    sort : bool, optional
        Sort the DataFrame by group_col before testing. Recommended unless
        the data are sorted manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)

    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    levels = np.unique(df[_group_col])
    k = levels.size
    pvals = np.zeros((k, k))

    # U test on every unordered pair of groups; only the upper triangle is
    # filled here and mirrored afterwards.
    for gi, gj in it.combinations(range(k), 2):
        sample_i = df.loc[df[_group_col] == levels[gi], _val_col]
        sample_j = df.loc[df[_group_col] == levels[gj], _val_col]
        pvals[gi, gj] = ss.mannwhitneyu(sample_i,
                                        sample_j,
                                        use_continuity=use_continuity,
                                        alternative=alternative)[1]

    if p_adjust:
        upper = np.triu_indices(k, 1)
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    lower = np.tril_indices(k, -1)
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=levels, columns=levels)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
    version of the paired T-test for use with non-parametric ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of the DataFrame column holding the dependent (response)
        variable. Required when `a` is a pandas DataFrame.
    group_col : str, optional
        Name of the DataFrame column holding the grouping (predictor)
        variable. Required when `a` is a pandas DataFrame.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": Pratt treatment, includes zero-differences in the ranking
        process (more conservative);
        "wilcox": Wilcox treatment, discards all zero-differences;
        "zsplit": zero rank split, like Pratt but splitting the zero rank
        between positive and negative ones.
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the
        z-statistic. Default is False.
    p_adjust : str, optional
        P value adjustment method; see statsmodels.sandbox.stats.multicomp.
        Available methods: 'bonferroni', 'sidak', 'holm-sidak', 'holm',
        'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh',
        'fdr_tsbky'.
    sort : bool, optional
        Whether to sort the DataFrame by group_col and val_col. Default is
        False.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
        #df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    levels = np.unique(df[_group_col])
    k = levels.size
    pvals = np.zeros((k, k))

    # Signed-rank test for each unordered pair; upper triangle first, then
    # mirrored to keep the matrix symmetric.
    for gi, gj in it.combinations(range(k), 2):
        first = df.loc[df[_group_col] == levels[gi], _val_col]
        second = df.loc[df[_group_col] == levels[gj], _val_col]
        pvals[gi, gj] = ss.wilcoxon(first, second,
                                    zero_method=zero_method,
                                    correction=correction)[1]

    if p_adjust:
        upper = np.triu_indices(k, 1)
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    lower = np.tril_indices(k, -1)
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=levels, columns=levels)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.

    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Accepted for API consistency with the other posthoc functions but
        currently unused: Scheffe's p values come directly from the
        F-distribution.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # Scheffe F statistic for the pair (i, j).
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # Builtin `float`: the `np.float` alias was removed in NumPy 1.24.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_lower] = vs.T[tri_lower]

    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    xi = x_grouped.mean()
    si = x_grouped.var()

    def compare(i, j):
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch-Satterthwaite degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474). The original
            # referenced an undefined name `s2i`; the per-group variance series
            # is `si`.
            ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
            ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
            ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
            # `any` takes a single iterable; the original passed four
            # positional arguments, which raises TypeError.
            OK = any([ok1, ok2, ok3, ok4])
            if not OK:
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # Builtin `float`: the `np.float` alias was removed in NumPy 1.24.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the raw p values.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col = None, group_col = None, sort = False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic q for the pair (i, j).
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # Builtin `float`: the `np.float` alias was removed in NumPy 1.24.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # p values from the studentized range distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed to
    the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
    test is basically an extension of the U-test as re-ranking is conducted for
    each pairwise test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of the DataFrame column holding the dependent (response)
        variable. Required when `a` is a pandas DataFrame.
    group_col : str, optional
        Name of the DataFrame column holding the grouping (predictor)
        variable. Required when `a` is a pandas DataFrame.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col], ascending=True, inplace=True)

    levels = np.unique(df[_group_col])
    counts = df.groupby(_group_col)[_val_col].count()
    k = levels.size

    def _tie_correction(ranks):
        # Sum of (t^3 - t)/12 over all tied values in the pair's ranking.
        t = ranks.value_counts().values
        return np.sum((t ** 3 - t) / 12.)

    def _pair_stat(gi, gj):
        # Re-rank only the two groups being compared, then standardize the
        # smaller of the two U statistics.
        n_i = counts.loc[gi]
        n_j = counts.loc[gj]
        pair = df.loc[(df[_group_col] == gi) | (df[_group_col] == gj)].copy()
        pair['ranks'] = pair.loc[:, _val_col].rank()
        rank_sums = pair.groupby(_group_col)['ranks'].sum().loc[[gi, gj]]
        u = np.array([n_j * n_i + (n_j * (n_j + 1) / 2),
                      n_j * n_i + (n_i * (n_i + 1) / 2)]) - rank_sums
        u_min = np.min(u)
        total = n_i + n_j
        var = (n_j * n_i / (total * (total - 1.))) * ((total ** 3 - total) / 12. - _tie_correction(pair['ranks']))
        return np.sqrt(2.) * (u_min - n_j * n_i / 2.) / np.sqrt(var)

    stats = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        stats[gi, gj] = _pair_stat(levels[gi], levels[gj])

    # p values from the studentized range distribution (infinite df).
    stats[upper] = psturng(np.abs(stats[upper]), k, np.inf)
    stats[lower] = stats.T[lower]
    np.fill_diagonal(stats, -1)

    return DataFrame(stats, index=levels, columns=levels)
|
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_nemenyi_friedman | python | def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
'''Calculate pairwise comparisons using Nemenyi post hoc test for
unreplicated blocked data. This test is usually conducted post hoc if
significant results of the Friedman's test are obtained. The statistics
refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
[2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
containing elements of correspondary type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A one-way ANOVA with repeated measures that is also referred to as ANOVA
with unreplicated block design can also be conducted via Friedman's
test. The consequent post hoc pairwise multiple comparison test
according to Nemenyi is conducted with this function.
This function does not test for ties.
References
----------
.. [1] J. Demsar (2006), Statistical comparisons of classifiers over
multiple data sets, Journal of Machine Learning Research, 7, 1-30.
.. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
thesis, Princeton University.
.. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
Pages: 668-675.
Examples
--------
>>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
>>> # and columns are groups.
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_nemenyi_friedman(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return qval
x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
#if not sort:
# x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
# x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[_group_col].unique()
k = groups.size
n = x[_block_col].unique().size
x['mat'] = x.groupby(_block_col)[_y_col].rank()
R = x.groupby(_group_col)['mat'].mean()
vs = np.zeros((k, k))
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs *= np.sqrt(2.)
vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | Calculate pairwise comparisons using Nemenyi post hoc test for
unreplicated blocked data. This test is usually conducted post hoc if
significant results of the Friedman's test are obtained. The statistics
refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
[2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A one-way ANOVA with repeated measures that is also referred to as ANOVA
with unreplicated block design can also be conducted via Friedman's
test. The consequent post hoc pairwise multiple comparison test
according to Nemenyi is conducted with this function.
This function does not test for ties.
References
----------
.. [1] J. Demsar (2006), Statistical comparisons of classifiers over
multiple data sets, Journal of Machine Learning Research, 7, 1-30.
.. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
thesis, Princeton University.
.. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
Pages: 668-675.
Examples
--------
>>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
>>> # and columns are groups.
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_nemenyi_friedman(x) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L483-L597 | [
"def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):\n\n if isinstance(a, DataFrame) and not melted:\n x = a.copy(deep=True)\n group_col = 'groups'\n block_col = 'blocks'\n y_col = 'y'\n x.columns.name = group_col\n x.index.name = bloc... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.
    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.
    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.
    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).
    Notes
    -----
    Inference algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'
    if isinstance(a, DataFrame):
        # DataFrame input: just validate the column names and return a copy.
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col
    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Ragged input (list of per-group sequences, possibly of different
        # lengths): group labels 1..k are generated from the outer index.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
    elif isinstance(a, np.ndarray):
        # Two-column melted ndarray. If column ids were not given, infer them:
        # the column with more unique values is assumed to hold the dependent
        # variable, the other one the group labels.
        # NOTE: `val_id is None` / `group_id is None` (not truthiness) so that
        # column index 0 is accepted as an explicit id.
        if val_id is None or group_id is None:
            if np.argmax(a.shape):
                a = a.T
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
            # `np.asscalar` was removed in NumPy 1.23; `.item()` is the
            # supported replacement.
            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}
        # Order column names by their positional index before building the frame.
        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted
    DataFrame with columns 'y', 'groups' and 'blocks'.
    Returns the tuple ``(x, 'y', 'groups', 'blocks')``.
    '''
    if isinstance(a, DataFrame) and not melted:
        # Wide DataFrame: columns are groups, the index enumerates blocks.
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
    elif melted:
        # Already-melted input. For a raw ndarray, wrap it in a DataFrame
        # first so that integer y_col/group_col/block_col ids select
        # *columns* (``a[i]`` on an ndarray would select a row).
        if not isinstance(a, DataFrame):
            a = DataFrame(np.asarray(a))
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})
    else:
        # Unmelted array-like: convert to a wide DataFrame, then melt it the
        # same way as the wide-DataFrame branch above.
        x = DataFrame(np.asarray(a))
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction is employed according to Conover [1]_.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    def compare_conover(i, j):
        # Two-sided p value from a t statistic built on the difference of
        # mean rank sums of groups i and j (Conover & Iman, 1979).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        D = (n - 1. - H_cor) / (n - x_len)
        t_value = diff / np.sqrt(S2 * B * D)
        p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
        return p_value
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve the order of appearance of groups instead of sorting them.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()
    # Ranks are computed over the pooled sample (average rank for ties).
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    x_ranks_sum = x.groupby(_group_col)['ranks'].sum()
    # ties: count values sharing each rank; only tied ranks contribute.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    # Kruskal-Wallis H statistic and its tie-corrected version.
    H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
    H_cor = H / x_ties
    if x_ties == 1:
        # No ties: closed-form rank variance.
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))
    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    combs = it.combinations(range(x_len), 2)
    # Fill the upper triangle with pairwise p values, then mirror it below.
    for i, j in combs:
        vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    # Diagonal is set to -1 as a "same group" marker.
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test), typically run after a significant Kruskal-Wallis
    one-way analysis of variance by ranks [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh',
        'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction will be employed according to Glantz (2012).
    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Keep groups in order of first appearance rather than sorted order.
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n_total = len(df.index)
    uniq_groups = np.unique(df[_group_col])
    n_groups = uniq_groups.size
    counts = df.groupby(_group_col)[_val_col].count()
    # Pooled ranks and their per-group means.
    df['ranks'] = df[_val_col].rank()
    mean_ranks = df.groupby(_group_col)['ranks'].mean()
    # Tie correction term (Glantz, 2012): sum of t^3 - t over tied ranks.
    rank_counts = df.groupby('ranks').count()[_val_col].values
    tied = rank_counts[rank_counts != 1]
    tie_sum = np.sum(tied ** 3 - tied)
    tie_sum = 0 if not tie_sum else tie_sum
    tie_term = tie_sum / (12. * (n_total - 1))
    def _dunn_p(gi, gj):
        # Two-sided p value of the standardized mean-rank difference.
        mean_diff = np.abs(mean_ranks.loc[gi] - mean_ranks.loc[gj])
        var_term = n_total * (n_total + 1.) / 12.
        size_term = (1. / counts.loc[gi] + 1. / counts.loc[gj])
        z = mean_diff / np.sqrt((var_term - tie_term) * size_term)
        return 2. * ss.norm.sf(np.abs(z))
    pvals = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)
    for gi, gj in it.combinations(range(n_groups), 2):
        pvals[gi, gj] = _dunn_p(uniq_groups[gi], uniq_groups[gj])
    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method = p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal with -1.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=uniq_groups, columns=uniq_groups)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction will be employed according to Glantz (2012).
    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    def compare_stats_chi(i, j):
        # Chi-squared statistic from the mean rank difference of groups i, j.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi
    def compare_stats_tukey(i, j):
        # Studentized-range-type statistic for the same pair.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve order of first appearance instead of sorting groups.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()
    # Pooled ranks (average rank for ties) and per-group mean ranks.
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    # ties: correction factor in (0, 1]; 1 means no ties.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if dist == 'chi':
        # Chi-squared approximation; the statistic is divided by the tie
        # correction factor before looking up the survival function.
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
    elif dist == 'tukey':
        # Studentized range (Tukey) approximation via psturng.
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
    vs[tri_lower] = vs.T[tri_lower]
    # Diagonal is set to -1 as a "same group" marker.
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # t-distributed statistic on the rank-sum difference of groups i, j.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval
    def compare_tukey(i, j):
        # Studentized-range variant used for the 'single-step' adjustment.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    # NOTE(review): `sort` currently has no effect (handling is commented out).
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = np.unique(x[_group_col])
    k = groups.size
    n = x[_block_col].unique().size
    # Within-block ranks and their per-group sums.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()
    m = 1  # number of replicates per cell (unreplicated block design)
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # NOTE(review): Conover (1999) defines T2 via the sum of *squared*
    # deviations of the rank sums; here a plain np.sum(R) is used -
    # verify against the reference before relying on exact p values.
    T2 = 1 / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if p_adjust == 'single-step':
        # Tukey-based p values; no further multiplicity adjustment applied.
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    # Diagonal is set to -1 as a "same group" marker.
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
        NOTE: currently accepted for interface compatibility only and not
        applied.
    p_adjust : str, optional
        Method for adjusting p values (see `statsmodels.sandbox.stats.multicomp`).
        NOTE: currently accepted for interface compatibility only and not
        applied.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test.
    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.
    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
                      [110,112,123,130,145],
                      [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    # Pooled ranks, per-group mean ranks and group sizes.
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    sigma = np.sqrt(n * (n + 1) / 12.)
    def compare(m, u):
        # Standardized mean-rank differences between group index u and every
        # group index in m.
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)
    # For each ordered pair (i, j), i < j, the statistic is the maximum
    # standardized difference over all intermediate groups i..j-1.
    stat = np.zeros((k, k))
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            stat[j, i] = np.max(tmp)
    stat[stat < 0] = 0
    # p values from the studentized range distribution.
    p_values = psturng(stat, k, np.inf)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # NOTE(review): this copies the upper-triangle values (computed from
    # stat == 0) over the lower triangle where the statistics were filled -
    # kept as-is to preserve the original behavior; verify intent.
    p_values[tri_lower] = p_values.T[tri_lower]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
        NOTE: currently accepted for interface compatibility only and not
        applied.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # z statistic from the within-block mean rank difference of groups i, j.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size
    # Within-block ranks and per-group mean ranks.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()
    # `np.float` alias was removed in NumPy 1.24; the builtin float is the
    # documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Convert z statistics to two-sided normal p values, clipped at 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    # Diagonal is set to -1 as a "same group" marker.
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.

    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Friedman-type pairwise statistic based on mean within-block ranks.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Rank observations within each block, then average the ranks per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUG FIX: np.float was deprecated and removed from NumPy (>= 1.24);
    # the builtin float is the equivalent dtype.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # The squared statistics follow a chi-square distribution with k - 1 df.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover t statistic for the pair of group rank sums; two-sided p.
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original group/block order instead of lexicographic order.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)
    b = x[block_col].unique().size
    r = b
    k = t

    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # BUG FIX: np.float was deprecated and removed from NumPy (>= 1.24);
    # the builtin float is the equivalent dtype.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of lexicographic order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size

    # BUG FIX: np.float was deprecated and removed from NumPy (>= 1.24);
    # the builtin float is the equivalent dtype.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        # BUG FIX: the `midrank` argument was accepted but never forwarded
        # to scipy's anderson_ksamp; pass it through so the parameter works.
        vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col],
                                      x.loc[x[_group_col] == groups[j], _val_col]],
                                     midrank=midrank)[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # Pairwise t statistic on weighted rank sums S; two-sided p value.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Pairwise z statistic on scaled weighted rank sums W; two-sided p value.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original group/block order instead of lexicographic order.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks and block weights q (ranks of block ranges).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # BUG FIX: the block column was hard-coded as x['blocks'], which breaks
    # whenever the caller's block column has a different name; use block_col.
    x['s'] = x.apply(lambda row, y: row['rr'] * y[row[block_col]], axis=1, args=(q,))
    x['w'] = x.apply(lambda row, y: row['r'] * y[row[block_col]], axis=1, args=(q,))

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # BUG FIX: np.float was deprecated and removed from NumPy (>= 1.24);
    # the builtin float is the equivalent dtype.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.
    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    n_perm : int, optional
        Number of permutations used to estimate the p value when the peak `p`
        is unknown. Defaults to 100.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Accepted for interface consistency with the other post hoc tests but
        currently unused by this test.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # BUG FIX: __convert_to_df returns a (DataFrame, val_col, group_col)
    # tuple; the original assigned the whole tuple to `x`, breaking every
    # subsequent column access on `x`.
    x, val_col, group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of lexicographic order.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    x.sort_values(by=[group_col], ascending=True, inplace=True)

    k = x[group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[val_col].rank()
    n = x.groupby(group_col)[val_col].count()

    def _fn(Ri, Rj):
        # Mann-Whitney-type count: pairs where an Rj rank exceeds an Ri rank.
        return np.sum(Ri.apply(lambda r: Rj[Rj > r].size))

    def _ustat(Rij, g, k):
        # Matrix of pairwise U counts between all group levels.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[group_col] == levels[i]], Rij[x[group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[group_col] == levels[j]], Rij[x[group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A_p statistic: increasing up to peak p, decreasing after.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p + 1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null expectation of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1 ** 2 + N2 ** 2 - np.sum(n ** 2) - n.iloc[p] ** 2) / 4

    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1 ** 3 + N2 ** 3) + 3 * (N1 ** 2 + N2 ** 2) -
               np.sum(n ** 2 * (2 * n + 3)) - n.iloc[p] ** 2 * (2 * n.iloc[p] + 3) +
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: normal approximation of the standardized A_p.
        if (x.groupby(val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: maximize the standardized A_p over candidate peaks and
        # assess it by a permutation distribution of the maximum.
        U = _ustat(Rij, x[group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)
        p = A == stat
        est = None

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # BUG FIX: the permutation p value is the *proportion* of permutation
        # maxima exceeding the observed statistic; the original divided the
        # filtered array itself by n_perm, producing an array, not a scalar.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.
    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of lexicographic order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size

    # Normal scores: inverse-normal transform of the overall ranks.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    # BUG FIX: np.float was deprecated and removed from NumPy (>= 1.24);
    # the builtin float is the equivalent dtype.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_stats(i, j):
        # t statistic on the difference of mean normal scores; two-sided p.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # BUG FIX: groups may have unequal sizes, and constructing a ragged
        # np.array from them is an error in NumPy >= 1.24; keep a plain list.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        # BUG FIX: np.array(a) on ragged input (as in the docstring example)
        # raises on modern NumPy; convert row by row and drop NaNs instead.
        x = a
        x_grouped = []
        for row in a:
            row = np.asarray(row, dtype=float)
            x_grouped.append(row[~np.isnan(row)])
        x_lens = np.asarray([len(g) for g in x_grouped])

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # np.float was removed from NumPy (>= 1.24); use the builtin float dtype.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_pooled(i, j):
        # Two-sided t test using one pooled SD over all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # BUG FIX: `alpha` was accepted but ignored — 0.05 was hard-coded in the
    # pairwise_tukeyhsd call.
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # BUG FIX: np.str / np.int were removed from NumPy (>= 1.24); the builtin
    # str / int are the equivalent dtypes.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)
    vs = np.zeros((groups_len, groups_len), dtype=int)

    # NOTE(review): column index 5 is assumed to be the 'reject' column of the
    # summary table — confirm against the installed statsmodels version, which
    # may insert a p-adj column before it.
    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Whether to get the p-value for the one-sided hypothesis
        ('less' or 'greater') or for the two-sided hypothesis ('two-sided').
        Defaults to 'two-sided'.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in their order of appearance rather than sorted order.
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    groups = np.unique(df[_group_col])
    n_groups = groups.size

    pvals = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    # Run the Mann-Whitney U test for every unordered pair of groups.
    for gi, gj in it.combinations(range(n_groups), 2):
        sample_i = df.loc[df[_group_col] == groups[gi], _val_col]
        sample_j = df.loc[df[_group_col] == groups[gj], _val_col]
        pvals[gi, gj] = ss.mannwhitneyu(sample_i, sample_j,
                                        use_continuity=use_continuity,
                                        alternative=alternative)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal as not-applicable.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with the Wilcoxon signed-rank test, a
    non-parametric analogue of the paired T-test for use after a
    non-parametric ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        Treatment of zero-differences, forwarded to `scipy.stats.wilcoxon`:
        "pratt" keeps them in the ranking (more conservative), "wilcox"
        discards them, and "zsplit" splits their rank between positive and
        negative ones.

    correction : bool, optional
        If True, apply the continuity correction by adjusting the Wilcoxon
        rank statistic by 0.5 towards the mean value when computing the
        z-statistic. Default is False.

    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method  (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg  (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.

    Returns
    -------
    result : pandas DataFrame
        Matrix of pairwise p values.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)

    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in order of appearance rather than lexicographic order.
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)

    groups = np.unique(df[_group_col])
    k = groups.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    # Paired Wilcoxon test for every unordered pair of groups.
    for gi, gj in it.combinations(range(k), 2):
        first = df.loc[df[_group_col] == groups[gi], _val_col]
        second = df.loc[df[_group_col] == groups[gj], _val_col]
        pvals[gi, gj] = ss.wilcoxon(first, second,
                                    zero_method=zero_method,
                                    correction=correction)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.

    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Accepted for API consistency with the other posthoc functions;
        currently not used by this test.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')

    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # per-group sample sizes
    n = ni.sum()             # total number of observations
    xi = x_grouped.mean()    # per-group means
    si = x_grouped.var()     # per-group variances
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # F statistic for the contrast between groups i and j.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni.loc[i] + 1. / ni.loc[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # BUG FIX: `dtype=np.float` raises AttributeError on NumPy >= 1.24
    # (the deprecated alias was removed); use the builtin `float`.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_lower] = vs.T[tri_lower]
    # Survival function of the F distribution gives the p values.
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')

    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # per-group sample sizes
    xi = x_grouped.mean()    # per-group means
    si = x_grouped.var()     # per-group variances

    def compare(i, j):
        # Two-sided p value of the T2 statistic for groups i and j.
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch's approximate degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # BUG FIX: the original referenced an undefined name `s2i`
            # (NameError) where the per-group variances `si` are meant, and
            # called `any(ok1, ok2, ok3, ok4)`, a TypeError since `any`
            # takes a single iterable.
            ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
            ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
            ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
            OK = any((ok1, ok2, ok3, ok4))
            if not OK:
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # BUG FIX: `dtype=np.float` raises AttributeError on NumPy >= 1.24
    # (the deprecated alias was removed); use the builtin `float`.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the upper triangle.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col=None, group_col=None, sort=False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')

    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # per-group sample sizes
    n = ni.sum()             # total number of observations
    xi = x_grouped.mean()    # per-group means
    si = x_grouped.var()     # per-group variances
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic q for groups i and j.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # BUG FIX: `dtype=np.float` raises AttributeError on NumPy >= 1.24
    # (the deprecated alias was removed); use the builtin `float`.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # p values from the studentized range (Tukey) distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed to
    the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
    test is basically an extension of the U-test as re-ranking is conducted for
    each pairwise test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')

    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in order of appearance rather than lexicographic order.
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(df[_group_col])
    counts = df.groupby(_group_col)[_val_col].count()
    k = groups.size

    def _tie_correction(ranks):
        # Sum over tied rank groups of (t^3 - t) / 12.
        reps = ranks.value_counts().values
        return np.sum((reps ** 3 - reps) / 12.)

    def _statistic(i, j):
        # Standardized minimum Mann-Whitney U for the pair (i, j), re-ranked
        # on just the two groups' observations.
        ni = counts.loc[i]
        nj = counts.loc[j]
        pair = df.loc[(df[_group_col] == i) | (df[_group_col] == j)].copy()
        pair['ranks'] = pair.loc[:, _val_col].rank()
        rank_sums = pair.groupby(_group_col)['ranks'].sum().loc[[i, j]]
        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - rank_sums
        u_min = np.min(u)
        s = ni + nj
        # Variance with tie correction.
        var = (nj * ni / (s * (s - 1.))) * ((s ** 3 - s) / 12. - _tie_correction(pair['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)

    stats = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        stats[gi, gj] = _statistic(groups[gi], groups[gj])

    # Convert the statistics to p values via the studentized range distribution.
    stats[upper] = psturng(np.abs(stats[upper]), k, np.inf)
    stats[lower] = stats.T[lower]
    np.fill_diagonal(stats, -1)
    return DataFrame(stats, index=groups, columns=groups)
|
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_conover_friedman | python | def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
'''Calculate pairwise comparisons using Conover post hoc test for unreplicated
blocked data. This test is usually conducted post hoc after
significant results of the Friedman test. The statistics refer to
the Student t distribution [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
'single-step' : uses Tukey distribution for multiple comparisons
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A one-way ANOVA with repeated measures that is also referred to as ANOVA
with unreplicated block design can also be conducted via the
friedman.test. The consequent post hoc pairwise multiple comparison test
according to Conover is conducted with this function.
If y is a matrix, then the columns refer to the treatment and the rows
indicate the block.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_conover_friedman(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats(i, j):
dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
tval = dif / np.sqrt(A) / np.sqrt(B)
pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
return pval
def compare_tukey(i, j):
dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
pval = psturng(qval, k, np.inf)
return pval
x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
#if not sort:
# x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
# x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = np.unique(x[_group_col])
k = groups.size
n = x[_block_col].unique().size
x['mat'] = x.groupby(_block_col)[_y_col].rank()
R = x.groupby(_group_col)['mat'].sum()
A1 = (x['mat'] ** 2).sum()
m = 1
S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
T2 = 1 / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
B = 1. - T2 / (n * (m * k - 1.))
vs = np.zeros((k, k))
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
if p_adjust == 'single-step':
for i, j in combs:
vs[i, j] = compare_tukey(i, j)
else:
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust is not None:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | Calculate pairwise comparisons using Conover post hoc test for unreplicated
blocked data. This test is usually conducted post hoc after
significant results of the Friedman test. The statistics refer to
the Student t distribution [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
'single-step' : uses Tukey distribution for multiple comparisons
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A one-way ANOVA with repeated measures that is also referred to as ANOVA
with unreplicated block design can also be conducted via the
friedman.test. The consequent post hoc pairwise multiple comparison test
according to Conover is conducted with this function.
If y is a matrix, then the columns refer to the treatment and the rows
indicate the block.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_conover_friedman(x) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L599-L742 | [
"def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):\n\n if isinstance(a, DataFrame) and not melted:\n x = a.copy(deep=True)\n group_col = 'groups'\n block_col = 'blocks'\n y_col = 'y'\n x.columns.name = group_col\n x.index.name = bloc... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.

    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.

    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).

    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.

    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # List of (possibly ragged) groups: flatten the values and label each
        # observation with its 1-based group number.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):

        # cols ids not defined
        # trying to infer
        # BUG FIX: the original used `all([val_id, group_id])`, which treats a
        # legitimate column index of 0 as "not specified"; check for None.
        if val_id is None or group_id is None:
            if np.argmax(a.shape):
                a = a.T

            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]

            # BUG FIX: `np.asscalar` was removed in NumPy 1.23; `.item()` is
            # the supported replacement.
            if np.diff(ax).item():
                # The column with fewer distinct values is assumed to hold
                # the group labels.
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')

            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}

        # Order the names by their column index before building the frame.
        cols_vals = list(dict(sorted(cols.items())).values())
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted DataFrame
    with the fixed column names 'y', 'groups', and 'blocks'.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Wide block-design matrix (rows are blocks, columns are groups) or,
        with `melted=True`, already-melted data addressed by the col args.
    y_col, group_col, block_col : str or int, optional
        Column names (DataFrame) or indices (array) of the dependent variable,
        the grouping factor, and the blocking factor. Required when
        `melted=True`.
    melted : bool, optional
        Whether `a` is already in melted (long) format.

    Returns
    -------
    tuple of (DataFrame, 'y', 'groups', 'blocks')
        Melted data plus the canonical column names, in that order.

    '''
    if isinstance(a, DataFrame) and not melted:
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    elif melted:
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})

    elif not isinstance(a, DataFrame):
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))

        if not melted:
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
            x.columns.name = group_col
            x.index.name = block_col
            x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

        else:
            # BUG FIX: the original assigned into `x.columns` elementwise
            # (`x.columns[group_col] = 'groups'`), which raises TypeError
            # because a pandas Index is immutable; rename the columns instead.
            # NOTE(review): this branch is currently unreachable (melted input
            # is captured by the `elif melted` above), but keep it correct.
            x = x.rename(columns={group_col: 'groups',
                                  block_col: 'blocks',
                                  y_col: 'y'})
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'

    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method  (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg  (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction are employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')

    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in order of appearance rather than lexicographic order.
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(df.index)
    groups = np.unique(df[_group_col])
    k = groups.size
    sizes = df.groupby(_group_col)[_val_col].count()

    # Rank all observations jointly across groups.
    df['ranks'] = df[_val_col].rank()
    mean_ranks = df.groupby(_group_col)['ranks'].mean()
    sum_ranks = df.groupby(_group_col)['ranks'].sum()

    # Tie correction according to Conover.
    reps = df.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(reps[reps != 1] ** 3 - reps[reps != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    # Kruskal-Wallis H statistic with tie correction.
    H = (12. / (n * (n + 1.))) * np.sum(sum_ranks ** 2 / sizes) - 3. * (n + 1.)
    H_cor = H / ties

    if ties == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(df['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))

    def _pvalue(gi, gj):
        # Two-sided p value of Conover's t statistic for the given groups.
        diff = np.abs(mean_ranks.loc[gi] - mean_ranks.loc[gj])
        B = (1. / sizes.loc[gi] + 1. / sizes.loc[gj])
        D = (n - 1. - H_cor) / (n - k)
        t_value = diff / np.sqrt(S2 * B * D)
        return 2. * ss.t.sf(np.abs(t_value), df=n - k)

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        pvals[i, j] = _pvalue(groups[i], groups[j])

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See
        `statsmodels.sandbox.stats.multicomp` for details. Available methods
        are: 'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        Symmetric matrix of p values, with -1 on the diagonal.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Freeze the existing group order instead of relying on lexical sort.
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(df.index)
    groups = np.unique(df[_group_col])
    k = groups.size
    group_sizes = df.groupby(_group_col)[_val_col].count()

    df['ranks'] = df[_val_col].rank()
    mean_ranks = df.groupby(_group_col)['ranks'].mean()

    # Tie correction term (Glantz 2012): sum of (t**3 - t) over tied ranks.
    counts = df.groupby('ranks').count()[_val_col].values
    ties = np.sum(counts[counts != 1] ** 3 - counts[counts != 1])
    ties = 0 if not ties else ties
    tie_term = ties / (12. * (n - 1))

    def _dunn_p(g1, g2):
        # Two-sided p value from Dunn's z statistic for the pair (g1, g2).
        mean_diff = np.abs(mean_ranks.loc[g1] - mean_ranks.loc[g2])
        var_term = n * (n + 1.) / 12.
        size_term = 1. / group_sizes.loc[g1] + 1. / group_sizes.loc[g2]
        z_value = mean_diff / np.sqrt((var_term - tie_term) * size_term)
        return 2. * ss.norm.sf(np.abs(z_value))

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        pvals[gi, gj] = _dunn_p(groups[gi], groups[gj])

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Raises
    ------
    ValueError
        If `dist` is neither "chi" nor "tukey".

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    # Fail fast: previously an unknown `dist` fell through both branches and
    # silently returned a meaningless mirrored all-zero matrix.
    if dist not in ('chi', 'tukey'):
        raise ValueError('dist should be either "chi" or "tukey"')

    def compare_stats_chi(i, j):
        # Chi-squared distributed pairwise statistic (Sachs 1997).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi

    def compare_stats_tukey(i, j):
        # Studentized-range distributed pairwise statistic.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q

    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of lexical sorting.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()

    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()

    # Tie correction factor (Glantz 2012), capped at 1.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 'chi':
        for i, j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
    else:  # dist == 'tukey'
        for i, j in combs:
            # sqrt(2) converts the statistic to the studentized-range scale.
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc if
    significant results of the Friedman's test are obtained. The statistics
    refer to upper quantiles of the studentized range distribution (Tukey)
    [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via Friedman's
    test. The consequent post hoc pairwise multiple comparison test
    according to Nemenyi is conducted with this function.
    This function does not test for ties.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    df, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    df.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    df.dropna(inplace=True)

    groups = df[_group_col].unique()
    k = groups.size
    n = df[_block_col].unique().size

    # Friedman-type ranking: rank observations within each block, then
    # average the ranks per group.
    df['mat'] = df.groupby(_block_col)[_y_col].rank()
    avg_ranks = df.groupby(_group_col)['mat'].mean()

    def _qstat(gi, gj):
        # Studentized-range-type statistic for one pair of groups.
        rank_diff = np.abs(avg_ranks[groups[gi]] - avg_ranks[groups[gj]])
        return rank_diff / np.sqrt(k * (k + 1.) / (6. * n))

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = _qstat(gi, gj)

    # Scale onto the studentized range and convert to p values.
    vs *= np.sqrt(2.)
    vs[upper] = psturng(vs[upper], k, np.inf)
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test.

    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.

    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
                      [110,112,123,130,145],
                      [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    # NOTE(review): `sort` is currently ignored (groups are always taken in
    # np.unique order below) — confirm intended semantics before wiring it up.
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    sigma = np.sqrt(n * (n + 1) / 12.)

    def compare(m, u):
        # Standardized mean-rank differences between group u and each group in m.
        diffs = [(Ri.loc[groups[u]] - Ri.loc[groups[_mi]]) / (sigma / np.sqrt(2) * np.sqrt(1. / ni.loc[groups[_mi]] + 1. / ni.loc[groups[u]])) for _mi in m]
        return np.array(diffs)

    stat = np.zeros((k, k))
    for i in range(k - 1):
        for j in range(i + 1, k):
            # NPM statistic: max standardized difference over the ordered range i..j.
            m = np.arange(i, j)
            stat[j, i] = np.max(compare(m, j))
    stat[stat < 0] = 0

    p_values = psturng(stat, k, np.inf)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    if p_adjust:
        # BUGFIX: p_adjust was documented but silently ignored (the call was
        # commented out). Adjust the meaningful (lower-triangle) p values.
        p_values[tri_lower] = multipletests(p_values[tri_lower], method=p_adjust)[1]
    # BUGFIX: the statistics live in the *lower* triangle (stat[j, i], j > i).
    # The original code mirrored the upper triangle (p values of zero
    # statistics) over the real results; mirror lower -> upper instead.
    p_values[tri_upper] = p_values.T[tri_upper]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic for the pair of groups (i, j), Siegel & Castellan (1988).
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Friedman-type ranking: rank observations within each block.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUGFIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the equivalent dtype.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided p values from the standard normal distribution, capped at 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Pairwise statistic on Friedman-type average ranks.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Friedman-type ranking: rank observations within each block.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUGFIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the equivalent dtype.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Squared statistics follow a chi-square distribution with k - 1 df.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # t-distributed pairwise statistic on within-block rank sums.
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original ordering of groups and blocks.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)
    b = x[block_col].unique().size
    r = b
    k = t

    # Rank observations within each block; Rj is the rank sum per group.
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()

    # Durbin test components (Conover 1999).
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # BUGFIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the equivalent dtype.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of lexical sorting.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size

    # BUGFIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the equivalent dtype.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        # anderson_ksamp returns (statistic, critical_values, significance_level);
        # index 2 is the approximate p value.
        vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col], x.loc[x[_group_col] == groups[j], _val_col]])[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # t-distributed pairwise statistic on weighted rank sums S.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Normally distributed pairwise statistic on normalized weighted ranks W.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original ordering of groups and blocks.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks and block weights q (ranks of the block ranges).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # BUGFIX: the lambdas used the hard-coded column name 'blocks', which only
    # exists when `a` was a non-melted array; a DataFrame input with another
    # block column name raised KeyError. Use the actual block column instead.
    x['s'] = x.apply(lambda row, qq: row['rr'] * qq[row[block_col]], axis=1, args=(q,))
    x['w'] = x.apply(lambda row, qq: row['r'] * qq[row[block_col]], axis=1, args=(q,))

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # BUGFIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the equivalent dtype.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decreasing treatment
    effects [1]_, [2]_.

    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str
        Name of a DataFrame column that contains dependent variable values
        (test or response variable).
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    n_perm : int, optional
        Number of permutations used to obtain the p value when `p` is unknown.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Accepted for signature compatibility; not used by this test.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # BUG FIX: __convert_to_df returns (DataFrame, val_col_name, group_col_name)
    # (see its use in every sibling function). Assigning the whole tuple to `x`
    # broke all subsequent DataFrame operations.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Mann-Whitney count: number of (Ri, Rj) pairs with Rj > Ri.
        return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))

    def _ustat(Rij, g, k):
        # Matrix of pairwise U statistics between all group pairs.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A_p statistic for a peak at position p.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p + 1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null expectation of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2) / 4

    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -
               np.sum(n**2 * (2 * n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: normal approximation.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: maximize the standardized A_p over all candidate
        # peaks and obtain the p value by a permutation test.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)
        p = A == stat
        est = None

        mt = []
        for i in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # BUG FIX: the p value is the *proportion* of permutation maxima that
        # exceed the observed statistic; the old code divided the raw array
        # (not its size) by n_perm, returning an array instead of a float.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed.
    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size
    r = ss.rankdata(x[_val_col])
    # Normal (van der Waerden) scores of the ranks.
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    # FIX: np.float was deprecated and removed from NumPy; builtin float is
    # the exact equivalent here.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    def compare_stats(i, j):
        # t statistic for the difference of mean normal scores (Conover/Iman).
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # FIX: keep the ragged per-group slices in a plain list; np.array()
        # over unequal-length sequences raises on modern NumPy.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = a
        # Drop NaN padding from each row; rows may have unequal lengths, so a
        # list (not a 2-D ndarray) is used as the container.
        x_grouped = [np.asarray(row)[~np.isnan(np.asarray(row, dtype=float))] for row in x]
        x_lens = np.asarray([len(row) for row in x_grouped])
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # FIX: np.float was removed from NumPy; builtin float is equivalent.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    def compare_pooled(i, j):
        # Two-sided t test using a pooled standard deviation over all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # BUG FIX: the user-supplied `alpha` was silently ignored (hard-coded 0.05).
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # FIX: np.str / np.int were removed from NumPy; builtins are equivalent.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)

    vs = np.zeros((groups_len, groups_len), dtype=int)

    # Walk the summary table rows; column 5 holds the reject decision.
    # NOTE(review): the summary column layout may differ across statsmodels
    # versions — confirm index 5 is the 'reject' column for the pinned version.
    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]

    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Must be specified if `a` is a pandas DataFrame.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Whether to get the p-value for the one-sided hypothesis
        ('less' or 'greater') or for the two-sided hypothesis ('two-sided').
        Defaults to 'two-sided'.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    data, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Ordered categorical keeps the groups in order of appearance.
        data[_group_col] = Categorical(data[_group_col], categories=data[_group_col].unique(), ordered=True)
    data.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    groups = np.unique(data[_group_col])
    n_groups = groups.size

    pvals = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    # U test for every unordered pair of groups.
    for gi, gj in it.combinations(range(n_groups), 2):
        sample_i = data.loc[data[_group_col] == groups[gi], _val_col]
        sample_j = data.loc[data[_group_col] == groups[gj], _val_col]
        pvals[gi, gj] = ss.mannwhitneyu(sample_i, sample_j,
                                        use_continuity=use_continuity,
                                        alternative=alternative)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)

    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
    version of the paired T-test for use with non-parametric ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Must be specified if `a` is a pandas DataFrame.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": Pratt treatment, includes zero-differences in the ranking
        process (more conservative)
        "wilcox": Wilcox treatment, discards all zero-differences
        "zsplit": Zero rank split, just like Pratt, but spliting the zero rank
        between positive and negative ones
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the z-statistic.
        Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    data, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        data[_group_col] = Categorical(data[_group_col], categories=data[_group_col].unique(), ordered=True)
    # NOTE(review): value sorting is deliberately disabled here (it was
    # commented out in the original) — the signed-rank test pairs
    # observations, so reordering values would change the pairing.

    groups = np.unique(data[_group_col])
    n_groups = groups.size

    pvals = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    # Signed-rank test for every unordered pair of groups.
    for gi, gj in it.combinations(range(n_groups), 2):
        first = data.loc[data[_group_col] == groups[gi], _val_col]
        second = data.loc[data[_group_col] == groups[gj], _val_col]
        pvals[gi, gj] = ss.wilcoxon(first, second,
                                    zero_method=zero_method,
                                    correction=correction)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)

    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Accepted for signature compatibility but not applied: Scheffe's
        procedure controls the family-wise error rate by construction.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # F statistic for the pairwise contrast between groups i and j.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # FIX: np.float was removed from NumPy; builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    combs = it.combinations(range(groups.size), 2)

    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_lower] = vs.T[tri_lower]

    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)
    np.fill_diagonal(p_values, -1)

    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    xi = x_grouped.mean()
    si = x_grouped.var()

    def compare(i, j):
        # Welch-type t statistic with per-group variances.
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch-Satterthwaite degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # BUG FIX: these conditions referenced an undefined name `s2i`
            # (NameError) — the per-group variances are stored in `si`.
            ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
            ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
            ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
            # BUG FIX: any() takes a single iterable, not four positional
            # arguments (the old call raised TypeError).
            OK = any([ok1, ok2, ok3, ok4])
            if not OK:
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # FIX: np.float was removed from NumPy; builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    combs = it.combinations(range(groups.size), 2)

    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the raw p values.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1
    np.fill_diagonal(vs, -1)

    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col = None, group_col = None, sort = False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic q for the pair (i, j).
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # FIX: np.float was removed from NumPy; builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    combs = it.combinations(range(groups.size), 2)

    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    # p values from the studentized range (Tukey) distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed to
    the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
    test is basically an extension of the U-test as re-ranking is conducted for
    each pairwise test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Must be specified if `a` is a pandas DataFrame.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    data, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        data[_group_col] = Categorical(data[_group_col], categories=data[_group_col].unique(), ordered=True)
    data.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(data[_group_col])
    counts = data.groupby(_group_col)[_val_col].count()
    k = groups.size

    def _tie_term(ranks):
        # Tie correction term: sum of (t^3 - t) / 12 over tie group sizes.
        t = ranks.value_counts().values
        return np.sum((t ** 3 - t) / 12.)

    def _pair_stat(gi, gj):
        # Re-rank only the two groups involved and compute the standardized
        # minimum U statistic for the pair.
        ni = counts.loc[gi]
        nj = counts.loc[gj]
        pair = data.loc[(data[_group_col] == gi) | (data[_group_col] == gj)].copy()
        pair['ranks'] = pair.loc[:, _val_col].rank()
        rank_sums = pair.groupby(_group_col)['ranks'].sum().loc[[gi, gj]]
        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - rank_sums
        u_min = np.min(u)
        s = ni + nj
        var = (nj * ni / (s * (s - 1.))) * ((s ** 3 - s) / 12. - _tie_term(pair['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = _pair_stat(groups[i], groups[j])

    # p values from the studentized range distribution with infinite df.
    vs[upper] = psturng(np.abs(vs[upper]), k, np.inf)
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)

    return DataFrame(vs, index=groups, columns=groups)
|
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):

    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD).
    See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).

    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.

    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.

    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.

    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)

    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover's t statistic for the difference of within-block rank sums
        # of groups i and j; two-sided p value from the t distribution.
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original appearance order of groups and blocks.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)

    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)                    # number of treatments
    b = x[block_col].unique().size     # number of blocks
    r = b                              # replications per treatment (balanced design)
    k = t                              # treatments per block

    # Rank observations within each block; Rj is the rank sum per treatment.
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # FIX: np.float was removed in NumPy >= 1.24; the builtin float is the
    # documented replacement and is what np.float aliased anyway.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle; -1 marks the diagonal (group vs itself).
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
"def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):\n\n if isinstance(a, DataFrame) and not melted:\n x = a.copy(deep=True)\n group_col = 'groups'\n block_col = 'blocks'\n y_col = 'y'\n x.columns.name = group_col\n x.index.name = bloc... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):

    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.

    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.

    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).

    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Ragged input: a sequence of per-group value sequences. Flatten it
        # and label each value with its (1-based) group number.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i + 1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):
        # FIX: 0 is a valid column index, so test for None explicitly.
        # The previous truthiness check `not all([val_id, group_id])` wrongly
        # fell back to inference when either index was 0.
        if val_id is None or group_id is None:
            # Column ids not defined — infer them: the column with fewer
            # distinct values is assumed to be the grouping column.
            if np.argmax(a.shape):
                a = a.T
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
            # FIX: np.asscalar() was removed in NumPy >= 1.23; ndarray.item()
            # is its documented replacement.
            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}

        # Order the names by column position before naming the DataFrame.
        cols_vals = list(dict(sorted(cols.items())).values())
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted
    DataFrame with the canonical columns "y", "groups", and "blocks".

    Parameters mirror the public posthoc_* block-design functions: `a` may be
    a DataFrame or an array, melted or not; for melted arrays the col args
    are column indices, for melted DataFrames they are column names.

    Returns the tuple (x, 'y', 'groups', 'blocks').
    '''
    if isinstance(a, DataFrame) and not melted:
        # Wide DataFrame: rows are blocks, columns are groups — melt it.
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    elif melted and isinstance(a, DataFrame):
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})

    elif melted:
        # FIX: for a melted ndarray, y_col/group_col/block_col are *column*
        # indices (per the public docstrings). The previous code indexed rows
        # (`a[group_col]`) and its fallback branch assigned into the immutable
        # columns Index (`x.columns[group_col] = ...`), which raises TypeError.
        arr = np.asarray(a)
        x = DataFrame.from_dict({'groups': arr[:, group_col],
                                 'blocks': arr[:, block_col],
                                 'y': arr[:, y_col]})

    else:
        # Non-melted array: rows are blocks, columns are groups — melt it.
        x = DataFrame(np.asarray(a))
        x.columns.name = 'groups'
        x.index.name = 'blocks'
        x = x.reset_index().melt(id_vars='blocks', var_name='groups', value_name='y')

    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction are employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    def compare_conover(i, j):
        # Conover's t statistic for groups i and j: difference of mean rank
        # sums scaled by the pooled variance and the KW tie correction.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        D = (n - 1. - H_cor) / (n - x_len)
        t_value = diff / np.sqrt(S2 * B * D)
        # Two-sided p value from the t distribution with n - k degrees of freedom.
        p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
        return p_value
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Keep groups in their original appearance order instead of sorting them.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    x_ranks_sum = x.groupby(_group_col)['ranks'].sum()
    # ties: tie correction factor per Conover (1979); equals 1 when no ties.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    # Kruskal-Wallis H statistic, then corrected for ties.
    H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
    H_cor = H / x_ties
    if x_ties == 1:
        # No ties: closed-form variance of ranks.
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))
    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    combs = it.combinations(range(x_len), 2)
    # Fill the upper triangle with pairwise p values, then mirror it below.
    for i, j in combs:
        vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    # -1 marks the diagonal (a group compared with itself).
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''All-pairs comparison of mean rank sums (Dunn's test).

    Typically applied after a significant Kruskal-Wallis test to determine
    which pairs of groups differ [1]_, [2]_. A tie correction is applied
    following Glantz (2012).

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Must be two-dimensional; the second dimension may vary
        (groups of different lengths are allowed).

    val_col : str, optional
        Name of the DataFrame column holding the dependent (response)
        variable. Required when `a` is a DataFrame.

    group_col : str, optional
        Name of the DataFrame column holding the grouping (predictor)
        variable. Required when `a` is a DataFrame.

    p_adjust : str, optional
        P-value correction method, passed to
        `statsmodels.sandbox.stats.multicomp` (e.g. 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh',
        'fdr_by', 'fdr_tsbh', 'fdr_tsbky').

    sort : bool, optional
        Whether to sort the DataFrame by the group column. Recommended
        unless data are pre-sorted.

    Returns
    -------
    result : pandas DataFrame
        Matrix of pairwise p values; the diagonal is set to -1.

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    data, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve original group order rather than lexicographic order.
        data[_group_col] = Categorical(data[_group_col], categories=data[_group_col].unique(), ordered=True)
    data.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(data.index)
    uniq_groups = np.unique(data[_group_col])
    n_groups = uniq_groups.size
    counts = data.groupby(_group_col)[_val_col].count()

    data['ranks'] = data[_val_col].rank()
    mean_ranks = data.groupby(_group_col)['ranks'].mean()

    # Tie-correction term (Glantz 2012): sum of t^3 - t over tied rank groups.
    rank_counts = data.groupby('ranks').count()[_val_col].values
    tied = rank_counts[rank_counts != 1]
    tie_total = np.sum(tied ** 3 - tied)
    tie_total = 0 if not tie_total else tie_total
    tie_term = tie_total / (12. * (n - 1))

    def z_test_p(gi, gj):
        # z statistic for the mean rank difference of groups gi and gj,
        # with tie-corrected variance; two-sided normal p value.
        mean_diff = np.abs(mean_ranks.loc[gi] - mean_ranks.loc[gj])
        var_part = n * (n + 1.) / 12.
        size_part = 1. / counts.loc[gi] + 1. / counts.loc[gj]
        z = mean_diff / np.sqrt((var_part - tie_term) * size_part)
        return 2. * ss.norm.sf(np.abs(z))

    pmat = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    for gi, gj in it.combinations(range(n_groups), 2):
        pmat[gi, gj] = z_test_p(uniq_groups[gi], uniq_groups[gj])

    if p_adjust:
        pmat[upper] = multipletests(pmat[upper], method=p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal with -1.
    pmat[lower] = pmat.T[lower]
    np.fill_diagonal(pmat, -1)
    return DataFrame(pmat, index=uniq_groups, columns=uniq_groups)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).

    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    def compare_stats_chi(i, j):
        # Chi-squared statistic for the mean rank difference of groups i and j.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi
    def compare_stats_tukey(i, j):
        # Studentized-range (q) statistic for the same mean rank difference.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Keep groups in their original appearance order instead of sorting them.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    # ties: correction factor (Glantz 2012); equals 1 when there are no ties.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if dist == 'chi':
        # Chi-squared variant: tie-corrected statistic, chi2 survival function.
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
    elif dist == 'tukey':
        # Tukey variant: q scaled by sqrt(2) for the studentized range table.
        # NOTE(review): no tie correction is applied on this path — confirm intended.
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
    # Mirror the upper triangle; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc if
    significant results of the Friedman's test are obtained. The statistics
    refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
    [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).

    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.

    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.

    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.

    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via Friedman's
    test. The consequent post hoc pairwise multiple comparison test
    according to Nemenyi is conducted with this function.
    This function does not test for ties.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Studentized-range (q) statistic for the mean-rank difference of
        # groups i and j over n blocks and k treatments.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    # NOTE(review): the `sort` parameter is documented but currently has no
    # effect — the Categorical ordering below is commented out. Confirm intended.
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[_group_col].unique()
    k = groups.size
    n = x[_block_col].unique().size
    # Rank observations within each block; R holds the mean rank per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].mean()
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Scale by sqrt(2) to match the studentized range parameterization.
    vs *= np.sqrt(2.)
    vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
    # Mirror the upper triangle; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).

    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.

    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.

    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.

    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Conover's t statistic for the rank-sum difference of groups i and j;
        # two-sided p value from the t distribution.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval
    def compare_tukey(i, j):
        # Same difference, mapped onto the studentized range (Tukey) distribution.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    # NOTE(review): the `sort` parameter is documented but currently has no
    # effect — the Categorical ordering below is commented out. Confirm intended.
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = np.unique(x[_group_col])
    k = groups.size
    n = x[_block_col].unique().size
    # Rank observations within each block; R holds the rank sum per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()
    m = 1
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # NOTE(review): verify the T2 term against Conover (1999) — the reference
    # formula sums squared deviations of R; this may be a transcription issue.
    T2 = 1 / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if p_adjust == 'single-step':
        # 'single-step' uses the Tukey distribution directly; no further
        # multiplicity adjustment is applied.
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.

    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. NOTE: currently accepted for interface compatibility but
        not applied — p values come straight from the studentized range
        distribution.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test.

    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.

    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
                      [110,112,123,130,145],
                      [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    # Null standard deviation of the overall ranks.
    sigma = np.sqrt(n * (n + 1) / 12.)

    def compare(m, u):
        # Standardized differences between the mean rank of group u and each
        # of the groups listed in m.
        a = [(Ri.loc[groups[u]] - Ri.loc[groups[_mi]]) /
             (sigma / np.sqrt(2) * np.sqrt(1. / ni.loc[groups[_mi]] + 1. / ni.loc[groups[u]]))
             for _mi in m]
        return np.array(a)

    stat = np.zeros((k, k))
    for i in range(k - 1):
        for j in range(i + 1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            # Test statistic is the maximum standardized difference over the
            # intermediate groups (simply ordered alternative).
            stat[j, i] = np.max(tmp)
    stat[stat < 0] = 0

    p_values = psturng(stat, k, np.inf)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # Mirror the lower triangle into the upper one so the matrix is symmetric.
    p_values[tri_lower] = p_values.T[tri_lower]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).

    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.

    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.

    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.

    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic for the difference of mean Friedman ranks.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Friedman-type ranking: rank within each block, then average per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # np.float was removed in NumPy 1.24; the builtin is equivalent here.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided p values from the normal distribution, clipped to 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).

    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.

    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.

    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.

    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Standardized difference of mean Friedman ranks; squared below and
        # referred to the chi-square distribution with k - 1 df.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Friedman-type ranking: rank within each block, then average per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # np.float was removed in NumPy 1.24; the builtin is equivalent here.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size

    # np.float was removed in NumPy 1.24; the builtin is equivalent here.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        # Index 2 of anderson_ksamp's result is the approximate
        # significance level (p value) of the pairwise comparison.
        vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col],
                                      x.loc[x[_group_col] == groups[j], _val_col]])[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).

    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.

    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.

    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.

    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".

    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # t statistic on the weighted rank sums S (Conover 1999).
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Normal approximation on the scaled W sums.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks of y, and ranks of the block ranges (Quade weights).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # Use the block column name returned by the converter rather than the
    # hard-coded 'blocks' label, so melted DataFrame input works too.
    x['s'] = x.apply(lambda row, qr: row['rr'] * qr[row[block_col]], axis=1, args=(q,))
    x['w'] = x.apply(lambda row, qr: row['r'] * qr[row[block_col]], axis=1, args=(q,))

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # np.float was removed in NumPy 1.24; the builtin is equivalent here.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.

    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.

    n_perm : int, optional
        Number of permutations used to estimate the p value when the peak is
        unknown. Defaults to 100.

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Accepted for interface compatibility; not applied by this test.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # __convert_to_df returns (DataFrame, val_col_name, group_col_name); the
    # previous code assigned the whole tuple to `x`, breaking all indexing.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Mann-Whitney count: number of pairs with Rj > Ri.
        return np.sum(Ri.apply(lambda z: Rj[Rj > z].size))

    def _ustat(Rij, g, k):
        # Pairwise U-statistic matrix over group levels.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A_p statistic for peak at position p.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p + 1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null mean of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1 ** 2 + N2 ** 2 - np.sum(n ** 2) - n.iloc[p] ** 2) / 4

    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1 ** 3 + N2 ** 3) + 3 * (N1 ** 2 + N2 ** 2) -
               np.sum(n ** 2 * (2 * n + 3)) - n.iloc[p] ** 2 * (2 * n.iloc[p] + 3) +
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: normal approximation.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: maximize the standardized A_p over all positions and
        # estimate the p value by permutation.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # Fraction of permuted maxima exceeding the observed statistic.
        # The previous code divided the filtered array itself by n_perm,
        # returning an array instead of a scalar p value.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.

    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size

    # Normal scores: overall ranks mapped through the inverse normal CDF.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    param = k - 1
    A = aj / nj

    # np.float was removed in NumPy 1.24; the builtin is equivalent here.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    def compare_stats(i, j):
        # t statistic on the mean normal scores of groups i and j.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.

    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.

    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # Keep group slices in a plain list; wrapping ragged sequences in
        # np.array() is an error in NumPy >= 1.24.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = a
        # Drop NaN values per group so groups may have unequal lengths.
        x_grouped = []
        for row in x:
            row = np.asarray(row, dtype=float)
            x_grouped.append(row[~np.isnan(row)])
        x_lens = np.asarray([len(g) for g in x_grouped])

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # np.float was removed in NumPy 1.24; the builtin is equivalent here.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    def compare_pooled(i, j):
        # Two-sided t test using a pooled standard deviation across all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).

    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).

    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # Pass the caller's alpha through; it was previously hard-coded to 0.05,
    # silently ignoring the parameter.
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # np.str / np.int were removed in NumPy 1.24; the builtins are equivalent.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)

    vs = np.zeros((groups_len, groups_len), dtype=int)

    # Translate the statsmodels summary table into a 0/1 significance matrix.
    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of the column holding the dependent (response) variable. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the column holding the grouping (predictor) variable. Must be
        specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Alternative hypothesis for each pairwise test. Defaults to 'two-sided'.
    p_adjust : str, optional
        P value correction method, as accepted by
        `statsmodels.sandbox.stats.multicomp.multipletests` (e.g. 'bonferroni',
        'holm', 'fdr_bh', ...).
    sort : bool, optional
        Whether to sort the DataFrame by group_col. Recommended unless the data
        are sorted manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    data, v_col, g_col = __convert_to_df(a, val_col, group_col)

    # With sort=False the groups are turned into an ordered Categorical in
    # order of appearance, so the unconditional sort below keeps that order.
    if not sort:
        data[g_col] = Categorical(data[g_col], categories=data[g_col].unique(), ordered=True)
    data.sort_values(by=[g_col, v_col], ascending=True, inplace=True)

    group_names = np.unique(data[g_col])
    k = group_names.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        sample_i = data.loc[data[g_col] == group_names[gi], v_col]
        sample_j = data.loc[data[g_col] == group_names[gj], v_col]
        pvals[gi, gj] = ss.mannwhitneyu(sample_i,
                                        sample_j,
                                        use_continuity=use_continuity,
                                        alternative=alternative)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=group_names, columns=group_names)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test. It is a
    non-parametric version of the paired T-test for use with non-parametric
    ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of the column holding the dependent (response) variable. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the column holding the grouping (predictor) variable. Must be
        specified if `a` is a pandas DataFrame object.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": includes zero-differences in the ranking process (more
        conservative); "wilcox": discards all zero-differences; "zsplit":
        splits the zero rank between positive and negative ones.
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the
        z-statistic. Default is False.
    p_adjust : str, optional
        P value correction method, as accepted by
        `statsmodels.sandbox.stats.multicomp.multipletests` (e.g. 'bonferroni',
        'holm', 'fdr_bh', ...).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    data, v_col, g_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        data[g_col] = Categorical(data[g_col], categories=data[g_col].unique(), ordered=True)
    # NOTE: values are deliberately NOT sorted here (upstream keeps the
    # sort_values call disabled), since the test is paired and order matters.

    group_names = np.unique(data[g_col])
    k = group_names.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        sample_i = data.loc[data[g_col] == group_names[gi], v_col]
        sample_j = data.loc[data[g_col] == group_names[gj], v_col]
        pvals[gi, gj] = ss.wilcoxon(sample_i,
                                    sample_j,
                                    zero_method=zero_method,
                                    correction=correction)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=group_names, columns=group_names)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with
    equal group variances. For all-pairs comparisons in an one-factorial layout
    with normally distributed residuals and equal variances Scheffe's test can
    be performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of the column holding the dependent (response) variable. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the column holding the grouping (predictor) variable. Must be
        specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Accepted for API compatibility; currently unused (p values come
        directly from the F-distribution).

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    # With sort=False the appearance order is preserved via an ordered
    # Categorical before the unconditional sort below.
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # per-group sample sizes
    n = ni.sum()             # total sample size
    xi = x_grouped.mean()    # per-group means
    si = x_grouped.var()     # per-group variances
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # Scheffe F statistic for the (i, j) pair of group labels.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni.loc[i] + 1. / ni.loc[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # BUG FIX: `dtype=np.float` was removed from NumPy (>= 1.24); the default
    # dtype of np.zeros is already float64.
    vs = np.zeros((groups.size, groups.size))
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Mirror the F statistics, convert to p values, and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data
    with unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of the column holding the dependent (response) variable. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the column holding the grouping (predictor) variable. Must be
        specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree
        of freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according
    to Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple
        Comparisons of Means with Unequal Variances. Journal of the American
        Statistical Association, 74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # per-group sample sizes
    xi = x_grouped.mean()    # per-group means
    si = x_grouped.var()     # per-group variances

    def compare(i, j):
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)

        if welch:
            # Welch-Satterthwaite degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # BUG FIX: this branch previously referenced an undefined name
            # `s2i` (NameError) and called `any(ok1, ok2, ok3, ok4)`
            # (TypeError: any() takes a single iterable).
            se_i = si[i] / ni[i]
            se_j = si[j] / ni[j]
            ok1 = (9. / 10. <= ni[i] / ni[j] <= 10. / 9.)
            ok2 = (9. / 10. <= se_i / se_j <= 10. / 9.)
            ok3 = (4. / 5. <= ni[i] / ni[j] <= 5. / 4.) and (1. / 2. <= se_i / se_j <= 2.)
            ok4 = (2. / 3. <= ni[i] / ni[j] <= 3. / 2.) and (3. / 4. <= se_i / se_j <= 4. / 3.)
            if not any([ok1, ok2, ok3, ok4]):
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.

        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # BUG FIX: `dtype=np.float` was removed from NumPy (>= 1.24); the default
    # dtype of np.zeros is already float64.
    vs = np.zeros((groups.size, groups.size))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the raw p values.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col=None, group_col=None, sort=False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed
    data with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal
    variances Tukey's test can be performed. A total of m = k(k-1)/2
    hypotheses can be tested. The null hypothesis is tested in the two-tailed
    test against the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of the column holding the dependent (response) variable. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the column holding the grouping (predictor) variable. Must be
        specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of
        Variance, Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # per-group sample sizes
    n = ni.sum()             # total sample size
    xi = x_grouped.mean()    # per-group means
    si = x_grouped.var()     # per-group variances
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic for the (i, j) pair of group labels.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # BUG FIX: `dtype=np.float` was removed from NumPy (>= 1.24); the default
    # dtype of np.zeros is already float64.
    vs = np.zeros((groups.size, groups.size))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Convert q statistics to p values via the studentized range distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed
    to the all-pairs comparison procedures that depend on Kruskal ranks, the
    DSCF test is basically an extension of the U-test as re-ranking is
    conducted for each pairwise test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of the column holding the dependent (response) variable. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the column holding the grouping (predictor) variable. Must be
        specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions
        to Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    data, v_col, g_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        data[g_col] = Categorical(data[g_col], categories=data[g_col].unique(), ordered=True)
    data.sort_values(by=[g_col], ascending=True, inplace=True)

    group_names = np.unique(data[g_col])
    counts = data.groupby(g_col)[v_col].count()
    k = group_names.size

    def _tie_term(ranks):
        # Tie correction term for a vector of ranks.
        t = ranks.value_counts().values
        return np.sum((t ** 3 - t) / 12.)

    def _statistic(gi, gj):
        # Standardized minimum U statistic for the (gi, gj) pair of labels,
        # computed on a re-ranked two-group subset.
        ni = counts.loc[gi]
        nj = counts.loc[gj]
        pair = data.loc[(data[g_col] == gi) | (data[g_col] == gj)].copy()
        pair['ranks'] = pair.loc[:, v_col].rank()
        r = pair.groupby(g_col)['ranks'].sum().loc[[gi, gj]]
        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - r
        u_min = np.min(u)
        s = ni + nj
        var = (nj * ni / (s * (s - 1.))) * ((s ** 3 - s) / 12. - _tie_term(pair['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)

    stats = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        stats[gi, gj] = _statistic(group_names[gi], group_names[gj])

    stats[upper] = psturng(np.abs(stats[upper]), k, np.inf)
    stats[lower] = stats.T[lower]
    np.fill_diagonal(stats, -1)
    return DataFrame(stats, index=group_names, columns=group_names)
|
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_anderson | python | def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
'''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
that k-samples are drawn from the same population without having to specify
the distribution function of that population [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
midrank : bool, optional
Type of Anderson-Darling test which is computed. If set to True (default), the
midrank test applicable to continuous and discrete populations is performed. If
False, the right side empirical distribution is used.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
References
----------
.. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
Journal of the American Statistical Association, Vol. 82, pp. 918-924.
Examples
--------
>>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
>>> sp.posthoc_anderson(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col], ascending=True, inplace=True)
groups = x[_group_col].unique()
k = groups.size
vs = np.zeros((k, k), dtype=np.float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col], x.loc[x[_group_col] == groups[j], _val_col]])[2]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
that k-samples are drawn from the same population without having to specify
the distribution function of that population [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
midrank : bool, optional
Type of Anderson-Darling test which is computed. If set to True (default), the
midrank test applicable to continuous and discrete populations is performed. If
False, the right side empirical distribution is used.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
References
----------
.. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
Journal of the American Statistical Association, Vol. 82, pp. 918-924.
Examples
--------
>>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
>>> sp.posthoc_anderson(x) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1204-L1287 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Must be specified if `a` is a pandas
        DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame object.
    val_id : int, optional
        Index of a column that contains dependent variable values. Should be
        specified if a NumPy ndarray is used as an input. It will be inferred
        from data, if not specified.
    group_id : int, optional
        Index of a column that contains independent variable values. Should be
        specified if a NumPy ndarray is used as an input. It will be inferred
        from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values
        and `group_col` column contains categorical values.
    val_col : str
        Name of the column holding dependent variable values.
    group_col : str
        Name of the column holding independent variable values.

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is
    rather simple, so it is better to specify them explicitly to prevent
    errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # List of groups (possibly ragged): flatten and label by 1-based
        # group number.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):
        # BUG FIX: explicit ids were checked with `all([val_id, group_id])`,
        # which wrongly treats a legitimate column index 0 as "unspecified".
        # Compare against None instead.
        if val_id is not None and group_id is not None:
            cols = {val_id: val_col,
                    group_id: group_col}
        else:
            # Column ids not given; infer them by heuristic: the column with
            # more unique values is treated as the dependent variable.
            if np.argmax(a.shape):
                a = a.T
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
            # BUG FIX: `np.asscalar` was removed from NumPy; `.item()` is the
            # supported equivalent.
            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
            cols = {__val_col: val_col,
                    __group_col: group_col}
        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted
    DataFrame with 'y', 'groups' and 'blocks' columns.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Wide-format table (blocks x groups) or, with ``melted=True``, an
        object indexable by the three column names below.
    y_col, group_col, block_col : optional
        Column identifiers used only when ``melted=True``.
    melted : bool, optional
        Whether the input is already in long (melted) format.

    Returns
    -------
    Tuple of (DataFrame, 'y', 'groups', 'blocks').
    '''
    if isinstance(a, DataFrame) and not melted:
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    elif melted:
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})

    elif not isinstance(a, DataFrame):
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))

        if not melted:
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
            x.columns.name = group_col
            x.index.name = block_col
            x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
        else:
            # NOTE(review): this branch is currently unreachable because the
            # `elif melted:` branch above catches all melted inputs.
            # BUG FIX: the old code assigned items into `x.columns` (an
            # immutable Index), which raises TypeError; rename properly.
            x = x.rename(columns={group_col: 'groups',
                                  block_col: 'blocks',
                                  y_col: 'y'})
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'

    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional; groups may have different
        lengths.
    val_col : str, optional
        Name of the column holding the dependent (response) variable. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the column holding the grouping (predictor) variable. Must be
        specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        P value correction method, as accepted by
        `statsmodels.sandbox.stats.multicomp.multipletests` (e.g.
        'bonferroni', 'holm', 'fdr_bh', ...).
    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction is employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons
        procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    data, v_col, g_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        data[g_col] = Categorical(data[g_col], categories=data[g_col].unique(), ordered=True)
    data.sort_values(by=[g_col, v_col], ascending=True, inplace=True)

    n = len(data.index)
    group_names = np.unique(data[g_col])
    k = group_names.size
    group_sizes = data.groupby(g_col)[v_col].count()

    # Global ranks across all observations.
    data['ranks'] = data[v_col].rank()
    avg_ranks = data.groupby(g_col)['ranks'].mean()
    sum_ranks = data.groupby(g_col)['ranks'].sum()

    # Tie correction factor.
    counts = data.groupby('ranks').count()[v_col].values
    tie_sum = np.sum(counts[counts != 1] ** 3 - counts[counts != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    # Kruskal-Wallis H statistic, corrected for ties.
    H = (12. / (n * (n + 1.))) * np.sum(sum_ranks ** 2 / group_sizes) - 3. * (n + 1.)
    H_cor = H / ties

    if ties == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(data['ranks'] ** 2.) - (n * (((n + 1.) ** 2.) / 4.)))

    def pairwise_p(gi, gj):
        # Two-sided t-test p value for a pair of group labels.
        diff = np.abs(avg_ranks.loc[gi] - avg_ranks.loc[gj])
        B = (1. / group_sizes.loc[gi] + 1. / group_sizes.loc[gj])
        D = (n - 1. - H_cor) / (n - k)
        t_value = diff / np.sqrt(S2 * B * D)
        return 2. * ss.t.sf(np.abs(t_value), df=n - k)

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        pvals[gi, gj] = pairwise_p(group_names[gi], group_names[gj])

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=group_names, columns=group_names)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    df, v_col, g_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep the groups in their order of appearance instead of sorting them.
        df[g_col] = Categorical(df[g_col], categories=df[g_col].unique(), ordered=True)
    df.sort_values(by=[g_col, v_col], ascending=True, inplace=True)

    n = len(df.index)
    groups = np.unique(df[g_col])
    k = groups.size
    group_sizes = df.groupby(g_col)[v_col].count()

    # Rank all observations jointly, then average the ranks within each group.
    df['ranks'] = df[v_col].rank()
    mean_ranks = df.groupby(g_col)['ranks'].mean()

    # Tie correction term (Glantz, 2012): sum of t^3 - t over tied rank groups.
    rank_counts = df.groupby('ranks').count()[v_col].values
    tied = rank_counts[rank_counts != 1]
    tie_total = np.sum(tied ** 3 - tied)
    tie_total = 0 if not tie_total else tie_total
    tie_term = tie_total / (12. * (n - 1))

    def dunn_p_value(gi, gj):
        # z statistic for one pair of groups; two-sided normal p value.
        delta = np.abs(mean_ranks.loc[gi] - mean_ranks.loc[gj])
        var_term = n * (n + 1.) / 12.
        size_term = 1. / group_sizes.loc[gi] + 1. / group_sizes.loc[gj]
        z = delta / np.sqrt((var_term - tie_term) * size_term)
        return 2. * ss.norm.sf(np.abs(z))

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = dunn_p_value(groups[gi], groups[gj])

    if p_adjust:
        vs[upper] = multipletests(vs[upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one; -1 marks the diagonal.
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    df, v_col, g_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in their order of appearance instead of sorting them.
        df[g_col] = Categorical(df[g_col], categories=df[g_col].unique(), ordered=True)
    df.sort_values(by=[g_col, v_col], ascending=True, inplace=True)

    n = len(df.index)
    groups = np.unique(df[g_col])
    k = groups.size
    group_sizes = df.groupby(g_col)[v_col].count()

    # Joint ranking of all observations; mean rank per group.
    df['ranks'] = df[v_col].rank()
    mean_ranks = df.groupby(g_col)['ranks'].mean()

    # Tie correction factor (Glantz, 2012), capped at 1.
    rank_counts = df.groupby('ranks').count()[v_col].values
    tied = rank_counts[rank_counts != 1]
    tie_total = np.sum(tied ** 3 - tied)
    tie_total = 0 if not tie_total else tie_total
    tie_factor = np.min([1., 1. - tie_total / (n ** 3. - n)])

    def chi_statistic(gi, gj):
        # Chi-squared distributed statistic for one pair of groups.
        delta = np.abs(mean_ranks.loc[gi] - mean_ranks.loc[gj])
        var_term = n * (n + 1.) / 12.
        size_term = 1. / group_sizes.loc[gi] + 1. / group_sizes.loc[gj]
        return delta ** 2. / (var_term * size_term)

    def tukey_statistic(gi, gj):
        # Studentized-range statistic for one pair of groups.
        delta = np.abs(mean_ranks.loc[gi] - mean_ranks.loc[gj])
        size_term = 1. / group_sizes.loc[gi] + 1. / group_sizes.loc[gj]
        return delta / np.sqrt((n * (n + 1.) / 12.) * size_term)

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    pairs = it.combinations(range(k), 2)

    if dist == 'chi':
        for gi, gj in pairs:
            vs[gi, gj] = chi_statistic(groups[gi], groups[gj]) / tie_factor
        vs[upper] = ss.chi2.sf(vs[upper], k - 1)
    elif dist == 'tukey':
        for gi, gj in pairs:
            vs[gi, gj] = tukey_statistic(groups[gi], groups[gj]) * np.sqrt(2.)
        vs[upper] = psturng(vs[upper], k, np.inf)

    # Mirror the upper triangle into the lower one; -1 marks the diagonal.
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc if
    significant results of the Friedman's test are obtained. The statistics
    refer to upper quantiles of the studentized range distribution (Tukey)
    [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via Friedman's
    test. The consequent post hoc pairwise multiple comparison test
    according to Nemenyi is conducted with this function.
    This function does not test for ties.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    df, y_name, grp_name, blk_name = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    df.sort_values(by=[grp_name, blk_name], ascending=True, inplace=True)
    df.dropna(inplace=True)

    groups = df[grp_name].unique()
    k = groups.size                      # number of groups (treatments)
    n = df[blk_name].unique().size       # number of blocks

    # Friedman-type ranking: rank within each block, then average per group.
    df['mat'] = df.groupby(blk_name)[y_name].rank()
    mean_ranks = df.groupby(grp_name)['mat'].mean()

    # Studentized-range statistic per pair of groups, stored in the upper
    # triangle; scaled by sqrt(2) before looking up p values.
    scale = np.sqrt(k * (k + 1.) / (6. * n))
    vs = np.zeros((k, k))
    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = np.abs(mean_ranks[groups[gi]] - mean_ranks[groups[gj]]) / scale
    vs *= np.sqrt(2.)

    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    vs[upper] = psturng(vs[upper], k, np.inf)

    # Mirror into the lower triangle; -1 marks the diagonal.
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover's t statistic for one pair of groups; two-sided p value
        # from Student's t with df = m*n*k - k - n + 1.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval

    def compare_tukey(i, j):
        # Same contrast referred to the studentized range distribution
        # (used for the 'single-step' adjustment).
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval

    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size                    # number of treatments (groups)
    n = x[_block_col].unique().size    # number of blocks

    # Friedman-type ranking: rank within each block, then sum ranks per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()         # sum of squared within-block ranks
    m = 1                              # replicates per cell (unreplicated design)
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # NOTE(review): Conover's T2 statistic is usually based on the squared
    # deviations of the per-group rank sums; `np.sum(R)` here is not squared,
    # which looks suspicious — verify against Conover (1999) / the reference
    # implementation before relying on these p values.
    T2 = 1 / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    # Pairwise p values go into the upper triangle.
    if p_adjust == 'single-step':
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror the upper triangle into the lower one; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test.

    NOTE(review): `p_adjust` is accepted and documented, but the adjustment
    step below is commented out, so the argument currently has no effect.

    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.

    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
        [110,112,123,130,145],
        [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x['ranks'] = x[_val_col].rank()                 # joint ranking of all values
    Ri = x.groupby(_group_col)['ranks'].mean()      # mean rank per group
    ni = x.groupby(_group_col)[_val_col].count()    # group sizes
    k = groups.size
    n = x.shape[0]
    sigma = np.sqrt(n * (n + 1) / 12.)              # rank SD under H0 (no tie correction)
    df = np.inf                                     # unused; kept for parity with related tests

    def compare(m, u):
        # Standardized mean-rank differences of group u vs. each group index in m.
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)

    stat = np.zeros((k, k))
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            # Maximum over the chain i..j-1; stored in the LOWER triangle (j > i).
            stat[j, i] = np.max(tmp)

    stat[stat < 0] = 0
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # NOTE(review): this overwrites the lower triangle (where the statistics
    # were stored) with the upper triangle, which was computed from zero
    # statistics — confirm the intended mirroring direction against the
    # reference implementation.
    p_values[tri_lower] = p_values.T[tri_lower]
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic for one pair of groups (Siegel & Castellan).
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size                    # number of groups (treatments)
    n = x[block_col].unique().size     # number of blocks

    # Friedman-type ranking: rank within each block, then average per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided normal p values; clip tiny numerical overshoots at 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Studentized-range-type statistic for one pair of groups.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size                    # number of groups (treatments)
    n = x[block_col].unique().size     # number of blocks

    # Friedman-type ranking: rank within each block, then average per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Squared statistics are referred to the chi-squared distribution (k-1 df).
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    # Mirror the upper triangle into the lower one; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover's t statistic for one pair of groups; two-sided p value.
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the order of appearance of groups and blocks.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)                    # number of treatments
    b = x[block_col].unique().size     # number of blocks
    r = b                              # replications per treatment (balanced design)
    k = t                              # block size (complete blocks)

    # Within-block ranks and per-group rank sums.
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # Student's t statistic based on weighted-rank sums S.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Normal approximation based on scaled weighted-rank sums W.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the order of appearance of groups and blocks.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)                    # number of groups (treatments)
    b = x[block_col].unique().size     # number of blocks

    # Within-block ranks, and block weights q = ranks of the block ranges.
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # FIX: the original lambdas indexed the row with the hard-coded column
    # name 'blocks' instead of the actual block column name returned by
    # __convert_to_block_df, which raises KeyError for melted DataFrames
    # with a custom block column. Use `block_col` (and avoid shadowing `x`).
    x['s'] = x.apply(lambda row: row['rr'] * q[row[block_col]], axis=1)
    x['w'] = x.apply(lambda row: row['r'] * q[row[block_col]], axis=1)

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.

    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    n_perm : int, optional
        Number of permutations used to approximate the null distribution when
        the peak `p` is unknown. Default is 100.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Not used by this test; accepted only for signature compatibility with
        the other post hoc functions.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # __convert_to_df returns (df, val_col_name, group_col_name) -- unpack it
    # consistently with the other tests in this module.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Mann-Whitney count: number of pairs with an Rj rank above an Ri rank.
        return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))

    def _ustat(Rij, g, k):
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i,j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j,i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A_p statistic: U counts ascending up to the peak p,
        # descending after it.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i+1, p+1):
                    tmp1 += U[i,j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i+1, k):
                    tmp2 += U[j,i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p+1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null expectation of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2)/4

    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -\
            np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +\
            12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: normal approximation for the standardized A_p.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean)/sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: maximize the standardized A_p over all candidate
        # peaks and approximate its null distribution by permutation.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # Permutation p value = proportion of permuted maxima above the
        # observed statistic (the original returned an array here).
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.
    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size
    r = ss.rankdata(x[_val_col])
    # Normal (van der Waerden) scores of the ranks.
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    # Global van der Waerden statistic; enters the pairwise denominator below.
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    # `np.float` alias was removed from NumPy; use the builtin.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    def compare_stats(i, j):
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts)/(n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df = n - k)
        return pval

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
            x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # Keep a plain list of per-group Series: groups may have unequal
        # sizes, and ragged np.array() construction fails on modern NumPy.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = a
        # Drop NaNs per group; keep groups in a list (possibly ragged).
        x_grouped = [np.asarray(row, dtype=float) for row in x]
        x_grouped = [row[~np.isnan(row)] for row in x_grouped]
        x_lens = np.asarray([len(row) for row in x_grouped])
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    def compare_pooled(i, j):
        # t test with a single pooled SD across all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)

        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i,j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # Pass the caller's alpha through (it was previously hard-coded to 0.05,
    # silently ignoring the parameter).
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # `np.str`/`np.int` aliases were removed from NumPy; use the builtins.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)

    vs = np.zeros((groups_len, groups_len), dtype=int)

    # Walk the summary table rows and mark significant pairs with 1.
    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]

    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Runs `scipy.stats.mannwhitneyu` on every pair of groups and returns the
    resulting p values as a square matrix.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of the DataFrame column holding the dependent (response) variable.
        Required when `a` is a pandas DataFrame.
    group_col : str, optional
        Name of the DataFrame column holding the grouping (predictor) variable.
        Required when `a` is a pandas DataFrame.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Hypothesis direction passed straight to `mannwhitneyu`. Defaults to
        'two-sided'.
    p_adjust : str, optional
        Method for adjusting p values (see
        statsmodels.sandbox.stats.multicomp): 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Sort the DataFrame by group_col before testing. Recommended unless the
        data are already sorted.

    Returns
    -------
    result : pandas DataFrame
        P values; the diagonal is filled with -1.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    groups = np.unique(df[_group_col])
    n_groups = groups.size

    pvals = np.zeros((n_groups, n_groups))

    # Upper triangle gets the raw pairwise p values.
    for gi, gj in it.combinations(range(n_groups), 2):
        sample_i = df.loc[df[_group_col] == groups[gi], _val_col]
        sample_j = df.loc[df[_group_col] == groups[gj], _val_col]
        pvals[gi, gj] = ss.mannwhitneyu(sample_i, sample_j,
                                        use_continuity=use_continuity,
                                        alternative=alternative)[1]

    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror into the lower triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)

    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
    version of the paired T-test for use with non-parametric ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of the DataFrame column holding the dependent (response) variable.
        Required when `a` is a pandas DataFrame.
    group_col : str, optional
        Name of the DataFrame column holding the grouping (predictor) variable.
        Required when `a` is a pandas DataFrame.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": Pratt treatment, includes zero-differences in the ranking
        process (more conservative)
        "wilcox": Wilcox treatment, discards all zero-differences
        "zsplit": Zero rank split, just like Pratt, but spliting the zero rank
        between positive and negative ones
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the z-statistic.
        Default is False.
    p_adjust : str, optional
        Method for adjusting p values (see
        statsmodels.sandbox.stats.multicomp): 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.

    Returns
    -------
    result : pandas DataFrame
        P values; the diagonal is filled with -1.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)

    groups = np.unique(df[_group_col])
    n_groups = groups.size

    # Collect each group's observations once, up front.
    samples = {grp: df.loc[df[_group_col] == grp, _val_col] for grp in groups}

    pvals = np.zeros((n_groups, n_groups))

    # Upper triangle gets the raw pairwise p values.
    for gi, gj in it.combinations(range(n_groups), 2):
        pvals[gi, gj] = ss.wilcoxon(samples[groups[gi]], samples[groups[gj]],
                                    zero_method=zero_method,
                                    correction=correction)[1]

    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror into the lower triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)

    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Not used by this test; accepted only for signature compatibility with
        the other post hoc functions.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # Scheffe F statistic for the pair (i, j).
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # `np.float` alias was removed from NumPy; use the builtin.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    combs = it.combinations(range(groups.size), 2)

    for i,j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_lower] = vs.T[tri_lower]

    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    xi = x_grouped.mean()
    si = x_grouped.var()

    def compare(i, j):
        dif = xi[i] - xi[j]
        # A is the sum of the two squared standard errors.
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch-Satterthwaite degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474). The original
            # code referenced an undefined name (s2i) and called any() with
            # four positional arguments -- both fixed here.
            ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
            ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
            ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
            OK = any((ok1, ok2, ok3, ok4))
            if not OK:
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # `np.float` alias was removed from NumPy; use the builtin.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    combs = it.combinations(range(groups.size), 2)

    for i,j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the raw p values.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col = None, group_col = None, sort = False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic q for the pair (i, j).
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # `np.float` alias was removed from NumPy; use the builtin.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    combs = it.combinations(range(groups.size), 2)

    for i,j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    # Convert q statistics to p values via the studentized range distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner (DSCF) all-pairs comparison test
    for a one-factorial layout with non-normally distributed residuals.
    Unlike all-pairs procedures that depend on Kruskal ranks, the DSCF test
    is basically an extension of the U-test, as re-ranking is conducted for
    each pairwise test [1]_, [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    Pandas DataFrame containing p values.
    Notes
    -----
    The p values are computed from the Tukey-distribution.
    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.
    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    data, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the appearance order of the groups via an ordered categorical.
        data[_group_col] = Categorical(data[_group_col], categories=data[_group_col].unique(), ordered=True)

    data.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(data[_group_col])
    counts = data.groupby(_group_col)[_val_col].count()
    k = groups.size

    def _tie_term(ranks):
        # Tie correction term: sum over tied groups of (t^3 - t) / 12.
        t = ranks.value_counts().values
        return np.sum((t ** 3 - t) / 12.)

    def _pair_stat(i, j):
        # Re-rank only the two groups under comparison (the DSCF idea),
        # then form a standardized Mann-Whitney U statistic.
        ni = counts.loc[i]
        nj = counts.loc[j]
        pair = data.loc[(data[_group_col] == i) | (data[_group_col] == j)].copy()
        pair['ranks'] = pair.loc[:, _val_col].rank()
        rank_sums = pair.groupby(_group_col)['ranks'].sum().loc[[i, j]]
        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - rank_sums
        u_min = np.min(u)
        s = ni + nj
        var = (nj * ni / (s * (s - 1.))) * ((s ** 3 - s) / 12. - _tie_term(pair['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(k, 1)
    tri_lower = np.tril_indices(k, -1)

    # Upper triangle: standardized statistics, converted to p values via
    # the studentized range distribution; then mirrored to the lower half.
    for i, j in it.combinations(range(k), 2):
        vs[i, j] = _pair_stat(groups[i], groups[j])

    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), k, np.inf)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
|
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    Pandas DataFrame containing p values.
    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # Quade's t-type statistic: difference of weighted rank sums S,
        # referred to a t distribution with (b - 1)(k - 1) df.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df = (b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Normal approximation based on the W rank sums scaled by ff.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original ordering of groups and blocks.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)

    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks of the observations.
    x['r'] = x.groupby(block_col)[y_col].rank()
    # Quade's block weights: blocks ranked by their sample range.
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1)/2
    # Weight centered/raw ranks by the block's Q rank. Use the block_col
    # variable rather than a hard-coded 'blocks' label for consistency
    # (__convert_to_block_df always returns block_col == 'blocks').
    x['s'] = x.apply(lambda row, w: row['rr'] * w[row[block_col]], axis=1, args=(q,))
    x['w'] = x.apply(lambda row, w: row['r'] * w[row[block_col]], axis=1, args=(q,))

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k-1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1)/2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror to the lower triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
unreplicated blocked data. This test is usually conducted if significant
results were obtained by the omnibus test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
containing elements of correspondary type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (string).
y_col : str or int, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
dist : str, optional
Method for determining p values.
The default distribution is "t", else "normal".
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
Wiley.
.. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
Reference Manual, Volume 2: Let Subcommands and Library Functions.
National Institute of Standards and Technology Handbook Series, June 2003.
.. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
blocks with additive block effects. Journal of the American Statistical
Association, 74, 680-683.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_quade(x) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1290-L1437 | [
"def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):\n\n if isinstance(a, DataFrame) and not melted:\n x = a.copy(deep=True)\n group_col = 'groups'\n block_col = 'blocks'\n y_col = 'y'\n x.columns.name = group_col\n x.index.name = bloc... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.
    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.
    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.
    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).
    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Sequence of (possibly ragged) groups: melt into a values column
        # and a 1-based group-id column.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):

        # cols ids not defined
        # trying to infer
        if not(all([val_id, group_id])):

            if np.argmax(a.shape):
                a = a.T

            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]

            # The column with fewer unique values is assumed to be the
            # grouping column. np.asscalar() was removed from NumPy 1.23;
            # ndarray.item() is the documented replacement.
            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')

            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}

        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted (long)
    DataFrame with the standard columns 'y', 'groups' and 'blocks'.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Block design matrix (rows are blocks, columns are groups), or
        already-melted data.
    y_col, group_col, block_col : str or int, optional
        Column names (or indices) of the y / group / block columns when
        `melted` is True.
    melted : bool, optional
        Whether `a` is already in melted (long) format.
    Returns
    -------
    Tuple of (x, y_col, group_col, block_col), where x is the melted
    DataFrame and the remaining values are always 'y', 'groups', 'blocks'.
    '''
    if isinstance(a, DataFrame) and not melted:
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    elif melted:
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})

    elif not isinstance(a, DataFrame):
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))

        if not melted:
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
            x.columns.name = group_col
            x.index.name = block_col
            x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
        else:
            # BUG FIX: a pandas Index is immutable, so the original
            # `x.columns[group_col] = 'groups'` raised TypeError. Rename the
            # columns instead. (NOTE(review): this branch looks unreachable,
            # since melted input is caught by the `elif melted` branch above.)
            x = x.rename(columns={group_col: 'groups',
                                  block_col: 'blocks',
                                  y_col: 'y'})
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'

    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction is employed according to Conover [1]_.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    def compare_conover(i, j):
        # Two-sided t-type test on the difference of mean ranks, using the
        # pooled rank variance S2 and the tie-corrected Kruskal-Wallis H.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        D = (n - 1. - H_cor) / (n - x_len)
        t_value = diff / np.sqrt(S2 * B * D)
        p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
        return p_value

    # Normalize input into a long-format DataFrame (values + group labels).
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the appearance order of groups; np.unique below would
        # otherwise impose a sorted label order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)  # total number of observations
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size  # number of groups
    x_lens = x.groupby(_group_col)[_val_col].count()

    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    x_ranks_sum = x.groupby(_group_col)['ranks'].sum()

    # ties
    # Tie correction factor: 1 - sum(t^3 - t) / (n^3 - n), where the t are
    # the sizes of groups of tied ranks.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    # Kruskal-Wallis H statistic, corrected for ties.
    H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
    H_cor = H / x_ties

    # Pooled variance of the ranks (simplifies when there are no ties).
    if x_ties == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))

    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    combs = it.combinations(range(x_len), 2)

    # Upper triangle: pairwise p values.
    for i, j in combs:
        vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])

    if p_adjust:
        # Multiple-comparison adjustment via statsmodels' multipletests.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror results to the lower triangle; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction will be employed according to Glantz (2012).
    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    def compare_dunn(i, j):
        # z statistic on the difference of mean ranks, with tie correction
        # subtracted from the variance term.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        z_value = diff / np.sqrt((A - x_ties) * B)
        p_value = 2. * ss.norm.sf(np.abs(z_value))
        return p_value

    # Normalize input into a long-format DataFrame (values + group labels).
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the appearance order of groups.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)  # total number of observations
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size  # number of groups
    x_lens = x.groupby(_group_col)[_val_col].count()

    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()

    # ties
    # Tie correction term per Glantz: sum(t^3 - t) / (12 (n - 1)).
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = tie_sum / (12. * (n - 1))

    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    # Upper triangle: pairwise p values.
    for i,j in combs:
        vs[i, j] = compare_dunn(x_groups_unique[i], x_groups_unique[j])

    if p_adjust:
        # Multiple-comparison adjustment via statsmodels' multipletests.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror results to the lower triangle; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction will be employed according to Glantz (2012).
    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    def compare_stats_chi(i, j):
        # Chi-squared-type statistic on the difference of mean ranks.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi

    def compare_stats_tukey(i, j):
        # Studentized-range q statistic on the difference of mean ranks.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q

    # Normalize input into a long-format DataFrame (values + group labels).
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the appearance order of groups.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)  # total number of observations
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size  # number of groups
    x_lens = x.groupby(_group_col)[_val_col].count()

    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()

    # ties
    # Tie correction factor: 1 - sum(t^3 - t) / (n^3 - n).
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    if dist == 'chi':
        # Tie-corrected chi-squared statistics -> p values from chi2(k-1).
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties

        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)

    elif dist == 'tukey':
        # q statistics -> p values from the studentized range distribution.
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)

        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)

    # Mirror results to the lower triangle; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc if
    significant results of the Friedman's test are obtained. The statistics
    refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
    [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via Friedman's
    test. The consequent post hoc pairwise multiple comparison test
    according to Nemenyi is conducted with this function.
    This function does not test for ties.
    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.
    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Studentized-range q statistic on the difference of mean ranks.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    # Normalize input into melted form with 'y', 'groups', 'blocks' columns.
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[_group_col].unique()
    k = groups.size  # number of treatments (groups)
    n = x[_block_col].unique().size  # number of blocks

    x['mat'] = x.groupby(_block_col)[_y_col].rank()  # within-block ranks
    R = x.groupby(_group_col)['mat'].mean()  # mean rank per group

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Convert q statistics to p values from the studentized range distribution.
    vs *= np.sqrt(2.)
    vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
    # Mirror results to the lower triangle; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover's t statistic on the difference of rank sums.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval

    def compare_tukey(i, j):
        # Single-step variant via the studentized range distribution.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval

    # Normalize input into melted form with 'y', 'groups', 'blocks' columns.
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size  # number of treatments (groups)
    n = x[_block_col].unique().size  # number of blocks

    x['mat'] = x.groupby(_block_col)[_y_col].rank()  # within-block ranks
    R = x.groupby(_group_col)['mat'].sum()  # rank sum per group
    A1 = (x['mat'] ** 2).sum()
    m = 1  # replicates per block/group cell (unreplicated design)
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # NOTE(review): `np.sum(R)` looks suspicious here — Conover's T2 is
    # normally based on the sum of squared (centered) rank sums, and
    # sum(R) is a constant for given n, k. Confirm against the reference
    # implementation before relying on this value.
    T2 = 1 / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    if p_adjust == 'single-step':
        # The Tukey-based variant adjusts for multiplicity by construction.
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)

        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror results to the lower triangle; -1 marks the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        NOTE(review): the adjustment step is currently commented out in the
        body below, so this argument has no effect — confirm intent.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test
    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.
    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
                      [110,112,123,130,145],
                      [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    # Normalize input to a long-format DataFrame and the resolved column names.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    # NOTE(review): the `sort` flag is currently inert — the Categorical
    # ordering step is commented out; data are always sorted by group.
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    # Rank all observations jointly, then take per-group mean ranks.
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    # Null standard deviation of ranks for n pooled observations.
    sigma = np.sqrt(n * (n + 1) / 12.)
    df = np.inf
    def compare(m, u):
        # Standardized differences of group u's mean rank against each
        # group index in m (all groups below u in the assumed ordering).
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)
    stat = np.zeros((k, k))
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            # Max standardized contrast over the intermediate groups.
            stat[j, i] = np.max(tmp)
    # Negative contrasts are consistent with the ordered alternative; clamp.
    stat[stat < 0] = 0
    # P values from the studentized range distribution (infinite df).
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # Mirror to make the matrix symmetric.
    p_values[tri_lower] = p_values.T[tri_lower]
    # NOTE(review): p_adjust application is disabled here — see docstring.
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic for the difference in mean Friedman ranks of two groups.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Rank within each block (Friedman-type ranking), then average per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUGFIX: `np.float` was removed in NumPy 1.24; the builtin float is
    # the documented replacement and is behaviorally identical here.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided p values from the normal distribution, capped at 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Standardized difference of mean Friedman ranks for groups i and j.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Rank within each block, then average the ranks per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUGFIX: `np.float` was removed in NumPy 1.24; use the builtin float.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Squared statistics follow a chi-square distribution with k-1 df.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # t statistic on the difference of group rank sums.
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the order of first appearance instead of lexical order.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)                       # number of treatments
    b = x[block_col].unique().size        # number of blocks
    r = b                                 # replications per treatment (complete design)
    k = t                                 # treatments per block (complete design)

    # Within-block ranks and per-group rank sums.
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()

    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # BUGFIX: `np.float` was removed in NumPy 1.24; use the builtin float.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
        NOTE(review): this flag is not currently forwarded to
        `ss.anderson_ksamp` — confirm intent.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve first-appearance ordering of the groups.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size

    # BUGFIX: `np.float` was removed in NumPy 1.24; use the builtin float.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        # Index 2 of anderson_ksamp's result is the approximate significance level.
        vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col],
                                      x.loc[x[_group_col] == groups[j], _val_col]])[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.
    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.
    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    n_perm : int, optional
        Number of permutations used to estimate the p value when the peak
        is unknown (p is None). Default is 100.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # BUGFIX: __convert_to_df returns a (DataFrame, val_col, group_col) tuple
    # (see the other posthoc_* functions); the original bound the whole tuple
    # to `x`, which broke every subsequent DataFrame operation.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve first-appearance ordering of the groups.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    # Joint ranks of all observations and per-group sample sizes.
    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Mann-Whitney count: number of pairs where an Rj rank exceeds an Ri rank.
        return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))

    def _ustat(Rij, g, k):
        # Pairwise Mann-Whitney count matrix U over all group pairs.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A_p statistic: sum of U counts pointing "up" to the
        # peak p plus counts pointing "down" after the peak.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p + 1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null expectation of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2) / 4

    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -
               np.sum(n**2 * (2 * n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: normal approximation of the standardized A_p.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: maximize the standardized A_p over all candidate
        # peaks and estimate the p value by permutation.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # BUGFIX: the permutation p value is the *fraction* of permuted
        # statistics exceeding the observed one. The original divided the
        # filtered array itself by n_perm, returning an array instead of
        # the documented float.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.
    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve first-appearance ordering of the groups.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size

    # Normal scores: ranks mapped through the standard normal quantile function.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    # BUGFIX: `np.float` was removed in NumPy 1.24; use the builtin float.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_stats(i, j):
        # t statistic on the difference of average normal scores.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are: 'bonferroni', 'sidak',
        'holm-sidak', 'holm', 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',
        'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # BUGFIX: keep the groups in a plain list. Wrapping ragged (unequal
        # length) slices in np.array() raises a ValueError on NumPy >= 1.24.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = a
        # BUGFIX: build each group independently instead of np.array(a), which
        # fails for ragged input (the docstring example above is ragged).
        # NaN values are dropped per group, as before.
        x_grouped = [np.asarray(xi, dtype=float) for xi in x]
        x_grouped = [xi[~np.isnan(xi)] for xi in x_grouped]
        x_lens = np.asarray([len(xi) for xi in x_grouped])

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # BUGFIX: `np.float` was removed in NumPy 1.24; use the builtin float.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_pooled(i, j):
        # Two-sided t test using a single SD pooled across *all* groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals.

    Convenience wrapper around statsmodels `pairwise_tukeyhsd` that returns
    the per-pair significance decisions as a symmetric DataFrame.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        dependent variable values (test or response variable). Values should
        have a non-nominal scale. NaN values will cause an error (please
        handle manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not
        significant), 1 is True (significant), and -1 is for diagonal
        elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # BUG FIX: the user-supplied `alpha` was previously ignored (0.05 was
    # hard-coded in the call).
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # np.str / np.int aliases were removed in NumPy >= 1.24; use builtins.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)
    vs = np.zeros((groups_len, groups_len), dtype=int)

    # The summary table has one row per pair: columns 0 and 1 are the group
    # labels, column 5 is the reject-H0 decision.
    for row in result.summary()[1:]:
        a0 = str(row[0])
        a1 = str(row[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(row[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    # Mirror the upper triangle into the lower one for a symmetric result.
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Whether to get the p-value for the one-sided hypothesis
        ('less' or 'greater') or for the two-sided hypothesis ('two-sided').
        Defaults to 'two-sided'.
    p_adjust : str, optional
        Method for adjusting p values. See
        statsmodels.sandbox.stats.multicomp for details. Available methods:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in their order of appearance instead of lexical order.
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
        df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    groups = np.unique(df[_group_col])
    k = groups.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        sample_i = df.loc[df[_group_col] == groups[i], _val_col]
        sample_j = df.loc[df[_group_col] == groups[j], _val_col]
        pvals[i, j] = ss.mannwhitneyu(sample_i, sample_j,
                                      use_continuity=use_continuity,
                                      alternative=alternative)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test. It is a
    non-parametric version of the paired T-test for use with non-parametric
    ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": Pratt treatment, includes zero-differences in the ranking
        process (more conservative);
        "wilcox": Wilcox treatment, discards all zero-differences;
        "zsplit": zero rank split, just like Pratt, but splitting the zero
        rank between positive and negative ones.
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the
        z-statistic. Default is False.
    p_adjust : str, optional
        Method for adjusting p values. See
        statsmodels.sandbox.stats.multicomp for details. Available methods:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in order of appearance. The rows themselves are
        # deliberately left unsorted (paired test relies on pairing order).
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)

    groups = np.unique(df[_group_col])
    k = groups.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        sample_i = df.loc[df[_group_col] == groups[i], _val_col]
        sample_j = df.loc[df[_group_col] == groups[j], _val_col]
        pvals[i, j] = ss.wilcoxon(sample_i, sample_j,
                                  zero_method=zero_method,
                                  correction=correction)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal
    variances Scheffe's test can be performed with parametric ANOVA
    [1]_, [2]_, [3]_. A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See
        statsmodels.sandbox.stats.multicomp for details. Default is None
        (no adjustment).

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl.,
        Berlin: Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the
        Analysis of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
        x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # Scheffe F statistic for the pair of groups (i, j).
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # np.float alias was removed in NumPy >= 1.24; use the builtin.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])
    vs[tri_lower] = vs.T[tri_lower]

    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)

    if p_adjust:
        # BUG FIX: `p_adjust` used to be accepted but silently ignored.
        p_values[tri_upper] = multipletests(p_values[tri_upper], method=p_adjust)[1]
        p_values[tri_lower] = p_values.T[tri_lower]

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data
    with unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed
    residuals but unequal groups variances. A total of m = k(k-1)/2
    hypotheses can be tested. The null hypothesis is tested in the
    two-tailed test against the alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree
        of freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according
    to Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple
        Comparisons of Means with Unequal Variances. Journal of the American
        Statistical Association, 74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
        x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    xi = x_grouped.mean()
    si = x_grouped.var()

    def compare(i, j):
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch approximation for the degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # BUG FIX: these checks previously referenced an undefined name
            # `s2i` (NameError) and called any() with four positional
            # arguments (TypeError); `any` takes a single iterable.
            ratio_n = ni[i] / ni[j]
            ratio_se = (si[i] / ni[i]) / (si[j] / ni[j])
            ok1 = (9./10. <= ratio_n) and (ratio_n <= 10./9.)
            ok2 = (9./10. <= ratio_se) and (ratio_se <= 10./9.)
            ok3 = (4./5. <= ratio_n) and (ratio_n <= 5./4.) and (1./2. <= ratio_se) and (ratio_se <= 2.)
            ok4 = (2./3. <= ratio_n) and (ratio_n <= 3./2.) and (3./4. <= ratio_se) and (ratio_se <= 4./3.)
            if not any([ok1, ok2, ok3, ok4]):
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # np.float alias was removed in NumPy >= 1.24; use the builtin.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the raw p values.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col=None, group_col=None, sort=False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed
    data with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal
    variances Tukey's test can be performed. A total of m = k(k-1)/2
    hypotheses can be tested. The null hypothesis is tested in the
    two-tailed test against the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of
        Variance, Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
        x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic for the pair of groups (i, j).
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # np.float alias was removed in NumPy >= 1.24; use the builtin.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Convert the q statistics to p values via the studentized range dist.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed
    to the all-pairs comparison procedures that depend on Kruskal ranks, the
    DSCF test is basically an extension of the U-test as re-ranking is
    conducted for each pairwise test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free
        multiple comparisons in the one-way analysis of variance,
        Communications in Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions
        to Probability and Statistics, Edited by: I. Olkin, Stanford:
        Stanford University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
        x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    counts = x.groupby(_group_col)[_val_col].count()
    k = groups.size

    def tie_term(ranks):
        # Tie correction term: sum over tied values of (t^3 - t) / 12.
        t = ranks.value_counts().values
        return np.sum((t ** 3 - t) / 12.)

    def pair_stat(i, j):
        # Re-rank the pooled pair of groups and compute the standardized
        # minimum Mann-Whitney U statistic for it.
        ni = counts.loc[i]
        nj = counts.loc[j]
        pooled = x.loc[(x[_group_col] == i) | (x[_group_col] == j)].copy()
        pooled['ranks'] = pooled.loc[:, _val_col].rank()
        r = pooled.groupby(_group_col)['ranks'].sum().loc[[i, j]]
        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - r
        u_min = np.min(u)
        s = ni + nj
        var = (nj * ni / (s * (s - 1.))) * ((s ** 3 - s) / 12. - tie_term(pooled['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)

    stats = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for i in range(k):
        for j in range(i + 1, k):
            stats[i, j] = pair_stat(groups[i], groups[j])

    # Convert to p values via the studentized range distribution and mirror.
    stats[upper] = psturng(np.abs(stats[upper]), k, np.inf)
    stats[lower] = stats.T[lower]
    np.fill_diagonal(stats, -1)
    return DataFrame(stats, index=groups, columns=groups)
|
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_mackwolfe | python | def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect
    with increasing dose level. However, the test subject may actually
    succumb to toxic effects at high doses, which leads to decreasing
    treatment effects [1]_, [2]_.

    The scope of the Mack-Wolfe Test is to test for umbrella alternatives
    for either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # NOTE(review): __convert_to_df returns a (DataFrame, val_col, group_col)
    # tuple; assigning the whole tuple to `x` and then indexing it with
    # column labels below looks wrong — verify (it likely needs unpacking).
    x = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Keep groups in order of appearance, then sort rows by group.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col], ascending=True, inplace=True)
    k = x[group_col].unique().size
    # Validate the a-priori peak, if one was given.
    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False
    Rij = x[val_col].rank()                    # overall ranks of all observations
    n = x.groupby(group_col)[val_col].count()  # per-group sample sizes
    def _fn(Ri, Rj):
        # Mann-Whitney count: number of pairs with a rank in Rj above a rank in Ri.
        return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))
    def _ustat(Rij, g, k):
        # Matrix of pairwise Mann-Whitney U statistics between all groups.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i,j] = _fn(Rij[x[group_col] == levels[i]], Rij[x[group_col] == levels[j]])
                U[j,i] = _fn(Rij[x[group_col] == levels[j]], Rij[x[group_col] == levels[i]])
        return U
    def _ap(p, U):
        # Mack-Wolfe A_p statistic: U sums increasing up to the peak p,
        # decreasing after it.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i+1, p+1):
                    tmp1 += U[i,j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i+1, k):
                    tmp2 += U[j,i]
        return tmp1 + tmp2
    def _n1(p, n):
        # Total sample size up to and including the peak group.
        return np.sum(n[:p+1])
    def _n2(p, n):
        # Total sample size from the peak group onwards.
        return np.sum(n[p:k])
    def _mean_at(p, n):
        # Null expectation of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2)/4
    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -\
               np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +\
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var
    if p:
        # Known peak: standardize A_p and use the normal approximation.
        if (x.groupby(val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean)/sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: take the maximum standardized A_p over all candidate
        # peaks and obtain the p value by permutation (n_perm resamples).
        U = _ustat(Rij, x[group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)
        p = A == stat
        est = None
        mt = []
        for i in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))
        mt = np.array(mt)
        # NOTE(review): this divides the exceeding permutation values
        # themselves by n_perm, yielding an array rather than the exceedance
        # proportion; `mt[mt > stat].size / n_perm` looks intended — verify.
        p_value = mt[mt > stat] / n_perm
return p_value, stat | Mack-Wolfe Test for Umbrella Alternatives.
In dose-finding studies one may assume an increasing treatment effect with
increasing dose level. However, the test subject may actually succumb to
toxic effects at high doses, which leads to decreasing treatment
effects [1]_, [2]_.
The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
either a known or unknown point P (i.e. dose-level), where the peak
(umbrella point) is present.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p : int, optional
The a-priori known peak as an ordinal number of the treatment group
including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
p : float
P value.
stat : float
Statistic.
References
----------
.. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
Umbrella Alternatives. Biom. J., 33, 281-290.
.. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
alternatives. J. Amer. Statist. Assoc., 76, 175-181.
Examples
--------
>>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
>>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1439-L1595 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used
        as an input. It will be inferred from data, if not specified.
    group_id : int, optional
        Index of a column that contains independent variable values
        (grouping or predictor variable). Should be specified if a NumPy
        ndarray is used as an input. It will be inferred from data, if not
        specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical
        values and `group_col` column contains categorical values.
    val_col : str
        Name of a DataFrame column that contains dependent variable values
        (test or response variable).
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).

    Notes
    -----
    Inference algorithm for determining `val_id` and `group_id` args is
    rather simple, so it is better to specify them explicitly to prevent
    errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Ragged input: one sequence per group. Group labels are 1-based
        # positional indices of the sequences.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i + 1] * length for i, length in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):
        # BUG FIX: the previous truthiness check (`all([val_id, group_id])`)
        # wrongly treated a legitimate column index 0 as "not specified".
        if val_id is None or group_id is None:
            # Column ids not defined — try to infer them. Orient the array
            # so rows are observations, then assume the column with more
            # distinct values holds measurements and the other the labels.
            if np.argmax(a.shape):
                a = a.T
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
            # np.asscalar was removed in NumPy >= 1.23; use .item() instead.
            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}
        # Order the column names by their positional index.
        cols_vals = [cols[key] for key in sorted(cols)]
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper: normalize block-design input into a melted (long-form)
    DataFrame with the fixed column names 'y', 'groups' and 'blocks'.

    Returns a 4-tuple (melted DataFrame, y column name, group column name,
    block column name); the three names are always the literals above,
    regardless of the input column names.
    '''
    if isinstance(a, DataFrame) and not melted:
        # Wide DataFrame: columns are groups, index is blocks — melt it.
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
    elif melted:
        # Already long-form: just re-label the three columns of interest.
        # NOTE(review): reached for ANY melted input (DataFrame or not);
        # `a[group_col]` etc. presumes column-style indexing — verify for
        # non-DataFrame inputs.
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})
    elif not isinstance(a, DataFrame):
        # Raw array-like in wide layout: build a DataFrame, then melt.
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
        if not melted:
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
            x.columns.name = group_col
            x.index.name = block_col
            x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
        else:
            # NOTE(review): this branch looks unreachable (the earlier
            # `elif melted` already captured melted=True), and assigning to
            # elements of a pandas Index is not supported — it would raise
            # TypeError if ever executed. Verify before relying on it.
            x.columns[group_col] = 'groups'
            x.columns[block_col] = 'blocks'
            x.columns[y_col] = 'y'
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values. Diagonal is filled with -1 as a placeholder.

    Notes
    -----
    A tie correction is employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    def compare_conover(i, j):
        # Conover's t-type statistic for the pair of group labels (i, j),
        # built from the difference of mean ranks; two-sided p value from
        # Student's t with n - x_len degrees of freedom.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        D = (n - 1. - H_cor) / (n - x_len)
        t_value = diff / np.sqrt(S2 * B * D)
        p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
        return p_value
    # Normalize input to a melted DataFrame with value and group columns.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve the original group order by making the column an ordered
        # Categorical before sorting.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)                 # total number of observations
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size     # number of groups
    x_lens = x.groupby(_group_col)[_val_col].count()   # per-group sizes
    x['ranks'] = x[_val_col].rank()  # ranks over the pooled sample
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    x_ranks_sum = x.groupby(_group_col)['ranks'].sum()
    # ties: for each tied group of size t, accumulate t**3 - t
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    # Tie correction divisor, capped at 1 (equals 1 when there are no ties).
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    # Kruskal-Wallis H statistic, then tie-corrected H.
    H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
    H_cor = H / x_ties
    if x_ties == 1:
        # No ties: closed-form variance of the ranks.
        S2 = n * (n + 1.) / 12.
    else:
        # Ties present: sample variance of the (midrank-adjusted) ranks.
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))
    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    # Fill the upper triangle with pairwise p values.
    combs = it.combinations(range(x_len), 2)
    for i, j in combs:
        vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])
    if p_adjust:
        # Adjust only the upper triangle (each pair counted once).
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror to the lower triangle and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values. Diagonal is filled with -1 as a placeholder.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    def compare_dunn(i, j):
        # Dunn's z statistic for group labels (i, j): difference of mean
        # ranks scaled by its standard error (with tie correction x_ties
        # subtracted from the variance term); two-sided normal p value.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        z_value = diff / np.sqrt((A - x_ties) * B)
        p_value = 2. * ss.norm.sf(np.abs(z_value))
        return p_value
    # Normalize input to a melted DataFrame with value and group columns.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve original group order via an ordered Categorical.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)                 # total number of observations
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size     # number of groups
    x_lens = x.groupby(_group_col)[_val_col].count()   # per-group sizes
    x['ranks'] = x[_val_col].rank()  # pooled ranks
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    # ties: for each tied group of size t, accumulate t**3 - t
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    # Tie-correction term subtracted from the variance in compare_dunn
    # (zero when there are no ties).
    x_ties = tie_sum / (12. * (n - 1))
    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    # Fill the upper triangle with pairwise p values.
    for i,j in combs:
        vs[i, j] = compare_dunn(x_groups_unique[i], x_groups_unique[j])
    if p_adjust:
        # Adjust only the upper triangle (each pair counted once).
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror to the lower triangle and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values. Diagonal is filled with -1 as a placeholder.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).
    If `dist` is neither "chi" nor "tukey", the upper triangle is left as
    raw zeros (no p values are computed).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    def compare_stats_chi(i, j):
        # Chi-squared-type statistic: squared mean-rank difference over its
        # variance term (tie correction is applied by the caller).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi
    def compare_stats_tukey(i, j):
        # Studentized-range-type statistic q for the same pair (the caller
        # multiplies by sqrt(2) before evaluating psturng).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q
    # Normalize input to a melted DataFrame with value and group columns.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve original group order via an ordered Categorical.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)                 # total number of observations
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size     # number of groups
    x_lens = x.groupby(_group_col)[_val_col].count()   # per-group sizes
    x['ranks'] = x[_val_col].rank()  # pooled ranks
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    # ties: for each tied group of size t, accumulate t**3 - t
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    # Tie correction divisor, capped at 1 (equals 1 when there are no ties).
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if dist == 'chi':
        # Chi-squared p values with k-1 degrees of freedom, tie-corrected.
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
    elif dist == 'tukey':
        # Studentized range p values (psturng expects q * sqrt(2)).
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
    # Mirror to the lower triangle and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc if
    significant results of the Friedman's test are obtained. The statistics
    refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
    [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values. Diagonal is filled with -1 as a placeholder.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via Friedman's
    test. The consequent post hoc pairwise multiple comparison test
    according to Nemenyi is conducted with this function.
    This function does not test for ties.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    # Normalize input to a melted block-design DataFrame.
    df, _y, _grp, _blk = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    df.sort_values(by=[_grp, _blk], ascending=True, inplace=True)
    df.dropna(inplace=True)
    groups = df[_grp].unique()
    k = groups.size                     # number of treatments
    n = df[_blk].unique().size          # number of blocks
    # Friedman-type ranking: rank within each block, then average per group.
    df['mat'] = df.groupby(_blk)[_y].rank()
    mean_ranks = df.groupby(_grp)['mat'].mean()
    # Common standard error of a mean-rank difference for this design.
    denom = np.sqrt(k * (k + 1.) / (6. * n))
    vs = np.zeros((k, k))
    for i, j in it.combinations(range(k), 2):
        vs[i, j] = np.abs(mean_ranks[groups[i]] - mean_ranks[groups[j]]) / denom
    # psturng expects q-scale values: scale by sqrt(2) before lookup.
    vs *= np.sqrt(2.)
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    vs[upper] = psturng(vs[upper], k, np.inf)
    # Mirror p values to the lower triangle and mark the diagonal.
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons

    Returns
    -------
    result : pandas DataFrame
        P values. Diagonal is filled with -1 as a placeholder.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Conover's t statistic for rank-sum difference of groups i and j,
        # two-sided p value from Student's t with (m*n*k - k - n + 1) df.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval
    def compare_tukey(i, j):
        # Same statistic on the studentized-range (q) scale; used for the
        # 'single-step' (Tukey) adjustment.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval
    # Normalize input to a melted block-design DataFrame.
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = np.unique(x[_group_col])
    k = groups.size                    # number of treatments
    n = x[_block_col].unique().size    # number of blocks
    # Friedman-type ranking: rank within each block, then sum per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()
    m = 1                              # replications per cell (unreplicated design)
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # BUG FIX: T2 must be based on the sum of *squared deviations* of the
    # rank sums R_j from their expectation n*m*(m*k+1)/2. The previous code
    # used np.sum(R) - n*m*((m*k+1)/2.)**2, but sum(R) is a data-independent
    # constant (n*k*(k+1)/2 for m=1), so T2 did not reflect the spread of
    # the rank sums at all.
    T2 = 1. / S2 * np.sum((R - n * m * ((m * k + 1.) / 2.)) ** 2.)
    A = S2 * (2. * n * (m * k - 1.)) / (m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if p_adjust == 'single-step':
        # Tukey-style simultaneous inference via the studentized range.
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            # Adjust only the upper triangle (each pair counted once).
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror to the lower triangle and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
        NOTE(review): currently unused — the corresponding Categorical code
        is commented out below.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        NOTE(review): currently unused — the multipletests call is
        commented out below.

    Returns
    -------
    result : pandas DataFrame
        P values. Diagonal is filled with -1 as a placeholder.

    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test

    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.

    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
                      [110,112,123,130,145],
                      [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    # Normalize input to a melted DataFrame with value and group columns.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    x['ranks'] = x[_val_col].rank()    # pooled ranks
    Ri = x.groupby(_group_col)['ranks'].mean()   # per-group mean ranks
    ni = x.groupby(_group_col)[_val_col].count() # per-group sizes
    k = groups.size
    n = x.shape[0]
    sigma = np.sqrt(n * (n + 1) / 12.)  # SD of pooled ranks (no tie correction)
    df = np.inf
    # NOTE(review): `df` is assigned but never used; psturng below is called
    # with np.inf directly.
    def compare(m, u):
        # Standardized differences between group u and every group index in m.
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)
    stat = np.zeros((k, k))
    # For each ordered pair i < j, the statistic is the maximum standardized
    # difference between group j and any group with index in [i, j).
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            stat[j, i] = np.max(tmp)
    # Negative statistics (ordering violated) are clipped to zero.
    stat[stat < 0] = 0
    # P values from the studentized range distribution.
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # Mirror the lower triangle (where stats were stored) to the upper one.
    p_values[tri_lower] = p_values.T[tri_lower]
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values. Diagonal is filled with -1 as a placeholder.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # z statistic for the mean-rank difference of groups i and j.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval
    # Normalize input to a melted block-design DataFrame.
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = groups.size                    # number of treatments
    n = x[block_col].unique().size     # number of blocks
    # Friedman-type ranking: rank within each block, then average per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Two-sided normal p values, clamped to 1 (the adjustment below can only
    # see valid probabilities).
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.
    if p_adjust:
        # Adjust only the upper triangle (each pair counted once).
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror to the lower triangle and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values. Diagonal is filled with -1 as a
    placeholder.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # q statistic for the mean-rank difference of groups i and j.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval
    # Normalize input to a melted block-design DataFrame.
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = groups.size                    # number of treatments
    n = x[block_col].unique().size     # number of blocks
    # Friedman-type ranking: rank within each block, then average per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Squared statistics are referred to the chi-square distribution with
    # k - 1 degrees of freedom.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)
    # Mirror to the lower triangle and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values. Diagonal is filled with -1 as a
    placeholder.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Conover's t statistic for the rank-sum difference of groups i, j;
        # two-sided p value from Student's t with df degrees of freedom.
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df = df)
        return pval
    # Normalize input to a melted block-design DataFrame.
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    if not sort:
        # Preserve original group/block order via ordered Categoricals.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    t = len(groups)                    # number of treatments
    b = x[block_col].unique().size     # number of blocks
    # For a complete block design every treatment appears in every block,
    # so each treatment occurs r = b times and each block has k = t entries.
    r = b
    k = t
    # Rank within each block, then sum ranks per treatment.
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D         # Durbin's chi-square-type statistic
    # Standard error of a rank-sum difference for the pairwise t tests.
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    if p_adjust:
        # Adjust only the upper triangle (each pair counted once).
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror to the lower triangle and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Ordered Categorical preserves the order of first appearance.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size
    # NOTE: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(k), 2):
        # BUG FIX: the `midrank` argument was accepted but never forwarded to
        # scipy, so it had no effect. Pass it through to anderson_ksamp.
        vs[i, j] = ss.anderson_ksamp(
            [x.loc[x[_group_col] == groups[i], _val_col],
             x.loc[x[_group_col] == groups[j], _val_col]],
            midrank=midrank)[2]
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # Two-sided p value from the t distribution for a pair of groups.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        return 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))

    def compare_stats_norm(i, j):
        # Two-sided p value from the normal distribution for a pair of groups.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        return 2. * ss.norm.sf(np.abs(zval))

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks of y and ranks of the block ranges (Quade weights).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # BUG FIX: the block weights were looked up via the hard-coded column name
    # 'blocks' instead of `block_col`. Series.map(q) is the vectorized
    # equivalent of the previous row-wise apply.
    x['s'] = x['rr'] * x[block_col].map(q)
    x['w'] = x['r'] * x[block_col].map(q)

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # NOTE: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.
    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size

    # Normal (van der Waerden) scores from the overall ranks.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    # NOTE: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_stats(i, j):
        # Two-sided t test on the difference of mean normal scores.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        return 2. * ss.t.sf(np.abs(tval), df=n - k)

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        groups = x[group_col].unique()
        # BUG FIX: previously the value column was sliced by integer position
        # against NaN-skipping group counts, which misaligned groups whenever
        # NaNs were present. Select each group by label and drop NaNs instead.
        x_grouped = [x.loc[x[group_col] == g, val_col].dropna().values for g in groups]
    else:
        x = a
        # BUG FIX: np.array() over a ragged list of lists is an error in
        # modern NumPy; keep the groups as a plain list of 1-d float arrays.
        x_grouped = [np.asarray(row, dtype=float) for row in a]
        x_grouped = [row[~np.isnan(row)] for row in x_grouped]

    x_lens = np.asarray([len(g) for g in x_grouped])
    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # NOTE: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(x_len, 1)
    tri_lower = np.tril_indices(x_len, -1)

    def compare_pooled(i, j):
        # Two-sided t test using a SD pooled across *all* groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1. / x_lens[i] + 1. / x_lens[j])
        t_value = diff / se_diff
        return 2. * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)
    if pool_sd:
        x_means = np.asarray([np.mean(g) for g in x_grouped])
        x_sd = np.asarray([np.std(g, ddof=1) for g in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        return DataFrame(vs, index=groups, columns=groups)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # BUG FIX: `alpha` was previously hard-coded to 0.05, silently ignoring
    # the caller's argument.
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # NOTE: np.str / np.int were removed in NumPy 1.24; builtins are equivalent.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)
    vs = np.zeros((groups_len, groups_len), dtype=int)

    # Translate the statsmodels summary table into a 0/1 significance matrix.
    for row in result.summary()[1:]:
        a0 = str(row[0])
        a1 = str(row[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(row[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Whether to get the p-value for the one-sided hypothesis
        ('less' or 'greater') or for the two-sided hypothesis ('two-sided').
        Defaults to 'two-sided'.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    # BUG FIX: np.unique() re-sorts group labels, which discarded the
    # appearance order set up via Categorical when sort=False. Use the
    # pandas unique() accessor, consistent with the other posthoc functions.
    groups = x[_group_col].unique()
    x_len = groups.size
    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(x_len), 2):
        vs[i, j] = ss.mannwhitneyu(x.loc[x[_group_col] == groups[i], _val_col],
                                   x.loc[x[_group_col] == groups[j], _val_col],
                                   use_continuity=use_continuity,
                                   alternative=alternative)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
    version of the paired T-test for use with non-parametric ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": Pratt treatment, includes zero-differences in the ranking
        process (more conservative)
        "wilcox": Wilcox treatment, discards all zero-differences
        "zsplit": Zero rank split, just like Pratt, but spliting the zero rank
        between positive and negative ones
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the z-statistic.
        Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.
    The signed-rank test is a *paired* test: scipy's wilcoxon requires the
    two samples of each pair to have equal length.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
        #x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    # BUG FIX: np.unique() re-sorts group labels, which discarded the
    # appearance order set up via Categorical when sort=False. Use the
    # pandas unique() accessor, consistent with the other posthoc functions.
    groups = x[_group_col].unique()
    x_len = groups.size
    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(x_len), 2):
        vs[i, j] = ss.wilcoxon(x.loc[x[_group_col] == groups[i], _val_col],
                               x.loc[x[_group_col] == groups[j], _val_col],
                               zero_method=zero_method, correction=correction)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    # BUG FIX: np.unique() re-sorts group labels, which discarded the
    # appearance order set up via Categorical when sort=False. Use the
    # pandas unique() accessor, consistent with the other posthoc functions.
    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # Scheffe F statistic for a single pair of group labels.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        return dif ** 2. / A

    # NOTE: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_lower] = vs.T[tri_lower]

    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    xi = x_grouped.mean()
    si = x_grouped.var()

    def compare(i, j):
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch approximation for the degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # BUG FIX: this branch referenced an undefined name `s2i`
            # (NameError) and called any() with four positional arguments
            # (TypeError); use the group variances `si` and a list.
            nr = ni[i] / ni[j]
            vr = (si[i] / ni[i]) / (si[j] / ni[j])
            ok1 = (9. / 10. <= nr) and (nr <= 10. / 9.)
            ok2 = (9. / 10. <= vr) and (vr <= 10. / 9.)
            ok3 = (4. / 5. <= nr) and (nr <= 5. / 4.) and (1. / 2. <= vr) and (vr <= 2.)
            ok4 = (2. / 3. <= nr) and (nr <= 3. / 2.) and (3. / 4. <= vr) and (vr <= 4. / 3.)
            if not any([ok1, ok2, ok3, ok4]):
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        return 2. * ss.t.sf(np.abs(t_val), df=df)

    # NOTE: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the raw p values.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col=None, group_col=None, sort=False):
    '''Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances [1]_, [2]_.

    For all-pairs comparisons in a one-factorial layout with normally
    distributed residuals and equal variances, a total of m = k(k-1)/2
    hypotheses can be tested against the two-tailed alternative.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame with the data.
    val_col : str, optional
        Name of the DataFrame column with dependent (response) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the DataFrame column with independent (grouping) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey (studentized range)
    distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of
        Variance, Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic for one pair of groups.
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # BUG FIX: the `np.float` alias was removed in NumPy 1.24 and raised
    # AttributeError; the builtin float is equivalent here.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner (DSCF) all-pairs comparison test
    for a one-factorial layout with non-normally distributed residuals.

    Unlike all-pairs procedures based on Kruskal ranks, the DSCF test is an
    extension of the U-test: ranks are recomputed for every pair of groups
    [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame with the data.
    val_col : str, optional
        Name of the DataFrame column with dependent (response) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the DataFrame column with independent (grouping) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey (studentized range)
    distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    data, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        data[_group_col] = Categorical(data[_group_col], categories=data[_group_col].unique(), ordered=True)
    data.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(data[_group_col])
    counts = data.groupby(_group_col)[_val_col].count()
    k = groups.size

    def tie_term(ranks):
        # Correction term for tied ranks within a pairwise subset.
        freq = ranks.value_counts().values
        return np.sum((freq ** 3 - freq) / 12.)

    def pair_stat(gi, gj):
        # Re-rank only the two groups under comparison and form the
        # standardized Mann-Whitney statistic for the pair.
        ni = counts.loc[gi]
        nj = counts.loc[gj]
        subset = data.loc[(data[_group_col] == gi) | (data[_group_col] == gj)].copy()
        subset['ranks'] = subset.loc[:, _val_col].rank()
        rank_sums = subset.groupby(_group_col)['ranks'].sum().loc[[gi, gj]]
        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - rank_sums
        u_min = np.min(u)
        s = ni + nj
        var = (nj * ni / (s * (s - 1.))) * ((s ** 3 - s) / 12. - tie_term(subset['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = pair_stat(groups[gi], groups[gj])

    vs[upper] = psturng(np.abs(vs[upper]), k, np.inf)
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
|
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame with the data.
    val_col : str, optional
        Name of the DataFrame column with dependent (response) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the DataFrame column with independent (grouping) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See
        statsmodels.sandbox.stats.multicomp for details. Available methods:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions
    for one-way ANOVA, the van der Waerden test using normal scores can be
    employed, followed by these pairwise comparisons. There is no tie
    correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons
        procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample
        problem and their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size

    # Normal scores: ranks mapped through the inverse normal CDF.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    # Van der Waerden global test statistic.
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    def compare_stats(i, j):
        # Two-sided p value from the t distribution for one pair.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    # BUG FIX: the `np.float` alias was removed in NumPy 1.24 and raised
    # AttributeError; the builtin float is equivalent here. The unused
    # local `param` was also dropped.
    vs = np.zeros((k, k), dtype=float)
    tri_upper = np.triu_indices(k, 1)
    tri_lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
levels. See references for additional information [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
For one-factorial designs with samples that do not meet the assumptions for
one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
normal scores can be employed. Provided that significant differences were
detected by this global test, one may be interested in applying post hoc
tests according to van der Waerden for pairwise multiple comparisons of the
group levels.
There is no tie correction applied in this function.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
their power, Indagationes Mathematicae, 14, 453-458.
Examples
--------
>>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
>>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1598-L1709 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values.
        Must be specified if `a` is a pandas DataFrame object.
    val_id : int, optional
        Index of a column that contains dependent variable values. Should be
        specified if a NumPy ndarray is used as an input; inferred from data
        otherwise.
    group_id : int, optional
        Index of a column that contains independent variable values. Should
        be specified if a NumPy ndarray is used as an input; inferred from
        data otherwise.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data; `val_col` column contains numerical
        values and `group_col` column contains categorical values.
    val_col : str
        Name of the column with dependent variable values.
    group_col : str
        Name of the column with independent variable values.

    Notes
    -----
    The inference algorithm for `val_id`/`group_id` is rather simple, so it
    is better to specify them explicitly to prevent errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    # Ragged input (list of sequences, or an object ndarray whose shape
    # tuple does not contain 2): one inner sequence per group.
    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        grps_len = map(len, a)
        grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):
        # BUG FIX: `all([val_id, group_id])` treated a valid column index 0
        # as "not specified"; compare against None explicitly.
        if val_id is None or group_id is None:
            # Orient the array as rows = observations, then infer which of
            # the two columns is the grouping one (fewer unique values).
            if np.argmax(a.shape):
                a = a.T
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
            # BUG FIX: `np.asscalar` was removed in NumPy 1.23;
            # `ndarray.item()` is the documented replacement.
            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}
        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted
    DataFrame with columns 'y', 'groups' and 'blocks'.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Block-design matrix (rows = blocks, columns = groups) or, when
        `melted` is True, already-melted data addressed by the *_col args.
    y_col, group_col, block_col : str or int, optional
        Column names/indices of the response, group and block values.
        Required when `melted` is True.
    melted : bool, optional
        Whether `a` is already in melted (long) format.

    Returns
    -------
    tuple
        (x, 'y', 'groups', 'blocks') where `x` is the melted DataFrame.
    '''
    if isinstance(a, DataFrame) and not melted:
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
    elif melted:
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})
    elif not isinstance(a, DataFrame):
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
        if not melted:
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
            x.columns.name = group_col
            x.index.name = block_col
            x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
        else:
            # BUG FIX: pandas Index objects are immutable, so the original
            # `x.columns[group_col] = 'groups'` raised TypeError; rename the
            # columns instead. (NOTE(review): this branch looks unreachable,
            # since melted inputs are handled by the `elif melted` branch
            # above — confirm before removing.)
            x = x.rename(columns={group_col: 'groups',
                                  block_col: 'blocks',
                                  y_col: 'y'})
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Conover's post hoc pairwise test for multiple comparisons of mean rank
    sums. May be used after a Kruskal-Wallis one-way analysis of variance by
    ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional; groups may have different
        lengths.
    val_col : str, optional
        Name of the DataFrame column with dependent (response) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the DataFrame column with independent (grouping) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See
        statsmodels.sandbox.stats.multicomp for details. Available methods:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Whether to sort the DataFrame by `group_col`; recommended unless the
        data are sorted manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction is employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons
        procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    data, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        data[_group_col] = Categorical(data[_group_col], categories=data[_group_col].unique(), ordered=True)
    data.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(data.index)
    uniq_groups = np.unique(data[_group_col])
    k = uniq_groups.size
    counts = data.groupby(_group_col)[_val_col].count()

    data['ranks'] = data[_val_col].rank()
    avg_ranks = data.groupby(_group_col)['ranks'].mean()
    sum_ranks = data.groupby(_group_col)['ranks'].sum()

    # Tie correction factor (Conover).
    tallies = data.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(tallies[tallies != 1] ** 3 - tallies[tallies != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    # Tie-corrected Kruskal-Wallis H statistic.
    H = (12. / (n * (n + 1.))) * np.sum(sum_ranks ** 2 / counts) - 3. * (n + 1.)
    H_cor = H / ties

    if ties == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(data['ranks'] ** 2.) - (n * (((n + 1.) ** 2.) / 4.)))

    def pairwise_p(gi, gj):
        # Conover's t statistic for one pair of groups, two-sided p value.
        diff = np.abs(avg_ranks.loc[gi] - avg_ranks.loc[gj])
        B = 1. / counts.loc[gi] + 1. / counts.loc[gj]
        D = (n - 1. - H_cor) / (n - k)
        t_value = diff / np.sqrt(S2 * B * D)
        return 2. * ss.t.sf(np.abs(t_value), df=n - k)

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = pairwise_p(uniq_groups[gi], uniq_groups[gj])

    if p_adjust:
        vs[upper] = multipletests(vs[upper], method=p_adjust)[1]

    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=uniq_groups, columns=uniq_groups)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Dunn's post hoc pairwise test for multiple comparisons of mean rank
    sums. May be used after a Kruskal-Wallis one-way analysis of variance by
    ranks to do pairwise comparisons [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional; groups may have different
        lengths.
    val_col : str, optional
        Name of the DataFrame column with dependent (response) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the DataFrame column with independent (grouping) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See
        statsmodels.sandbox.stats.multicomp for details. Available methods:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Whether to sort the DataFrame by `group_col`; recommended unless the
        data are sorted manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction is employed according to Glantz (2012).

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    data, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        data[_group_col] = Categorical(data[_group_col], categories=data[_group_col].unique(), ordered=True)
    data.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(data.index)
    uniq_groups = np.unique(data[_group_col])
    k = uniq_groups.size
    counts = data.groupby(_group_col)[_val_col].count()

    data['ranks'] = data[_val_col].rank()
    avg_ranks = data.groupby(_group_col)['ranks'].mean()

    # Tie correction term (Glantz, 2012).
    tallies = data.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(tallies[tallies != 1] ** 3 - tallies[tallies != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    tie_cor = tie_sum / (12. * (n - 1))

    def pairwise_p(gi, gj):
        # Dunn's z statistic for one pair of groups, two-sided p value.
        diff = np.abs(avg_ranks.loc[gi] - avg_ranks.loc[gj])
        A = n * (n + 1.) / 12.
        B = 1. / counts.loc[gi] + 1. / counts.loc[gj]
        z_value = diff / np.sqrt((A - tie_cor) * B)
        return 2. * ss.norm.sf(np.abs(z_value))

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = pairwise_p(uniq_groups[gi], uniq_groups[gj])

    if p_adjust:
        vs[upper] = multipletests(vs[upper], method=p_adjust)[1]

    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=uniq_groups, columns=uniq_groups)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Nemenyi's post hoc pairwise test for multiple comparisons of mean rank
    sums. May be used after a Kruskal-Wallis one-way analysis of variance by
    ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional; groups may have different
        lengths.
    val_col : str, optional
        Name of the DataFrame column with dependent (response) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of the DataFrame column with independent (grouping) variable
        values. Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Distribution used to obtain the p value: "chi" (chi-squared,
        default) or "tukey" (studentized range).
    sort : bool, optional
        Whether to sort the DataFrame by `group_col`; recommended unless the
        data are sorted manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction is employed according to Glantz (2012).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    data, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        data[_group_col] = Categorical(data[_group_col], categories=data[_group_col].unique(), ordered=True)
    data.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(data.index)
    uniq_groups = np.unique(data[_group_col])
    k = uniq_groups.size
    counts = data.groupby(_group_col)[_val_col].count()

    data['ranks'] = data[_val_col].rank()
    avg_ranks = data.groupby(_group_col)['ranks'].mean()

    # Tie correction factor.
    tallies = data.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(tallies[tallies != 1] ** 3 - tallies[tallies != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    tie_cor = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    def chi_stat(gi, gj):
        # Chi-squared statistic for one pair of groups.
        diff = np.abs(avg_ranks.loc[gi] - avg_ranks.loc[gj])
        A = n * (n + 1.) / 12.
        B = 1. / counts.loc[gi] + 1. / counts.loc[gj]
        return diff ** 2. / (A * B)

    def tukey_stat(gi, gj):
        # Studentized range statistic for one pair of groups.
        diff = np.abs(avg_ranks.loc[gi] - avg_ranks.loc[gj])
        B = 1. / counts.loc[gi] + 1. / counts.loc[gj]
        return diff / np.sqrt((n * (n + 1.) / 12.) * B)

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    pairs = it.combinations(range(k), 2)

    if dist == 'chi':
        for gi, gj in pairs:
            vs[gi, gj] = chi_stat(uniq_groups[gi], uniq_groups[gj]) / tie_cor
        vs[upper] = ss.chi2.sf(vs[upper], k - 1)
    elif dist == 'tukey':
        for gi, gj in pairs:
            vs[gi, gj] = tukey_stat(uniq_groups[gi], uniq_groups[gj]) * np.sqrt(2.)
        vs[upper] = psturng(vs[upper], k, np.inf)

    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=uniq_groups, columns=uniq_groups)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Pairwise comparisons using the Nemenyi post hoc test for unreplicated
    blocked data, usually conducted after significant results of Friedman's
    test. The statistics refer to upper quantiles of the studentized range
    (Tukey) distribution [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is False (default), `a` is a block-design
        matrix: rows are blocks, columns are groups, and the *_col args are
        not needed. If `melted` is True, y_col, block_col and group_col must
        name (DataFrame) or index (array) the corresponding columns.
    y_col : str or int
        Column with y data. Must be specified if `a` is a DataFrame.
    block_col : str or int
        Column with blocking factor values. Must be specified if `a` is a
        DataFrame.
    group_col : str or int
        Column with treatment (group) factor values. Must be specified if
        `a` is a DataFrame.
    melted : bool, optional
        Whether data are given as melted columns "y", "blocks", "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures (unreplicated block design) can
    be conducted via Friedman's test; this function performs the consequent
    Nemenyi pairwise comparisons. Ties are not tested for.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    df, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    df.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    df.dropna(inplace=True)

    groups = df[_group_col].unique()
    k = groups.size
    n = df[_block_col].unique().size

    # Rank observations within each block, then average ranks per group.
    df['mat'] = df.groupby(_block_col)[_y_col].rank()
    R = df.groupby(_group_col)['mat'].mean()

    def q_stat(gi, gj):
        # Studentized-range statistic for a pair of group indices.
        return np.abs(R[groups[gi]] - R[groups[gj]]) / np.sqrt(k * (k + 1.) / (6. * n))

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = q_stat(gi, gj)

    vs *= np.sqrt(2.)
    vs[upper] = psturng(vs[upper], k, np.inf)
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover's t-type statistic on the difference of rank sums;
        # degrees of freedom per Conover (1999).
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval

    def compare_tukey(i, j):
        # Tukey-type comparison using the studentized range distribution.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval

    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size                      # number of treatments
    n = x[_block_col].unique().size      # number of blocks

    # Within-block ranks; R holds per-group rank sums.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()

    m = 1  # replications per cell (unreplicated block design)
    S2 = m / (m * k - 1.) * (A1 - m * k * n * (m * k + 1.)**2. / 4.)
    # Friedman-type statistic: sum of squared deviations of rank sums from
    # their expectation n*m*(m*k+1)/2, scaled by S2. The previous code used
    # np.sum(R), but the total of all rank sums is the constant n*k*(k+1)/2,
    # which made T2 independent of the data.
    T2 = 1. / S2 * np.sum((R - n * m * (m * k + 1.) / 2.) ** 2.)
    A = S2 * (2. * n * (m * k - 1.)) / (m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    if p_adjust == 'single-step':
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    # Mirror the upper triangle into the lower one; diagonal is marked -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        NOTE(review): the adjustment step below is commented out, so this
        parameter currently has no effect on the result — confirm intent.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test
    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.
    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
        [110,112,123,130,145],
        [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    # Global (pooled) ranks; Ri = mean rank per group, ni = group sizes.
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    # Standard deviation of pooled ranks under the null (no ties correction).
    sigma = np.sqrt(n * (n + 1) / 12.)
    df = np.inf
    def compare(m, u):
        # Standardized differences between group u and every group index in m.
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)
    stat = np.zeros((k, k))
    # Fills the LOWER triangle: stat[j, i] with j > i is the max standardized
    # difference between group j and all groups i..j-1 (simply ordered test).
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            stat[j, i] = np.max(tmp)
    # Negative statistics are truncated at zero (one-sided, increasing order).
    stat[stat < 0] = 0
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # NOTE(review): this copies the UPPER triangle (where stat stayed 0, i.e.
    # uncomputed) over the computed lower triangle. Verify the mirror
    # direction — `p_values[tri_upper] = p_values.T[tri_upper]` may have been
    # intended.
    p_values[tri_lower] = p_values.T[tri_lower]
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic on mean-rank differences (Siegel & Castellan 1988).
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size                   # number of treatments
    n = x[block_col].unique().size    # number of blocks

    # Within-block ranks; R holds per-group mean ranks.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # np.float was a deprecated alias for the builtin float and is removed in
    # modern NumPy; use the builtin directly.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided normal p values, clipped at 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    Pandas DataFrame containing p values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Standardized mean-rank difference between groups i and j.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size                   # number of treatments
    n = x[block_col].unique().size    # number of blocks

    # Within-block ranks; R holds per-group mean ranks.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # np.float was a deprecated alias for the builtin float and is removed in
    # modern NumPy; use the builtin directly.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Squared statistic is referred to the chi-square distribution, df = k-1.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    Pandas DataFrame containing p values.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover's t-type comparison of rank sums (two-sided).
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df = df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original appearance order of factor levels.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)                   # number of treatments
    b = x[block_col].unique().size    # number of blocks
    r = b                             # replications per treatment (complete design)
    k = t                             # treatments per block (complete design)

    # Within-block ranks; Rj holds per-group rank sums.
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k -1))))
    df = b * k - b - t + 1

    # np.float was a deprecated alias for the builtin float and is removed in
    # modern NumPy; use the builtin directly.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.
    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original appearance order of factor levels.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size
    # np.float was a deprecated alias for the builtin float and is removed in
    # modern NumPy; use the builtin directly.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    for i, j in combs:
        # anderson_ksamp returns (statistic, critical_values, significance);
        # element [2] is the approximate p value for the pair (i, j).
        vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col], x.loc[x[_group_col] == groups[j], _val_col]])[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    Pandas DataFrame containing p values.
    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # t-type comparison on weighted-rank sums S.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df = (b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Normal approximation on scaled W sums.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original appearance order of factor levels.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)                   # number of treatments
    b = x[block_col].unique().size    # number of blocks

    # r: within-block ranks of observations; q: rank of each block's range
    # (blocks with larger spread get larger weight, per Quade 1979).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1)/2
    # Weight each (centered) rank by its block's range rank. The original
    # code indexed the hard-coded column name 'blocks' here, which breaks
    # melted input with a custom block column; use block_col instead.
    x['s'] = x.apply(lambda row, q: row['rr'] * q[row[block_col]], axis=1, args=(q,))
    x['w'] = x.apply(lambda row, q: row['r'] * q[row[block_col]], axis=1, args=(q,))

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # np.float was a deprecated alias for the builtin float and is removed in
    # modern NumPy; use the builtin directly.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k-1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1)/2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.
    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.
    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    n_perm : int, optional
        Number of permutations used to estimate the p value when the peak is
        unknown. Defaults to 100.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.
    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.
    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # __convert_to_df returns (DataFrame, val_col_name, group_col_name); the
    # original code assigned the whole tuple to `x`, which made every
    # subsequent x[...] lookup fail.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original appearance order of factor levels.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    # Pooled ranks and per-group sample sizes.
    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Count of pairs where an element of Rj exceeds an element of Ri
        # (Mann-Whitney count).
        return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))

    def _ustat(Rij, g, k):
        # Pairwise Mann-Whitney U matrix across all group pairs.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i,j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j,i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A(p) statistic: U counts increasing up to the peak p,
        # decreasing after it.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i+1, p+1):
                    tmp1 += U[i,j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i+1, k):
                    tmp2 += U[j,i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p+1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null expectation of A(p) (Mack & Wolfe 1981).
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2)/4

    def _var_at(p, n):
        # Null variance of A(p) (Mack & Wolfe 1981).
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -\
            np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +\
            12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: exact normal approximation.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean)/sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: take the max standardized A(p) over all candidate
        # peaks and estimate the p value by permutation.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)
        # NOTE(review): overwrites `p` with a boolean mask marking the
        # estimated peak position(s); retained from the original — verify
        # this is intended rather than e.g. A.argmax().
        p = A == stat
        est = None

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(j, Uix) for j in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # Permutation p value: proportion of permuted maxima exceeding the
        # observed statistic. The original divided the exceeding VALUES by
        # n_perm, returning an array instead of a proportion.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional; groups may have
        different lengths (shorter groups may be padded with NaN).
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type. Diagonal elements are set to -1.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        # Positional boundaries of each group's slice in the sorted frame.
        x_starts = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        x_grouped = [x[val_col].iloc[j:(j + x_lens[i])] for i, j in enumerate(x_starts)]
    else:
        # dtype=object keeps ragged (unequal-length) group lists valid on
        # NumPy >= 1.24, where implicit ragged arrays raise ValueError.
        x = np.asarray(a, dtype=object)
        # NaN values act as padding and are dropped from each group.
        x_grouped = [np.asarray(row, dtype=float) for row in x]
        x_grouped = [row[~np.isnan(row)] for row in x_grouped]
        x_lens = np.asarray([len(row) for row in x_grouped])
    if any(x_lens == 0):
        raise ValueError("All groups must contain data")
    x_len = len(x_grouped)
    # np.float was removed from NumPy (>= 1.24); the builtin is equivalent here.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(x_len, 1)
    tri_lower = np.tril_indices(x_len, -1)

    def compare_pooled(i, j):
        # Two-sided t-test using the pooled standard deviation and
        # the total degrees of freedom across all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1. / x_lens[i] + 1. / x_lens[j])
        t_value = diff / se_diff
        return 2. * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)
    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    # Mirror the upper triangle into the lower one; mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # Bug fix: the user-supplied `alpha` was previously ignored (0.05 was
    # hard-coded in the pairwise_tukeyhsd call).
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # np.str / np.int were removed from NumPy (>= 1.24); builtins are equivalent.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)
    vs = np.zeros((groups_len, groups_len), dtype=int)
    # Parse the textual summary table row by row.
    # NOTE(review): column 5 is assumed to be the 'reject' flag — this index
    # varies across statsmodels versions (newer ones insert a p-adj column);
    # confirm against the installed statsmodels release.
    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0
    # Keep the upper triangle, set the diagonal, and mirror downwards.
    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(groups_len, -1)
    vs[tri_lower] = vs.T[tri_lower]
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise Mann-Whitney rank tests for every pair of groups.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Two-dimensional array or a DataFrame with the observations.
    val_col : str, optional
        DataFrame column holding the dependent (response) variable.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        DataFrame column holding the grouping (predictor) variable.
        Must be specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Apply the 1/2 continuity correction. Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Alternative hypothesis for each pairwise test. Default is 'two-sided'.
    p_adjust : str, optional
        P value correction method, passed through to
        statsmodels' `multipletests` (e.g. 'bonferroni', 'holm',
        'fdr_bh'; see statsmodels.sandbox.stats.multicomp for the full list).
    sort : bool, optional
        Sort the DataFrame by the group and value columns. Recommended
        unless the data are sorted manually.

    Returns
    -------
    result : pandas DataFrame
        Symmetric matrix of p values with -1 on the diagonal.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    data, v_col, g_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Freeze the original group order via an ordered categorical.
        data[g_col] = Categorical(data[g_col], categories=data[g_col].unique(), ordered=True)
    data.sort_values(by=[g_col, v_col], ascending=True, inplace=True)
    groups = np.unique(data[g_col])
    k = groups.size
    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    for i, j in it.combinations(range(k), 2):
        sample_i = data.loc[data[g_col] == groups[i], v_col]
        sample_j = data.loc[data[g_col] == groups[j], v_col]
        pvals[i, j] = ss.mannwhitneyu(sample_i, sample_j,
                                      use_continuity=use_continuity,
                                      alternative=alternative)[1]
    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise Wilcoxon signed-rank tests for every pair of groups. It is a
    non-parametric version of the paired T-test for use with non-parametric
    ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Two-dimensional array or a DataFrame with the observations.
    val_col : str, optional
        DataFrame column holding the dependent (response) variable.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        DataFrame column holding the grouping (predictor) variable.
        Must be specified if `a` is a pandas DataFrame object.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": includes zero-differences in the ranking process
        (more conservative); "wilcox": discards all zero-differences;
        "zsplit": splits the zero rank between positive and negative ones.
    correction : bool, optional
        If True, apply the continuity correction when computing the
        z-statistic. Default is False.
    p_adjust : str, optional
        P value correction method, passed through to statsmodels'
        `multipletests` (e.g. 'bonferroni', 'holm', 'fdr_bh'; see
        statsmodels.sandbox.stats.multicomp for the full list).
    sort : bool, optional
        Sort the DataFrame by group and value columns. Default is False.

    Returns
    -------
    result : pandas DataFrame
        Symmetric matrix of p values with -1 on the diagonal.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    data, v_col, g_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Freeze the original group order via an ordered categorical.
        data[g_col] = Categorical(data[g_col], categories=data[g_col].unique(), ordered=True)
    groups = np.unique(data[g_col])
    k = groups.size
    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    for i, j in it.combinations(range(k), 2):
        sample_i = data.loc[data[g_col] == groups[i], v_col]
        sample_j = data.loc[data[g_col] == groups[j], v_col]
        pvals[i, j] = ss.wilcoxon(sample_i, sample_j,
                                  zero_method=zero_method,
                                  correction=correction)[1]
    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik fuer Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    k = groups.size
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - k) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # Scheffe F statistic for the (i, j) pair of group labels.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni.loc[i] + 1. / ni.loc[j]) * (k - 1.)
        return dif ** 2. / A

    # np.float was removed from NumPy (>= 1.24); the builtin is equivalent.
    vs = np.zeros((k, k), dtype=float)
    tri_lower = np.tril_indices(k, -1)
    combs = it.combinations(range(k), 2)
    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])
    vs[tri_lower] = vs.T[tri_lower]
    p_values = ss.f.sf(vs, k - 1., n - k)
    np.fill_diagonal(p_values, -1)
    # NOTE(review): the `p_adjust` argument is accepted but never applied;
    # preserved as-is for backward compatibility — confirm intent upstream.
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    xi = x_grouped.mean()
    si = x_grouped.var()

    def compare(i, j):
        # Two-sided p value for the (i, j) pair of group labels.
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch-Satterthwaite approximation for the degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) +
                            si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # Bug fixes: the original referenced an undefined name `s2i`
            # (NameError) — the group variances are held in `si` — and called
            # any() with four positional arguments (TypeError).
            ok1 = (9. / 10. <= ni[i] / ni[j] <= 10. / 9.)
            ok2 = (9. / 10. <= (si[i] / ni[i]) / (si[j] / ni[j]) <= 10. / 9.)
            ok3 = (4. / 5. <= ni[i] / ni[j] <= 5. / 4.) and \
                  (1. / 2. <= (si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2. / 3. <= ni[i] / ni[j] <= 3. / 2.) and \
                  (3. / 4. <= (si[i] / ni[i]) / (si[j] / ni[j]) <= 4. / 3.)
            if not any([ok1, ok2, ok3, ok4]):
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        return 2. * ss.t.sf(np.abs(t_val), df=df)

    # np.float was removed from NumPy (>= 1.24); the builtin is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    combs = it.combinations(range(groups.size), 2)
    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])
    # Dunn-Sidak adjustment of the raw p values.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col=None, group_col=None, sort=False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized-range q statistic for the (i, j) pair of group labels.
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        return dif / np.sqrt(A)

    # np.float was removed from NumPy (>= 1.24); the builtin is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    combs = it.combinations(range(groups.size), 2)
    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])
    # Convert q statistics to p values via the studentized range distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed to
    the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
    test is basically an extension of the U-test as re-ranking is conducted for
    each pairwise test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    data, v_col, g_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        data[g_col] = Categorical(data[g_col], categories=data[g_col].unique(), ordered=True)
    data.sort_values(by=[g_col], ascending=True, inplace=True)
    groups = np.unique(data[g_col])
    counts = data.groupby(g_col)[v_col].count()
    k = groups.size

    def tie_correction(ranks):
        # Correction term built from the multiplicities of tied ranks.
        reps = ranks.value_counts().values
        return np.sum((reps ** 3 - reps) / 12.)

    def pair_stat(gi, gj):
        # Re-rank only the two groups under comparison, then compute the
        # standardized Mann-Whitney statistic for the pair.
        ni = counts.loc[gi]
        nj = counts.loc[gj]
        pair = data.loc[(data[g_col] == gi) | (data[g_col] == gj)].copy()
        pair['ranks'] = pair.loc[:, v_col].rank()
        rank_sums = pair.groupby(g_col)['ranks'].sum().loc[[gi, gj]]
        u_first = nj * ni + (nj * (nj + 1) / 2) - rank_sums.loc[gi]
        u_second = nj * ni + (ni * (ni + 1) / 2) - rank_sums.loc[gj]
        u_min = min(u_first, u_second)
        total = ni + nj
        var = (nj * ni / (total * (total - 1.))) * \
              ((total ** 3 - total) / 12. - tie_correction(pair['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    for i, j in it.combinations(range(k), 2):
        vs[i, j] = pair_stat(groups[i], groups[j])
    # Convert the statistics to p values via the studentized range distribution.
    vs[upper] = psturng(np.abs(vs[upper]), k, np.inf)
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
|
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_tukey_hsd | python | def posthoc_tukey_hsd(x, g, alpha=0.05):
'''Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
'''
result = pairwise_tukeyhsd(x, g, alpha=0.05)
groups = np.array(result.groupsunique, dtype=np.str)
groups_len = len(groups)
vs = np.zeros((groups_len, groups_len), dtype=np.int)
for a in result.summary()[1:]:
a0 = str(a[0])
a1 = str(a[1])
a0i = np.where(groups == a0)[0][0]
a1i = np.where(groups == a1)[0][0]
vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0
vs = np.triu(vs)
np.fill_diagonal(vs, -1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[tri_lower] = vs.T[tri_lower]
return DataFrame(vs, index=groups, columns=groups) | Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g)) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1845-L1897 | null | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.
    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.
    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'
    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col
    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # List of groups (or an ndarray with no dimension of size 2):
        # flatten values and assign 1-based group labels by position.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i + 1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
    elif isinstance(a, np.ndarray):
        # Column ids not defined — try to infer them.
        # NOTE(review): `all([val_id, group_id])` treats a legitimate index 0
        # as "not specified"; preserved for backward compatibility.
        if not all([val_id, group_id]):
            if np.argmax(a.shape):
                a = a.T
            # The column with fewer unique values is assumed to be the groups.
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
            if ax[0] != ax[1]:  # np.asscalar was removed from NumPy (>= 1.23)
                __val_col = int(np.argmax(ax))
                __group_col = int(np.argmin(ax))
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}
        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted DataFrame
    with canonical 'y', 'groups' and 'blocks' columns.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Input data; a wide DataFrame (blocks as index, groups as columns),
        an already-melted structure, or a 2d array.
    y_col, group_col, block_col : str or int, optional
        Column names/ids of the response, group and block variables; only
        used when `melted` is True.
    melted : bool, optional
        Whether `a` is already in long (melted) format.

    Returns
    -------
    tuple of (DataFrame, 'y', 'groups', 'blocks').
    '''
    if isinstance(a, DataFrame) and not melted:
        # Wide DataFrame: melt it into long format.
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
    elif melted:
        # Already-melted input: pick out the three relevant columns.
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})
    elif not isinstance(a, DataFrame):
        # Plain 2d array: wrap it in a DataFrame first.
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
        if not melted:
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
            x.columns.name = group_col
            x.index.name = block_col
            x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
        else:
            # NOTE(review): unreachable given the branch order above (melted
            # input is caught by the previous elif). Kept defensively; the
            # original assigned into the immutable `x.columns` Index, which
            # raises TypeError — rename() is the correct operation.
            x = x.rename(columns={group_col: 'groups',
                                  block_col: 'blocks',
                                  y_col: 'y'})
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction are employed according to Conover [1]_.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,54,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    def compare_conover(i, j):
        # Conover-Iman t statistic: absolute difference of mean rank sums,
        # scaled by the pooled variance S2, the sample-size term B, and the
        # tie/H-statistic correction D (all taken from the enclosing scope).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        D = (n - 1. - H_cor) / (n - x_len)
        t_value = diff / np.sqrt(S2 * B * D)
        # Two-sided p value from Student's t with n - k degrees of freedom.
        p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
        return p_value
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve the order of first appearance instead of sorting groups.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()
    # Rank all observations jointly (average ranks for ties).
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    x_ranks_sum = x.groupby(_group_col)['ranks'].sum()
    # ties
    # vals holds the multiplicity of each distinct rank; the tie correction
    # sums t**3 - t over tied groups (t > 1).
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    # x_ties == 1 means no ties; capped at 1 as a safety bound.
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    # Kruskal-Wallis H statistic, then tie-corrected H.
    H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
    H_cor = H / x_ties
    # Pooled variance of ranks: closed form without ties, sample variance
    # of the ranks otherwise.
    if x_ties == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))
    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    # Fill the upper triangle with pairwise p values.
    combs = it.combinations(range(x_len), 2)
    for i, j in combs:
        vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])
    if p_adjust:
        # Multiple-testing correction applied to the upper triangle only.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle into the lower one; diagonal sentinel is -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction will be employed according to Glantz (2012).
    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    def compare_dunn(i, j):
        # Dunn z statistic: mean-rank difference over its standard error,
        # where A is the rank variance term and x_ties the tie correction
        # (both from the enclosing scope).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        z_value = diff / np.sqrt((A - x_ties) * B)
        # Two-sided p value from the standard normal distribution.
        p_value = 2. * ss.norm.sf(np.abs(z_value))
        return p_value
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve the order of first appearance instead of sorting groups.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()
    # Rank all observations jointly (average ranks for ties).
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    # ties
    # vals holds the multiplicity of each distinct rank; tie correction per
    # Glantz (2012): sum(t**3 - t) / (12 * (n - 1)).
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = tie_sum / (12. * (n - 1))
    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    # Fill the upper triangle with pairwise p values.
    for i,j in combs:
        vs[i, j] = compare_dunn(x_groups_unique[i], x_groups_unique[j])
    if p_adjust:
        # Multiple-testing correction applied to the upper triangle only.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle into the lower one; diagonal sentinel is -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction will be employed according to Glantz (2012).
    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    def compare_stats_chi(i, j):
        # Chi-squared variant: squared mean-rank difference over its
        # variance term (tie correction is applied by the caller).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi
    def compare_stats_tukey(i, j):
        # Studentized-range variant: q statistic for the pair (i, j);
        # the caller rescales by sqrt(2) before calling psturng.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve the order of first appearance instead of sorting groups.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()
    # Rank all observations jointly (average ranks for ties).
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    # ties
    # vals holds the multiplicity of each distinct rank; x_ties == 1 means
    # no ties (capped at 1 as a safety bound).
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if dist == 'chi':
        # Tie-corrected chi-squared statistics -> p values with k-1 df.
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
    elif dist == 'tukey':
        # q * sqrt(2) -> p values from the studentized range distribution.
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
    # Mirror the upper triangle into the lower one; diagonal sentinel is -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc if
    significant results of the Friedman's test are obtained. The statistics
    refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
    [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via Friedman's
    test. The consequent post hoc pairwise multiple comparison test
    according to Nemenyi is conducted with this function.
    This function does not test for ties.
    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.
    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # q statistic for the pair of groups at positions i and j: mean-rank
        # difference over sqrt(k(k+1)/(6n)); k, n, R, groups come from the
        # enclosing scope.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[_group_col].unique()
    k = groups.size
    n = x[_block_col].unique().size
    # Rank observations within each block, then average ranks per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].mean()
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Rescale q by sqrt(2), then p values from the studentized range
    # distribution with k groups and infinite df.
    vs *= np.sqrt(2.)
    vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
    # Mirror the upper triangle into the lower one; diagonal sentinel is -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Conover t statistic: rank-sum difference over sqrt(A*B), with a
        # two-sided p value from Student's t (df = m*n*k - k - n + 1).
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval
    def compare_tukey(i, j):
        # 'single-step' variant: q statistic -> studentized range p value.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = np.unique(x[_group_col])
    k = groups.size
    n = x[_block_col].unique().size
    # Rank observations within each block; R holds per-group rank sums.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()
    # m is the number of replicates per cell; hard-coded to 1 (unreplicated
    # block design).
    m = 1
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # NOTE(review): Conover's T2 statistic is usually written in terms of the
    # squared rank sums; np.sum(R) here sums the rank sums unsquared — verify
    # against Conover (1999) whether this should be np.sum(R**2).
    T2 = 1 / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if p_adjust == 'single-step':
        # Tukey-style comparison; no further adjustment is applied.
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            # Multiple-testing correction applied to the upper triangle only.
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle into the lower one; diagonal sentinel is -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test
    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.
    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
                      [110,112,123,130,145],
                      [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    # NOTE: as written, both `sort` and `p_adjust` are accepted but never
    # applied — the sort branch and the multipletests call are commented out
    # below, so the adjustment step is currently a no-op.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    # Joint ranking of all observations; per-group mean ranks and sizes.
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    sigma = np.sqrt(n * (n + 1) / 12.)
    # df is defined but unused below (psturng is called with np.inf directly).
    df = np.inf
    def compare(m, u):
        # Standardized mean-rank differences between group u and every
        # group index in m (the groups ordered below u).
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)
    stat = np.zeros((k, k))
    # For each ordered pair (i, j), the NPM statistic is the maximum
    # standardized difference over the intermediate groups i..j-1.
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            stat[j, i] = np.max(tmp)
    # Negative statistics are clipped to zero before the p-value lookup.
    stat[stat < 0] = 0
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # Mirror the lower triangle into the upper one; diagonal sentinel is -1.
    p_values[tri_lower] = p_values.T[tri_lower]
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # z statistic for the pair of groups at positions i and j: mean-rank
        # difference over sqrt(k(k+1)/(6n)); k, n, R, groups come from the
        # enclosing scope.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size
    # Rank observations within each block, then average ranks per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()
    # Bugfix: was dtype=np.float — the np.float alias was deprecated in
    # NumPy 1.20 and removed in 1.24, raising AttributeError; the builtin
    # float is the documented replacement and behaves identically.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Two-sided normal p values for the whole matrix, clipped to <= 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.
    if p_adjust:
        # Multiple-testing correction applied to the upper triangle only.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle into the lower one; diagonal sentinel is -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    Pandas DataFrame containing p values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # q statistic for the pair of groups at positions i and j: mean-rank
        # difference over sqrt(k(k+1)/(6n)); k, n, R, groups come from the
        # enclosing scope.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size
    # Rank observations within each block, then average ranks per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()
    # Bugfix: was dtype=np.float — the np.float alias was deprecated in
    # NumPy 1.20 and removed in 1.24, raising AttributeError; the builtin
    # float is the documented replacement and behaves identically.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Squared q statistics -> chi-squared p values with k-1 df.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)
    # Mirror the upper triangle into the lower one; diagonal sentinel is -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    Pandas DataFrame containing p values.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # t statistic for the pair of groups at positions i and j: rank-sum
        # difference over the common denominator, with a two-sided p value
        # from Student's t (denom, df, Rj, groups from the enclosing scope).
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df = df)
        return pval
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    if not sort:
        # Preserve the order of first appearance instead of sorting values.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    # BIBD notation: t treatments, b blocks; for a complete design each
    # treatment appears in every block (r = b) and each block holds every
    # treatment (k = t).
    t = len(groups)
    b = x[block_col].unique().size
    r = b
    k = t
    # Rank observations within each block; Rj holds per-group rank sums.
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    # Durbin's T1 statistic, then the shared denominator and df of the
    # pairwise t tests.
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k -1))))
    df = b * k - b - t + 1
    # Bugfix: was dtype=np.float — the np.float alias was deprecated in
    # NumPy 1.20 and removed in 1.24, raising AttributeError; the builtin
    # float is the documented replacement and behaves identically.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    if p_adjust:
        # Multiple-testing correction applied to the upper triangle only.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle into the lower one; diagonal sentinel is -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples.

    Tests the null hypothesis that k-samples are drawn from the same
    population without having to specify the distribution function of that
    population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = x[_group_col].unique()
    k = groups.size
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the equivalent (float64) dtype.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    for i, j in combs:
        # BUG FIX: `midrank` was accepted but never forwarded to SciPy,
        # so the documented option had no effect. [2] is the approximate
        # significance level of the two-sample comparison.
        vs[i, j] = ss.anderson_ksamp(
            [x.loc[x[_group_col] == groups[i], _val_col],
             x.loc[x[_group_col] == groups[j], _val_col]],
            midrank=midrank)[2]
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # Quade's t-approximation with (b - 1)(k - 1) degrees of freedom.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Normal approximation on the weighted rank sums W.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size
    # Within-block ranks and per-block sample-range ranks (Q_i in Quade's notation).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1)/2
    # BUG FIX: the row lookup previously used the literal column name
    # 'blocks', which fails whenever the block column has any other name
    # (e.g. melted input with custom column names). Use `block_col`.
    x['s'] = x.apply(lambda row, ranks: row['rr'] * ranks[row[block_col]], axis=1, args=(q,))
    x['w'] = x.apply(lambda row, ranks: row['r'] * ranks[row[block_col]], axis=1, args=(q,))
    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()
    # np.float was removed from NumPy; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1)/2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.
    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    n_perm : int, optional
        Number of permutations used to estimate the p value when the peak
        is unknown (`p` is None). Default is 100.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # BUG FIX: __convert_to_df returns (df, val_col, group_col); the result
    # was previously bound to a single name, making `x` a tuple and breaking
    # every subsequent column access.
    x, val_col, group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    x.sort_values(by=[group_col], ascending=True, inplace=True)
    k = x[group_col].unique().size
    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False
    Rij = x[val_col].rank()
    n = x.groupby(group_col)[val_col].count()

    def _fn(Ri, Rj):
        # Mann-Whitney count: number of (i, j) pairs with Rj > Ri.
        return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))

    def _ustat(Rij, g, k):
        # Pairwise U-statistic matrix over all group pairs.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i,j] = _fn(Rij[x[group_col] == levels[i]], Rij[x[group_col] == levels[j]])
                U[j,i] = _fn(Rij[x[group_col] == levels[j]], Rij[x[group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A_p statistic for peak index p.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i+1, p+1):
                    tmp1 += U[i,j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i+1, k):
                    tmp2 += U[j,i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p+1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null mean of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2)/4

    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -\
               np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +\
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: standard normal approximation.
        if (x.groupby(val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean)/sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: take the max standardized A_p and estimate the
        # p value by permutation of the pooled ranks.
        U = _ustat(Rij, x[group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)
        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))
        mt = np.array(mt)
        # BUG FIX: the p value is the fraction of permuted statistics that
        # exceed the observed one; previously the raw exceeding values
        # (an array) were divided by n_perm instead of their count.
        p_value = mt[mt > stat].size / n_perm
    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.
    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size
    # Normal scores: ranks mapped through the standard normal quantile function.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))
    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    # Omnibus van der Waerden statistic; reused inside each pairwise t value.
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    param = k - 1
    A = aj / nj
    # np.float was removed from NumPy; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_stats(i, j):
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts)/(n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # Keep groups in a plain list: slices have unequal lengths, and
        # building a ragged np.array raises in modern NumPy.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        # dtype=object tolerates rows of unequal length (ragged input).
        x = np.asarray(a, dtype=object)
        # Drop NaNs per group; use lists since groups may differ in size.
        x_grouped = [np.asarray(row, dtype=float) for row in a]
        x_grouped = [row[~np.isnan(row)] for row in x_grouped]
        x_lens = np.asarray([len(row) for row in x_grouped])
    if np.any(x_lens == 0):
        raise ValueError("All groups must contain data")
    x_len = len(x_grouped)
    # np.float was removed from NumPy; the builtin float is equivalent.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(x_len, 1)
    tri_lower = np.tril_indices(x_len, -1)

    def compare_pooled(i, j):
        # t test with a common (pooled) SD and df summed over all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)
    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    return vs
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Runs `scipy.stats.mannwhitneyu` on every pair of groups and collects
    the p values into a symmetric matrix.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Two-dimensional array or DataFrame with the observations.
    val_col : str, optional
        DataFrame column holding the response values (non-nominal scale).
        Required when `a` is a DataFrame.
    group_col : str, optional
        DataFrame column holding the categorical group labels.
        Required when `a` is a DataFrame.
    use_continuity : bool, optional
        Apply the 1/2 continuity correction. Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Alternative hypothesis, passed through to SciPy.
        Defaults to 'two-sided'.
    p_adjust : str, optional
        P value correction method accepted by statsmodels' multipletests
        (e.g. 'bonferroni', 'holm', 'fdr_bh'). No adjustment when None.
    sort : bool, optional
        Whether to sort the DataFrame by group and value columns.
        Recommended unless the data is sorted already.

    Returns
    -------
    result : pandas DataFrame
        Matrix of p values; the diagonal is set to -1.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Freeze the observed label order so sorting keeps it.
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    groups = np.unique(df[_group_col])
    n_groups = groups.size
    pvals = np.zeros((n_groups, n_groups))

    def _values(label):
        return df.loc[df[_group_col] == label, _val_col]

    for gi, gj in it.combinations(range(n_groups), 2):
        pvals[gi, gj] = ss.mannwhitneyu(_values(groups[gi]),
                                        _values(groups[gj]),
                                        use_continuity=use_continuity,
                                        alternative=alternative)[1]
    upper = np.triu_indices(n_groups, 1)
    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]
    # Mirror the upper triangle into the lower one, then mark the diagonal.
    lower = np.tril_indices(n_groups, -1)
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test.

    Non-parametric analogue of the paired T-test for use after a
    non-parametric ANOVA; runs `scipy.stats.wilcoxon` on every pair of
    groups and collects the p values into a symmetric matrix.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Two-dimensional array or DataFrame with the observations.
    val_col : str, optional
        DataFrame column holding the response values (non-nominal scale).
        Required when `a` is a DataFrame.
    group_col : str, optional
        DataFrame column holding the categorical group labels.
        Required when `a` is a DataFrame.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": keep zero-differences in the ranking (more conservative);
        "wilcox": discard all zero-differences;
        "zsplit": split the zero rank between positive and negative ones.
    correction : bool, optional
        If True, apply the continuity correction by adjusting the Wilcoxon
        rank statistic by 0.5 towards the mean when computing the
        z-statistic. Default is False.
    p_adjust : str, optional
        P value correction method accepted by statsmodels' multipletests
        (e.g. 'bonferroni', 'holm', 'fdr_bh'). No adjustment when None.
    sort : bool, optional
        Whether to mark the group column as ordered-categorical.
        Default is False.

    Returns
    -------
    result : pandas DataFrame
        Matrix of p values; the diagonal is set to -1.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    # NOTE: rows are intentionally left in input order (no sort_values here);
    # the signed-rank test pairs observations positionally.
    groups = np.unique(df[_group_col])
    n_groups = groups.size
    pvals = np.zeros((n_groups, n_groups))

    def _values(label):
        return df.loc[df[_group_col] == label, _val_col]

    for gi, gj in it.combinations(range(n_groups), 2):
        pvals[gi, gj] = ss.wilcoxon(_values(groups[gi]),
                                    _values(groups[gj]),
                                    zero_method=zero_method,
                                    correction=correction)[1]
    upper = np.triu_indices(n_groups, 1)
    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]
    # Mirror the upper triangle into the lower one, then mark the diagonal.
    lower = np.tril_indices(n_groups, -1)
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # Scheffe F statistic for the pair (i, j).
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # np.float was removed from NumPy; the builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    combs = it.combinations(range(groups.size), 2)
    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])
    vs[tri_lower] = vs.T[tri_lower]
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
'''Tamhane's T2 all-pairs comparison test for normally distributed data with
unequal variances. Tamhane's T2 test can be performed for all-pairs
comparisons in an one-factorial layout with normally distributed residuals
but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
tested. The null hypothesis is tested in the two-tailed test against the
alternative hypothesis [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
welch : bool, optional
If True, use Welch's approximate solution for calculating the degree of
freedom. T2 test uses the usual df = N - 2 approximation.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the t-distribution and adjusted according to
Dunn-Sidak.
References
----------
.. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
Means with Unequal Variances. Journal of the American Statistical Association,
74, 471-480.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col], ascending=True, inplace=True)
groups = x[_group_col].unique()
x_grouped = x.groupby(_group_col)[_val_col]
ni = x_grouped.count()
n = ni.sum()
xi = x_grouped.mean()
si = x_grouped.var()
sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))
def compare(i, j):
    """Return the two-sided Tamhane p value for groups *i* and *j*.

    Uses the closure variables of the enclosing posthoc_tamhane call:
    ``xi`` (group means), ``si`` (group variances), ``ni`` (group sizes)
    and the ``welch`` flag.
    """
    dif = xi[i] - xi[j]
    A = si[i] / ni[i] + si[j] / ni[j]
    t_val = dif / np.sqrt(A)
    if welch:
        # Welch's approximate degrees of freedom.
        df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
    else:
        # Balance checks according to Tamhane (1979, p. 474).
        # BUG FIX: the original referenced an undefined name `s2i`; the group
        # variances are held in `si` (computed by the enclosing function).
        ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
        ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
        ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
        ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
        # BUG FIX: any() takes a single iterable; calling it with four
        # positional arguments raised TypeError.
        OK = any([ok1, ok2, ok3, ok4])
        if not OK:
            print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
        df = ni[i] + ni[j] - 2.
    p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
    return p_val
vs = np.zeros((groups.size, groups.size), dtype=np.float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(groups.size), 2)
for i,j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
vs[tri_lower] = vs.T[tri_lower]
vs[vs > 1] = 1
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col=None, group_col=None, sort=False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Must be specified if `a` is a pandas DataFrame.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame.

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled (within-group) variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic for the pair (i, j).
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # it was a plain alias of the builtin float.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Convert statistics to p values via the studentized range distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. Unlike
    Kruskal-rank based procedures, the DSCF test re-ranks the data for every
    pairwise comparison, extending the U-test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Must be specified if `a` is a pandas DataFrame.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame.

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size
    n = x.groupby(_group_col)[_val_col].count()

    def get_ties(ranks):
        # Tie correction term: sum of (t^3 - t) / 12 over tied-rank counts.
        t = ranks.value_counts().values
        return np.sum((t ** 3 - t) / 12.)

    def compare(i, j):
        # Re-rank only the observations of groups i and j, then compute the
        # standardized minimal U statistic scaled by sqrt(2).
        ni = n.loc[i]
        nj = n.loc[j]
        sub = x.loc[(x[_group_col] == i) | (x[_group_col] == j)].copy()
        sub['ranks'] = sub.loc[:, _val_col].rank()
        r = sub.groupby(_group_col)['ranks'].sum().loc[[i, j]]
        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - r
        u_min = np.min(u)
        s = ni + nj
        var = (nj * ni / (s * (s - 1.))) * ((s ** 3 - s) / 12. - get_ties(sub['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(k, 1)
    tri_lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), k, np.inf)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
|
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Must be specified if `a` is a pandas DataFrame.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame.

    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.

    alternative : ['two-sided', 'less', or 'greater'], optional
        Whether to get the p-value for the one-sided hypothesis
        ('less' or 'greater') or for the two-sided hypothesis ('two-sided').
        Defaults to 'two-sided'.

    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(k, 1)
    tri_lower = np.tril_indices(k, -1)

    # One U-test per unordered pair of groups; keep only the p value.
    for i, j in it.combinations(range(k), 2):
        obs_i = x.loc[x[_group_col] == groups[i], _val_col]
        obs_j = x.loc[x[_group_col] == groups[j], _val_col]
        vs[i, j] = ss.mannwhitneyu(obs_i, obs_j,
                                   use_continuity=use_continuity,
                                   alternative=alternative)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm') | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1900-L1991 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Must be specified if `a` is a pandas DataFrame.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame.

    val_id : int, optional
        Index of a column that contains dependent variable values. Should be
        specified if a NumPy ndarray is used as an input. It will be inferred
        from data, if not specified.

    group_id : int, optional
        Index of a column that contains independent variable values. Should be
        specified if a NumPy ndarray is used as an input. It will be inferred
        from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values
        and `group_col` column contains categorical values.

    val_col : str
        Name of the column that contains dependent variable values.

    group_col : str
        Name of the column that contains independent variable values.

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Ragged input: a sequence of groups, possibly of unequal lengths.
        # Group labels are 1-based positional indices.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i + 1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):
        # Column ids not defined -- try to infer which column is which.
        if not all([val_id, group_id]):
            if np.argmax(a.shape):
                a = a.T
            # Heuristic: the column with more unique values holds the
            # dependent variable, the other one holds the group labels.
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
            # BUG FIX: np.asscalar() was deprecated in NumPy 1.16 and removed
            # in 1.23; ndarray.item() is the documented replacement.
            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')

            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}

        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted
    DataFrame with standard column names.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Wide block-design data (rows are blocks, columns are groups) or,
        if `melted` is True, already-melted data with `y_col`, `group_col`
        and `block_col` identifying the columns.

    y_col, group_col, block_col : str or int, optional
        Column identifiers; required when `melted` is True.

    melted : bool, optional
        Whether `a` is already in melted (long) format.

    Returns
    -------
    Tuple of (DataFrame, 'y', 'groups', 'blocks') where the DataFrame holds
    the melted data under those standardized column names.
    '''
    if isinstance(a, DataFrame) and not melted:
        # Wide DataFrame: rows are blocks, columns are groups.
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    elif melted:
        # Already melted (DataFrame or array-like): standardize column names.
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})

    elif not isinstance(a, DataFrame):
        # Non-melted array-like: rows are blocks, columns are groups.
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
        # BUG FIX: the former `else` sub-branch here (melted array) was
        # unreachable -- melted input is handled by the `elif melted:` branch
        # above -- and it assigned into an immutable pandas Index
        # (`x.columns[group_col] = ...`), which raises TypeError. Removed.

    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Must be specified if `a` is a pandas DataFrame.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame.

    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction are employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)
    uni_groups = np.unique(x[_group_col])
    k = uni_groups.size
    counts = x.groupby(_group_col)[_val_col].count()

    x['ranks'] = x[_val_col].rank()
    avg_ranks = x.groupby(_group_col)['ranks'].mean()
    sum_ranks = x.groupby(_group_col)['ranks'].sum()

    # Tie correction factor (Conover 1979).
    tie_counts = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(tie_counts[tie_counts != 1] ** 3 - tie_counts[tie_counts != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    # Kruskal-Wallis H statistic, tie-corrected.
    H = (12. / (n * (n + 1.))) * np.sum(sum_ranks ** 2 / counts) - 3. * (n + 1.)
    H_cor = H / x_ties

    if x_ties == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.) ** 2.) / 4.)))

    def compare_conover(i, j):
        # t statistic and two-sided p value for the pair (i, j).
        diff = np.abs(avg_ranks.loc[i] - avg_ranks.loc[j])
        B = (1. / counts.loc[i] + 1. / counts.loc[j])
        D = (n - 1. - H_cor) / (n - k)
        t_value = diff / np.sqrt(S2 * B * D)
        return 2. * ss.t.sf(np.abs(t_value), df=n - k)

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(k, 1)
    tri_lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = compare_conover(uni_groups[i], uni_groups[j])

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=uni_groups, columns=uni_groups)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Must be specified if `a` is a pandas DataFrame.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame.

    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)
    uni_groups = np.unique(x[_group_col])
    k = uni_groups.size
    counts = x.groupby(_group_col)[_val_col].count()

    x['ranks'] = x[_val_col].rank()
    avg_ranks = x.groupby(_group_col)['ranks'].mean()

    # Tie correction term (Glantz 2012).
    tie_counts = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(tie_counts[tie_counts != 1] ** 3 - tie_counts[tie_counts != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = tie_sum / (12. * (n - 1))

    def compare_dunn(i, j):
        # z statistic and two-sided p value for the pair (i, j).
        diff = np.abs(avg_ranks.loc[i] - avg_ranks.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / counts.loc[i] + 1. / counts.loc[j])
        z_value = diff / np.sqrt((A - x_ties) * B)
        return 2. * ss.norm.sf(np.abs(z_value))

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(k, 1)
    tri_lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = compare_dunn(uni_groups[i], uni_groups[j])

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=uni_groups, columns=uni_groups)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Must be specified if `a` is a pandas DataFrame.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a pandas
        DataFrame.

    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).

    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)
    uni_groups = np.unique(x[_group_col])
    k = uni_groups.size
    counts = x.groupby(_group_col)[_val_col].count()

    x['ranks'] = x[_val_col].rank()
    avg_ranks = x.groupby(_group_col)['ranks'].mean()

    # Tie correction factor (Glantz 2012).
    tie_counts = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(tie_counts[tie_counts != 1] ** 3 - tie_counts[tie_counts != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    def stat_chi(i, j):
        # Chi-squared statistic for the pair (i, j).
        diff = np.abs(avg_ranks.loc[i] - avg_ranks.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / counts.loc[i] + 1. / counts.loc[j])
        return diff ** 2. / (A * B)

    def stat_tukey(i, j):
        # Studentized range statistic for the pair (i, j).
        diff = np.abs(avg_ranks.loc[i] - avg_ranks.loc[j])
        B = (1. / counts.loc[i] + 1. / counts.loc[j])
        return diff / np.sqrt((n * (n + 1.) / 12.) * B)

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(k, 1)
    tri_lower = np.tril_indices(k, -1)

    if dist == 'chi':
        for i, j in it.combinations(range(k), 2):
            vs[i, j] = stat_chi(uni_groups[i], uni_groups[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], k - 1)
    elif dist == 'tukey':
        for i, j in it.combinations(range(k), 2):
            vs[i, j] = stat_tukey(uni_groups[i], uni_groups[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=uni_groups, columns=uni_groups)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc if
    significant results of the Friedman's test are obtained. The statistics
    refer to upper quantiles of the studentized range distribution (Tukey)
    [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).

    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.

    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.

    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.

    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via Friedman's
    test. The consequent post hoc pairwise multiple comparison test
    according to Nemenyi is conducted with this function.
    This function does not test for ties.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    # NOTE(review): `sort` is currently unused -- data are always ordered by
    # group and block columns below.
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[_group_col].unique()
    k = groups.size
    n = x[_block_col].unique().size

    # Within-block (Friedman) ranks and per-group mean ranks.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].mean()

    def qstat(i, j):
        # Studentized range statistic for the groups at positions i and j.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        return dif / np.sqrt(k * (k + 1.) / (6. * n))

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(k, 1)
    tri_lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = qstat(i, j)

    vs *= np.sqrt(2.)
    vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover's t statistic for the rank-sum difference of groups i and j.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df=(m * n * k - k - n + 1))
        return pval

    def compare_tukey(i, j):
        # Single-step variant: refer the scaled statistic to the
        # studentized range distribution instead of Student's t.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval

    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size
    n = x[_block_col].unique().size

    # Rank within each block; R holds the per-group rank sums.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()
    m = 1  # replicates per block/group cell (unreplicated design)
    S2 = m / (m * k - 1.) * (A1 - m * k * n * (m * k + 1.)**2. / 4.)
    # T2 is the Friedman-type statistic: sum of squared deviations of the
    # rank sums from their expectation n*m*(m*k+1)/2, scaled by S2
    # (Conover 1999). BUG FIX: this was previously computed as
    # np.sum(R) - n*m*((m*k+1)/2)**2 -- np.sum(R) is a data-independent
    # constant, so the old expression yielded wrong p values.
    T2 = 1. / S2 * np.sum((R - n * m * (m * k + 1.) / 2.) ** 2.)
    A = S2 * (2. * n * (m * k - 1.)) / (m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    if p_adjust == 'single-step':
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test.

    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.

    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
                      [110,112,123,130,145],
                      [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    sigma = np.sqrt(n * (n + 1) / 12.)
    df = np.inf

    def compare(m, u):
        # Standardized mean-rank differences between group u and every
        # group index listed in m.
        a = [(Ri.loc[groups[u]] - Ri.loc[groups[_mi]]) / (sigma / np.sqrt(2) * np.sqrt(1. / ni.loc[groups[_mi]] + 1. / ni.loc[groups[u]])) for _mi in m]
        return np.array(a)

    stat = np.zeros((k, k))
    for i in range(k - 1):
        for j in range(i + 1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            # The statistics live in the LOWER triangle: stat[j, i], j > i.
            stat[j, i] = np.max(tmp)

    stat[stat < 0] = 0
    p_values = psturng(stat, k, np.inf)

    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)

    # BUG FIX: p_adjust was documented and accepted but never applied;
    # adjust the computed (lower-triangle) p values before mirroring.
    if p_adjust is not None:
        p_values[tri_lower] = multipletests(p_values[tri_lower], method=p_adjust)[1]

    # BUG FIX: the mirror previously ran upper -> lower, overwriting the
    # computed lower-triangle p values with the psturng(0) filler of the
    # untouched upper triangle. Mirror lower -> upper instead.
    p_values[tri_upper] = p_values.T[tri_upper]

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic for the difference of mean Friedman ranks.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Rank within each block; R holds the per-group mean ranks.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the exact equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided p values from the standard normal distribution.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Standardized difference of mean Friedman ranks for groups i and j.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Rank within each block; R holds the per-group mean ranks.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the exact equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Squared statistics are referred to the chi-square distribution
    # with k - 1 degrees of freedom.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover-style t statistic for the rank-sum difference.
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original group/block order instead of sorting lexically.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)
    b = x[block_col].unique().size
    r = b
    k = t

    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the exact equivalent.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of sorting lexically.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size

    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the exact equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        # anderson_ksamp returns (statistic, critical values, significance
        # level); [2] is the approximate p value.
        # BUG FIX: the midrank argument was accepted but never forwarded.
        vs[i, j] = ss.anderson_ksamp(
            [x.loc[x[_group_col] == groups[i], _val_col],
             x.loc[x[_group_col] == groups[j], _val_col]],
            midrank=midrank)[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # t statistic based on the weighted rank sums S.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Normal approximation based on the scaled weighted rank sums W.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original group/block order instead of sorting lexically.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks and block weights q (ranks of the block ranges).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # BUG FIX: the block weight lookup previously used a hard-coded
    # 'blocks' column name, which raised KeyError for melted data with
    # custom column names; use the resolved block_col instead.
    x['s'] = x.apply(lambda row, q: row['rr'] * q[row[block_col]], axis=1, args=(q,))
    x['w'] = x.apply(lambda row, q: row['r'] * q[row[block_col]], axis=1, args=(q,))
    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the exact equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.
    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.
    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # BUG FIX: __convert_to_df returns a (DataFrame, val_col, group_col)
    # tuple (see its other call sites); the whole tuple was previously
    # bound to x, so every subsequent x[...] lookup failed.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the original group order instead of sorting lexically.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Count of pairs with Rj > Ri (Mann-Whitney-type count).
        return np.sum(Ri.apply(lambda z: Rj[Rj > z].size))

    def _ustat(Rij, g, k):
        # Pairwise U-statistics matrix between all group levels.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A_p statistic for peak index p: increasing part up to
        # the peak plus decreasing part after it.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p + 1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null expectation of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2) / 4

    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -
               np.sum(n**2 * (2 * n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: single standardized statistic, normal p value.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: maximize the standardized statistic over all
        # candidate peaks and assess it by a permutation test.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # BUG FIX: the Monte-Carlo p value is the fraction of permutation
        # statistics exceeding the observed one; previously the selected
        # values themselves were divided by n_perm, returning an array.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.
    There is no tie correction applied in this function.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.
    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Keep groups in order of first appearance rather than lexical order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size
    r = ss.rankdata(x[_val_col])
    # Normal scores: standard normal quantiles of rank/(n+1).
    x['z_scores'] = ss.norm.ppf(r / (n + 1))
    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj
    # NOTE: `np.float` was removed in NumPy 1.24; the builtin is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    def compare_stats(i, j):
        # Two-sided p value from the t distribution with n - k df
        # (Conover & Iman 1979 multiple-comparison statistic).
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts)/(n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df = n - k)
        return pval
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal as "no comparison".
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # Keep the groups in a plain list: building a ragged np.array raises
        # a ValueError on NumPy >= 1.24.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = a
        # Per-group NaN filtering; lists are used instead of an object
        # ndarray so groups of unequal length are supported.
        x_grouped = [np.asarray(xi, dtype=float) for xi in x]
        x_grouped = [xi[~np.isnan(xi)] for xi in x_grouped]
        x_lens = np.asarray([len(xi) for xi in x_grouped])
    if any(x_lens == 0):
        raise ValueError("All groups must contain data")
    x_len = len(x_grouped)
    # NOTE: `np.float` was removed in NumPy 1.24; the builtin is equivalent.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    def compare_pooled(i, j):
        # Pooled-SD two-sided t test using the combined degrees of freedom.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)
    combs = it.combinations(range(x_len), 2)
    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal as "no comparison".
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.
    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.
    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.
    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # Pass the caller's significance level through; it was previously
    # hard-coded to 0.05 regardless of the `alpha` argument.
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # NOTE: `np.str` / `np.int` were removed in NumPy 1.24; builtins are equivalent.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)
    vs = np.zeros((groups_len, groups_len), dtype=int)
    # Walk the summary table (first row is the header) and record the
    # reject/accept decision for each pair.
    # NOTE(review): column 5 is assumed to be the "reject" column — this
    # layout may differ across statsmodels versions; verify if upgrading.
    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0
    # Keep only the upper triangle, then mirror it to the lower one.
    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
    version of the paired T-test for use with non-parametric ANOVA.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": Pratt treatment, includes zero-differences in the ranking
        process (more conservative)
        "wilcox": Wilcox treatment, discards all zero-differences
        "zsplit": Zero rank split, just like Pratt, but spliting the zero rank
        between positive and negative ones
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the z-statistic.
        Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.
    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Fix the category order to first appearance so group labels
        # keep a deterministic ordering.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    groups = np.unique(x[_group_col])
    k = groups.size
    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    # Run the signed-rank test for every unordered pair of groups.
    for gi, gj in it.combinations(range(k), 2):
        sample_i = x.loc[x[_group_col] == groups[gi], _val_col]
        sample_j = x.loc[x[_group_col] == groups[gj], _val_col]
        pvals[gi, gj] = ss.wilcoxon(sample_i, sample_j,
                                    zero_method=zero_method,
                                    correction=correction)[1]
    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]
    # Mirror the upper triangle; the diagonal marks self-comparisons.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See
        statsmodels.sandbox.stats.multicomp for details. If None (default),
        no adjustment is applied.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are computed from the F-distribution.
    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.
    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))
    def compare(i, j):
        # Scheffe's F statistic for the (i, j) contrast.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val
    # NOTE: `np.float` was removed in NumPy 1.24; the builtin is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    combs = it.combinations(range(groups.size), 2)
    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])
    vs[tri_lower] = vs.T[tri_lower]
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)
    if p_adjust:
        # Honor the p_adjust argument (it was previously accepted but ignored).
        p_values[tri_upper] = multipletests(p_values[tri_upper], method=p_adjust)[1]
        p_values[tri_lower] = p_values.T[tri_lower]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.
    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.
    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    xi = x_grouped.mean()
    si = x_grouped.var()
    def compare(i, j):
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            # Welch-Satterthwaite approximation of the degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # Fixed: the variance series is `si` (the original referenced an
            # undefined `s2i`), and `any` takes a single iterable (the
            # original passed four positional arguments, a TypeError).
            ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
            ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
            ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
            OK = any([ok1, ok2, ok3, ok4])
            if not OK:
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val
    # NOTE: `np.float` was removed in NumPy 1.24; the builtin is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    combs = it.combinations(range(groups.size), 2)
    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])
    # Dunn-Sidak adjustment of the raw p values.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col = None, group_col = None, sort = False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are computed from the Tukey-distribution.
    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.
    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))
    def compare(i, j):
        # Studentized range statistic for the (i, j) pair.
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val
    # NOTE: `np.float` was removed in NumPy 1.24; the builtin is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    combs = it.combinations(range(groups.size), 2)
    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])
    # Convert |q| statistics to p values via the studentized range distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed to
    the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
    test is basically an extension of the U-test as re-ranking is conducted for
    each pairwise test [1]_, [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    Pandas DataFrame containing p values.
    Notes
    -----
    The p values are computed from the Tukey-distribution.
    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.
    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    counts = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    def _tie_term(ranks):
        # Tie-correction term: sum((t^3 - t) / 12) over each tied block.
        freq = ranks.value_counts().values
        return np.sum((freq ** 3 - freq) / 12.)
    def _pair_stat(gi, gj):
        # Re-rank only the two groups involved, then build the
        # standardized minimum Mann-Whitney U statistic.
        n_i = counts.loc[gi]
        n_j = counts.loc[gj]
        pair = x.loc[(x[_group_col] == gi) | (x[_group_col] == gj)].copy()
        pair['ranks'] = pair.loc[:, _val_col].rank()
        rank_sums = pair.groupby(_group_col)['ranks'].sum().loc[[gi, gj]]
        u = np.array([n_j * n_i + (n_j * (n_j + 1) / 2),
                      n_j * n_i + (n_i * (n_i + 1) / 2)]) - rank_sums
        u_min = np.min(u)
        m = n_i + n_j
        var = (n_j * n_i / (m * (m - 1.))) * ((m ** 3 - m) / 12. - _tie_term(pair['ranks']))
        return np.sqrt(2.) * (u_min - n_j * n_i / 2.) / np.sqrt(var)
    stats = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    for gi, gj in it.combinations(range(k), 2):
        stats[gi, gj] = _pair_stat(groups[gi], groups[gj])
    # Map |statistic| to p values via the studentized range distribution,
    # then mirror the upper triangle and mark self-comparisons.
    stats[upper] = psturng(np.abs(stats[upper]), k, np.inf)
    stats[lower] = stats.T[lower]
    np.fill_diagonal(stats, -1)
    return DataFrame(stats, index=groups, columns=groups)
|
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_wilcoxon | python | def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
'''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
version of the paired T-test for use with non-parametric ANOVA.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt": Pratt treatment, includes zero-differences in the ranking
process (more conservative)
"wilcox": Wilcox treatment, discards all zero-differences
"zsplit": Zero rank split, just like Pratt, but spliting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the z-statistic.
Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col and val_col or not.
Default is False.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.wilcoxon` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_wilcoxon(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
#x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.wilcoxon(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
zero_method=zero_method, correction=correction)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
version of the paired T-test for use with non-parametric ANOVA.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt": Pratt treatment, includes zero-differences in the ranking
process (more conservative)
"wilcox": Wilcox treatment, discards all zero-differences
"zsplit": Zero rank split, just like Pratt, but spliting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the z-statistic.
Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col and val_col or not.
Default is False.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.wilcoxon` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_wilcoxon(x) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1994-L2086 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.
    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.
    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''
    # Fall back to default column names when none are supplied.
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Ragged input: one sequence per group. Build a long-format frame
        # with 1-based integer group labels.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):
        # Two-column ndarray: infer which column holds values and which
        # holds group labels, unless the caller told us explicitly.
        if not(all([val_id, group_id])):
            if np.argmax(a.shape):
                a = a.T
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
            # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
            # documented replacement and behaves identically here.
            if np.diff(ax).item():
                # The column with more distinct entries is assumed to hold
                # the measurements, the other one the group labels.
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}
        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted
    DataFrame with the fixed columns 'y', 'groups', and 'blocks'.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Wide block-design data (rows = blocks, columns = groups) when
        `melted` is False, or already-melted data otherwise.
    y_col, group_col, block_col : str or int, optional
        Column names (or indices, for array input) identifying the
        response, group, and block columns of melted input.
    melted : bool, optional
        Whether `a` is already in melted (long) format.

    Returns
    -------
    tuple of (DataFrame, 'y', 'groups', 'blocks')
    '''
    if isinstance(a, DataFrame) and not melted:
        # Wide block-design DataFrame: rows are blocks, columns are groups.
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
    elif melted:
        # Already-melted input (DataFrame or anything indexable by the given
        # column keys): just relabel the three columns.
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})
    elif not isinstance(a, DataFrame):
        # Non-melted array-like: coerce to a wide DataFrame first, then melt.
        # NOTE: `melted` is necessarily False on this path (the previous
        # branch handles all melted input), so the original dead `else`
        # branch -- which would have raised a TypeError by assigning into an
        # immutable pandas Index (`x.columns[group_col] = ...`) -- has been
        # removed.
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction are employed according to Conover [1]_.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''
    def compare_conover(i, j):
        # Conover-Iman t statistic and two-sided p value for one pair of
        # groups; S2, H_cor, n and x_len come from the enclosing scope and
        # are computed once below before this closure is first called.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        D = (n - 1. - H_cor) / (n - x_len)
        t_value = diff / np.sqrt(S2 * B * D)
        p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
        return p_value
    # Normalize input into a long-format DataFrame with value/group columns.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve the original group order by making the column an ordered
        # Categorical before sorting.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()
    # Overall ranks across all observations (average rank for ties).
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    x_ranks_sum = x.groupby(_group_col)['ranks'].sum()
    # Tie correction factor: sum of t^3 - t over every rank value shared by
    # t > 1 observations.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    # Kruskal-Wallis H statistic, then corrected for ties.
    H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
    H_cor = H / x_ties
    # Variance estimate of the ranks; simplifies when there are no ties.
    if x_ties == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))
    # Fill the upper triangle with pairwise p values, optionally adjust for
    # multiplicity, mirror into the lower triangle, and mark the diagonal
    # with the sentinel -1.
    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    combs = it.combinations(range(x_len), 2)
    for i, j in combs:
        vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
    '''All-pairs comparison of mean rank sums according to Dunn, intended
    as a post hoc procedure after a significant Kruskal-Wallis one-way
    analysis of variance by ranks [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Two-dimensional input; the second dimension may vary, i.e. groups
        may have different lengths.
    val_col : str, optional
        Column holding the dependent (response) variable. Required when
        `a` is a pandas DataFrame object.
    group_col : str, optional
        Column holding the grouping (predictor) variable. Required when
        `a` is a pandas DataFrame object.
    p_adjust : str, optional
        P value adjustment method, passed to
        `statsmodels.sandbox.stats.multicomp.multipletests`. One of:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Whether to sort the DataFrame by group_col. Recommended unless
        you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        Matrix of pairwise p values (-1 on the diagonal).
    Notes
    -----
    Ranks are corrected for ties following Glantz (2012).
    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''
    # Normalize input into a long-format DataFrame and order it.
    frame, vcol, gcol = __convert_to_df(a, val_col, group_col)
    if not sort:
        frame[gcol] = Categorical(frame[gcol], categories=frame[gcol].unique(), ordered=True)
    frame.sort_values(by=[gcol, vcol], ascending=True, inplace=True)

    n = len(frame.index)
    group_labels = np.unique(frame[gcol])
    k = group_labels.size
    group_sizes = frame.groupby(gcol)[vcol].count()
    frame['ranks'] = frame[vcol].rank()
    mean_ranks = frame.groupby(gcol)['ranks'].mean()

    # Tie correction term (Glantz, 2012): sum of t^3 - t over tied ranks.
    rank_counts = frame.groupby('ranks').count()[vcol].values
    tied = rank_counts[rank_counts != 1]
    tie_sum = np.sum(tied ** 3 - tied)
    tie_sum = 0 if not tie_sum else tie_sum
    tie_term = tie_sum / (12. * (n - 1))

    def pair_p(gi, gj):
        # z statistic and two-sided normal p value for one pair of groups.
        delta = np.abs(mean_ranks.loc[gi] - mean_ranks.loc[gj])
        variance = (n * (n + 1.) / 12. - tie_term) * (1. / group_sizes.loc[gi] + 1. / group_sizes.loc[gj])
        zval = delta / np.sqrt(variance)
        return 2. * ss.norm.sf(np.abs(zval))

    # Upper triangle holds the raw (or adjusted) p values; the lower
    # triangle mirrors it and the diagonal is the sentinel -1.
    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    for gi in range(k - 1):
        for gj in range(gi + 1, k):
            pvals[gi, gj] = pair_p(group_labels[gi], group_labels[gj])
    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method = p_adjust)[1]
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=group_labels, columns=group_labels)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction will be employed according to Glantz (2012).
    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    def compare_stats_chi(i, j):
        # Chi-squared-distributed statistic for one pair of groups.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi
    def compare_stats_tukey(i, j):
        # Studentized-range-distributed statistic for one pair of groups.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q
    # Normalize input into a long-format DataFrame with value/group columns.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve the original group order by making the column an ordered
        # Categorical before sorting.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    # Tie correction factor (Glantz, 2012): sum of t^3 - t over tied ranks.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    # Upper triangle gets the pairwise p values; mirrored below, -1 on the
    # diagonal.
    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if dist == 'chi':
        # Chi-squared approximation; tie correction divides the statistic.
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
    elif dist == 'tukey':
        # Studentized range approximation; psturng expects q * sqrt(2).
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Calculate pairwise comparisons using Nemenyi post hoc test for
    unreplicated blocked data. This test is usually conducted post hoc if
    significant results of the Friedman's test are obtained. The statistics
    refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
    [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via Friedman's
    test. The consequent post hoc pairwise multiple comparison test
    according to Nemenyi is conducted with this function.
    This function does not test for ties.
    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.
    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Studentized-range statistic for a pair of groups; k groups and n
        # blocks come from the enclosing scope.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval
    # Normalize input into a melted DataFrame ('y', 'groups', 'blocks').
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[_group_col].unique()
    k = groups.size
    n = x[_block_col].unique().size
    # Friedman-type ranking: rank observations within each block.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].mean()
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # psturng expects q * sqrt(2); mirror the upper triangle afterwards and
    # mark the diagonal with -1.
    vs *= np.sqrt(2.)
    vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Student-t statistic and two-sided p value for a pair of groups;
        # A, B, m, n, k come from the enclosing scope.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval
    def compare_tukey(i, j):
        # Studentized-range ('single-step') alternative for a pair of groups.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval
    # Normalize input into a melted DataFrame ('y', 'groups', 'blocks').
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = np.unique(x[_group_col])
    k = groups.size
    n = x[_block_col].unique().size
    # Friedman-type ranking: rank observations within each block; R holds
    # the per-group rank sums.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()
    # m is the number of replicates per block/group cell (1: unreplicated).
    m = 1
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # NOTE(review): T2 uses np.sum(R) (the sum of rank sums) rather than a
    # sum of squared deviations of R as given in some statements of the
    # Conover procedure -- verify against Conover (1999) before relying on
    # exact p values here.
    T2 = 1 / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))
    # Upper triangle gets the pairwise p values; mirrored below, -1 on the
    # diagonal.
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if p_adjust == 'single-step':
        # Tukey-type comparison; no further multiplicity adjustment needed.
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test
    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.
    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
        [110,112,123,130,145],
        [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    # NOTE(review): the `p_adjust` argument currently has no effect (the
    # adjustment code below is commented out) and `sort` is never used --
    # confirm whether this is intentional.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    # Overall ranks; per-group mean ranks (Ri) and sizes (ni).
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    sigma = np.sqrt(n * (n + 1) / 12.)
    # df is set but unused below (psturng is called with np.inf directly).
    df = np.inf
    def compare(m, u):
        # Standardized differences between group u and each group index in m.
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)
    # For each ordered pair (i, j), the statistic is the maximum
    # standardized difference over the intermediate groups i..j-1.
    stat = np.zeros((k, k))
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            stat[j, i] = np.max(tmp)
    stat[stat < 0] = 0
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # NOTE(review): statistics are stored in the lower triangle
    # (stat[j, i]) but this mirroring copies the *upper* triangle over the
    # lower one -- verify the intended triangle orientation.
    p_values[tri_lower] = p_values.T[tri_lower]
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Normal-approximation z statistic for a pair of groups; k groups
        # and n blocks come from the enclosing scope.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval
    # Normalize input into a melted DataFrame ('y', 'groups', 'blocks').
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size
    # Friedman-type ranking: rank observations within each block.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()
    # The np.float alias was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Convert z statistics to two-sided normal p values, clip at 1, adjust
    # if requested, mirror into the lower triangle, and mark the diagonal.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    Pandas DataFrame containing p values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Studentized-range-type statistic for a pair of groups; k groups
        # and n blocks come from the enclosing scope.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval
    # Normalize input into a melted DataFrame ('y', 'groups', 'blocks').
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size
    # Friedman-type ranking: rank observations within each block.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()
    # The np.float alias was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Square the statistics and evaluate against chi-squared with k - 1 df;
    # mirror into the lower triangle and mark the diagonal with -1.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD).
    See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block
        design, i.e. rows are blocks, and columns are groups. In this case you
        do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Two-sided t test on the difference of within-block rank sums
        # (Conover's all-pairs procedure for a BIBD).
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        # Preserve the original group/block order instead of sorting lexically.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)                   # number of treatments
    b = x[block_col].unique().size    # number of blocks
    r = b                             # replications per treatment (balanced design)
    k = t                             # treatments per block (balanced design)

    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # np.float was removed in NumPy 1.20+; the builtin float is equivalent.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size
    # np.float was removed in NumPy 1.20+; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        # anderson_ksamp returns (statistic, critical_values, p_value);
        # forward `midrank` (previously accepted but silently ignored).
        vs[i, j] = ss.anderson_ksamp(
            [x.loc[x[_group_col] == groups[i], _val_col],
             x.loc[x[_group_col] == groups[j], _val_col]],
            midrank=midrank)[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # t-distributed statistic on the difference of weighted rank sums S.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Normal approximation on the scaled weighted rank sums W.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    x['r'] = x.groupby(block_col)[y_col].rank()
    # q: rank of each block's sample range (max - min), indexed by block value.
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # Use the actual block column name (the original hard-coded 'blocks',
    # which breaks with melted input and user-supplied column names).
    x['s'] = x.apply(lambda row: row['rr'] * q[row[block_col]], axis=1)
    x['w'] = x.apply(lambda row: row['r'] * q[row[block_col]], axis=1)

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # np.float was removed in NumPy 1.20+; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.

    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    n_perm : int, optional
        Number of permutations used to approximate the p value when the peak
        is unknown. Defaults to 100.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # __convert_to_df returns (DataFrame, val_col_name, group_col_name).
    # The original bound the whole tuple to `x`, so every x[...] below failed.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[_val_col].rank()                       # overall ranks
    n = x.groupby(_group_col)[_val_col].count()    # per-group sample sizes

    def _fn(Ri, Rj):
        # Mann-Whitney count: number of (i, j) pairs with Rj > Ri.
        return np.sum(Ri.apply(lambda z: Rj[Rj > z].size))

    def _ustat(Rij, g, k):
        # Pairwise U statistic matrix between all group levels.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A_p statistic for peak at position p.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p + 1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null expectation of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2) / 4

    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -
               np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: normal approximation for the standardized A_p.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: take the max standardized A_p and assess it with a
        # permutation null distribution.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # Fraction of permuted maxima exceeding the observed statistic.
        # (The original divided the exceeding values themselves by n_perm,
        # returning an array instead of a scalar p value.)
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.

    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size
    # Normal scores: inverse-normal transform of the overall ranks.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    param = k - 1
    A = aj / nj

    # np.float was removed in NumPy 1.20+; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    def compare_stats(i, j):
        # Conover-Iman style t test on the mean normal scores of two groups.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # Keep the (ragged) groups in a plain list: np.array() of
        # unequal-length sequences raises in modern NumPy.
        x_grouped = [x[val_col].iloc[j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        # dtype=object so ragged group lists are accepted; x is only used for
        # the isinstance check when formatting the result below.
        x = np.asarray(a, dtype=object)
        x_grouped = [np.asarray(row, dtype=float) for row in x]
        x_grouped = [row[~np.isnan(row)] for row in x_grouped]
        x_lens = np.asarray([len(row) for row in x_grouped])

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # np.float was removed in NumPy 1.20+; the builtin float is equivalent.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    def compare_pooled(i, j):
        # t test with a pooled standard deviation across ALL groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)

        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # Forward the caller's alpha (it was previously hard-coded to 0.05,
    # silently ignoring the parameter).
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # np.str / np.int were removed in NumPy 1.20+; use the builtins.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)

    vs = np.zeros((groups_len, groups_len), dtype=int)

    # Rows of the summary table: first row is the header, column 0/1 are the
    # two group labels and column 5 holds the reject decision.
    for row in result.summary()[1:]:
        group_a = str(row[0])
        group_b = str(row[1])
        ai = np.where(groups == group_a)[0][0]
        bi = np.where(groups == group_b)[0][0]
        vs[ai, bi] = 1 if str(row[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]

    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons between groups with the Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be applied.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Whether to compute the p value for the one-sided hypothesis
        ('less' or 'greater') or for the two-sided hypothesis ('two-sided').
        Defaults to 'two-sided'.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Freeze the observed group order so sort_values keeps it.
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)

    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    group_labels = np.unique(df[_group_col])
    n_groups = group_labels.size

    pvals = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)
    pvals[:, :] = 0

    # One Mann-Whitney U test per unordered pair of groups.
    for gi, gj in it.combinations(range(n_groups), 2):
        sample_i = df.loc[df[_group_col] == group_labels[gi], _val_col]
        sample_j = df.loc[df[_group_col] == group_labels[gj], _val_col]
        pvals[gi, gj] = ss.mannwhitneyu(sample_i,
                                        sample_j,
                                        use_continuity=use_continuity,
                                        alternative=alternative)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Symmetrize and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)

    return DataFrame(pvals, index=group_labels, columns=group_labels)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.

    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Unused; accepted only for signature consistency with the other
        post hoc functions in this module.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # per-group sizes
    n = ni.sum()             # total N
    xi = x_grouped.mean()    # per-group means
    si = x_grouped.var()     # per-group variances
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # Scheffe F statistic for the contrast between groups i and j.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # np.float was removed in NumPy 1.20+; the builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    combs = it.combinations(range(groups.size), 2)

    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_lower] = vs.T[tri_lower]

    # Survival function of F(k - 1, N - k) gives the p values.
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):

    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')

    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # per-group sample sizes
    xi = x_grouped.mean()    # per-group means
    si = x_grouped.var()     # per-group variances

    def compare(i, j):
        # Welch-type t statistic for a single pair of groups.
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)

        if welch:
            # Welch-Satterthwaite approximation of the degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) +
                            si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # BUG FIX: the original referenced the undefined name `s2i`
            # (should be `si`, raising NameError) and called any() with four
            # positional arguments instead of a single iterable (TypeError).
            ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
            ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and \
                  ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
            ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and \
                  (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and \
                  ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and \
                  (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and \
                  ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
            if not any([ok1, ok2, ok3, ok4]):
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.

        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin float instead. (The unused locals `n` and `sin` of the
    # original implementation were dropped; they were never read here.)
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the pairwise p values.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col=None, group_col=None, sort=False):

    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.

    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')

    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # per-group sample sizes
    n = ni.sum()             # total number of observations
    xi = x_grouped.mean()    # per-group means
    si = x_grouped.var()     # per-group variances
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized-range statistic q for a single pair of groups.
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin float instead.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # p values from the studentized range (Tukey) distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):

    '''Dwass, Steel, Critchlow and Fligner (DSCF) all-pairs comparison test for
    a one-factorial layout with non-normally distributed residuals. Unlike
    all-pairs procedures that rely on the joint Kruskal ranks, the DSCF test
    re-ranks the data separately for every pair of groups, so it is essentially
    a pairwise extension of the U-test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.

    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.

    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')

    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    counts = x.groupby(_group_col)[_val_col].count()
    k = groups.size

    def _tie_term(ranks):
        # Correction term contributed by tied ranks: sum((t^3 - t) / 12).
        freq = ranks.value_counts().values
        return np.sum((freq ** 3 - freq) / 12.)

    def _pair_stat(g1, g2):
        # Standardized Wilcoxon/U statistic for one pair, re-ranked from scratch.
        n1 = counts.loc[g1]
        n2 = counts.loc[g2]
        pair = x.loc[(x[_group_col] == g1) | (x[_group_col] == g2)].copy()
        pair['ranks'] = pair.loc[:, _val_col].rank()
        rank_sums = pair.groupby(_group_col)['ranks'].sum().loc[[g1, g2]]
        u_stats = np.array([n2 * n1 + (n2 * (n2 + 1) / 2),
                            n2 * n1 + (n1 * (n1 + 1) / 2)]) - rank_sums
        m = n1 + n2
        # Variance of U with tie correction.
        variance = (n2 * n1 / (m * (m - 1.))) * ((m ** 3 - m) / 12. - _tie_term(pair['ranks']))
        return np.sqrt(2.) * (np.min(u_stats) - n2 * n1 / 2.) / np.sqrt(variance)

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = _pair_stat(groups[gi], groups[gj])

    # p values from the studentized range distribution with infinite df.
    vs[upper] = psturng(np.abs(vs[upper]), k, np.inf)
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
|
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_tamhane | python | def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
'''Tamhane's T2 all-pairs comparison test for normally distributed data with
unequal variances. Tamhane's T2 test can be performed for all-pairs
comparisons in an one-factorial layout with normally distributed residuals
but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
tested. The null hypothesis is tested in the two-tailed test against the
alternative hypothesis [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
welch : bool, optional
If True, use Welch's approximate solution for calculating the degree of
freedom. T2 test uses the usual df = N - 2 approximation.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the t-distribution and adjusted according to
Dunn-Sidak.
References
----------
.. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
Means with Unequal Variances. Journal of the American Statistical Association,
74, 471-480.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col], ascending=True, inplace=True)
groups = x[_group_col].unique()
x_grouped = x.groupby(_group_col)[_val_col]
ni = x_grouped.count()
n = ni.sum()
xi = x_grouped.mean()
si = x_grouped.var()
sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))
def compare(i, j):
dif = xi[i] - xi[j]
A = si[i] / ni[i] + si[j] / ni[j]
t_val = dif / np.sqrt(A)
if welch:
df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
else:
## checks according to Tamhane (1979, p. 474)
ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
ok2 = (9./10. <= (s2i[i] / ni[i]) / (s2i[j] / ni[j])) and ((s2i[i] / ni[i]) / (s2i[j] / ni[j]) <= 10./9.)
ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (s2i[i] / ni[i]) / (s2i[j] / ni[j])) and ((s2i[i] / ni[i]) / (s2i[j] / ni[j]) <= 2.)
ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (s2i[i] / ni[i]) / (s2i[j] / ni[j])) and ((s2i[i] / ni[i]) / (s2i[j] / ni[j]) <= 4./3.)
OK = any(ok1, ok2, ok3, ok4)
if not OK:
print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
df = ni[i] + ni[j] - 2.
p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
return p_val
vs = np.zeros((groups.size, groups.size), dtype=np.float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(groups.size), 2)
for i,j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
vs[tri_lower] = vs.T[tri_lower]
vs[vs > 1] = 1
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | Tamhane's T2 all-pairs comparison test for normally distributed data with
unequal variances. Tamhane's T2 test can be performed for all-pairs
comparisons in an one-factorial layout with normally distributed residuals
but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
tested. The null hypothesis is tested in the two-tailed test against the
alternative hypothesis [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
welch : bool, optional
If True, use Welch's approximate solution for calculating the degree of
freedom. T2 test uses the usual df = N - 2 approximation.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the t-distribution and adjusted according to
Dunn-Sidak.
References
----------
.. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
Means with Unequal Variances. Journal of the American Statistical Association,
74, 471-480.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_tamhane(x, val_col='values', group_col='groups') | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L2186-L2294 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):

    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.

    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.

    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).

    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.

    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Ragged input: one sub-sequence per group -> melt into two columns.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):

        # BUG FIX: the original used `not all([val_id, group_id])`, which
        # wrongly treats a valid column index of 0 as "unspecified".
        if val_id is None or group_id is None:
            # Column ids not defined -- try to infer them: the column with
            # fewer unique values is assumed to be the grouping column.
            if np.argmax(a.shape):
                a = a.T

            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]

            # BUG FIX: np.asscalar() was removed in NumPy >= 1.23; use .item().
            if np.diff(ax).item():
                __val_col = int(np.argmax(ax))
                __group_col = int(np.argmin(ax))
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')

            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}

        # Materialize the view so DataFrame receives a concrete column list.
        cols_vals = list(dict(sorted(cols.items())).values())
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):

    '''Hidden helper that normalizes block-design input into a melted
    DataFrame with the fixed column names 'y', 'groups', and 'blocks'.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Block-design data. If `melted` is False, rows are blocks and columns
        are groups; otherwise `y_col`, `group_col` and `block_col` identify
        the melted columns.

    Returns
    -------
    Tuple of (melted DataFrame, 'y', 'groups', 'blocks').
    '''
    if isinstance(a, DataFrame) and not melted:
        # Wide DataFrame: rows are blocks, columns are groups -> melt it.
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    elif melted:
        # Already-melted input: pick the three columns by name.
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})

    elif not isinstance(a, DataFrame):
        # Non-melted array-like: coerce to a DataFrame first.
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))

        if not melted:
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
            x.columns.name = group_col
            x.index.name = block_col
            x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
        else:
            # BUG FIX: the original assigned into `x.columns[...]`, which
            # raises TypeError because a pandas Index is immutable; rename
            # the columns instead. (NOTE(review): this branch is currently
            # unreachable -- melted input is caught by the `elif melted:`
            # branch above -- but it is kept correct for safety.)
            x = x.rename(columns={group_col: 'groups',
                                  block_col: 'blocks',
                                  y_col: 'y'})
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'

    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):

    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction are employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')

    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    total = len(x.index)
    groups = np.unique(x[_group_col])
    k = groups.size
    group_sizes = x.groupby(_group_col)[_val_col].count()

    # Joint ranking over all observations.
    x['ranks'] = x[_val_col].rank()
    rank_means = x.groupby(_group_col)['ranks'].mean()
    rank_sums = x.groupby(_group_col)['ranks'].sum()

    # Tie correction factor.
    freq = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(freq[freq != 1] ** 3 - freq[freq != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (total ** 3. - total)])

    # Kruskal-Wallis H statistic, tie-corrected.
    H = (12. / (total * (total + 1.))) * np.sum(rank_sums ** 2 / group_sizes) - 3. * (total + 1.)
    H_cor = H / x_ties

    if x_ties == 1:
        S2 = total * (total + 1.) / 12.
    else:
        S2 = (1. / (total - 1.)) * (np.sum(x['ranks'] ** 2.) - (total * (((total + 1.) ** 2.) / 4.)))

    def _pair_p(gi, gj):
        # Conover's t statistic and two-sided p value for one pair of groups.
        diff = np.abs(rank_means.loc[gi] - rank_means.loc[gj])
        B = 1. / group_sizes.loc[gi] + 1. / group_sizes.loc[gj]
        D = (total - 1. - H_cor) / (total - k)
        t_value = diff / np.sqrt(S2 * B * D)
        return 2. * ss.t.sf(np.abs(t_value), df=total - k)

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(k, 1)
    tri_lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = _pair_p(groups[gi], groups[gj])

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):

    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Dunn's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.

    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')

    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    total = len(x.index)
    groups = np.unique(x[_group_col])
    k = groups.size
    group_sizes = x.groupby(_group_col)[_val_col].count()

    # Joint ranking over all observations.
    x['ranks'] = x[_val_col].rank()
    rank_means = x.groupby(_group_col)['ranks'].mean()

    # Tie correction term (Glantz, 2012).
    freq = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(freq[freq != 1] ** 3 - freq[freq != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = tie_sum / (12. * (total - 1))

    def _pair_p(gi, gj):
        # Dunn's z statistic and two-sided p value for one pair of groups.
        diff = np.abs(rank_means.loc[gi] - rank_means.loc[gj])
        A = total * (total + 1.) / 12.
        B = 1. / group_sizes.loc[gi] + 1. / group_sizes.loc[gj]
        z_value = diff / np.sqrt((A - x_ties) * B)
        return 2. * ss.norm.sf(np.abs(z_value))

    vs = np.zeros((k, k))
    tri_upper = np.triu_indices(k, 1)
    tri_lower = np.tril_indices(k, -1)

    for gi, gj in it.combinations(range(k), 2):
        vs[gi, gj] = _pair_p(groups[gi], groups[gj])

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):

    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).

    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)

    '''
    def compare_stats_chi(i, j):
        # Chi-squared statistic for one pair of groups.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi

    def compare_stats_tukey(i, j):
        # Studentized-range statistic q for one pair of groups.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q

    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)                  # total number of observations
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size      # number of groups k
    x_lens = x.groupby(_group_col)[_val_col].count()

    # Joint ranking over all observations.
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()

    # Tie correction factor: counts of each tied rank value feed sum(t^3 - t).
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    if dist == 'chi':
        # Chi-squared statistics are divided by the tie correction factor.
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties

        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)

    elif dist == 'tukey':
        # Tukey statistics are scaled by sqrt(2) before the psturng lookup.
        # NOTE(review): no tie correction is applied in this branch.
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)

        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)

    # Mirror the upper triangle into the lower one; diagonal is sentinel -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''All-pairs comparisons via the Nemenyi post hoc test for unreplicated
    blocked data. Usually run after a significant Friedman test; p values
    refer to the upper quantiles of the studentized range (Tukey)
    distribution [1]_, [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        With `melted` set to False (default), `a` is a block design matrix,
        i.e. rows are blocks and columns are groups, and no col arguments
        are needed.
        With `melted` set to True, y_col, block_col and group_col must
        identify the columns holding the values, blocks and groups
        (column indices for arrays, column names for DataFrames).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures (unreplicated block design) can
    also be conducted via Friedman's test; this function provides the
    corresponding Nemenyi all-pairs post hoc comparisons.
    This function does not test for ties.
    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.
    Examples
    --------
    >>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
    >>> # and columns are groups.
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    df, _y, _grp, _blk = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    df.sort_values(by=[_grp, _blk], ascending=True, inplace=True)
    df.dropna(inplace=True)

    groups = df[_grp].unique()
    k = groups.size                 # number of treatment groups
    n = df[_blk].unique().size      # number of blocks

    # Friedman-type ranking: rank within each block, then average per group.
    df['mat'] = df.groupby(_blk)[_y].rank()
    R = df.groupby(_grp)['mat'].mean()

    # Standardized absolute differences of mean ranks for every pair.
    denom = np.sqrt(k * (k + 1.) / (6. * n))
    vs = np.zeros((k, k))
    for i, j in it.combinations(range(k), 2):
        vs[i, j] = np.abs(R[groups[i]] - R[groups[j]]) / denom
    vs *= np.sqrt(2.)

    # p values from the studentized range distribution; mirror to lower half.
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    vs[upper] = psturng(vs[upper], k, np.inf)
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats(i, j):
        # Two-sided p value from Student's t for the rank-sum difference
        # of groups i and j, standardized by the pooled terms A and B.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval
    def compare_tukey(i, j):
        # Single-step variant: p value from the studentized range (Tukey)
        # distribution instead of Student's t.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval
    # Normalize input to a melted DataFrame with y/group/block columns.
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = np.unique(x[_group_col])
    k = groups.size                     # number of treatment groups
    n = x[_block_col].unique().size     # number of blocks
    # Friedman-type ranking within each block; R holds per-group rank sums.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()
    # m is the number of replicates per block/group cell (unreplicated design).
    m = 1
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # NOTE(review): Conover's T2 statistic is usually stated with the sum of
    # squared rank sums (sum Rj**2), but np.sum(R) here sums them unsquared —
    # verify against the reference implementation (Conover 1999 / PMCMRplus).
    T2 = 1 / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    # Fill the upper triangle with pairwise p values; 'single-step' uses the
    # Tukey distribution, anything else uses t p values plus an optional
    # multiple-testing correction.
    if p_adjust == 'single-step':
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle into the lower and mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Nashimoto and Wright's all-pairs comparison procedure (NPM test) for
    simply ordered mean ranksums. The NPM test extends Nemenyi's procedure
    to increasingly ordered alternatives [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values (see `statsmodels.sandbox.stats.multicomp`).
        NOTE(review): this argument is accepted but currently not applied —
        the adjustment step is disabled in the implementation.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test.
    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.
    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
        [110,112,123,130,145],
        [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size
    n = x.shape[0]

    # Overall ranks; Ri/ni are per-group mean ranks and sample sizes.
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    sigma = np.sqrt(n * (n + 1) / 12.)

    def _max_std_diff(lo, hi):
        # Largest standardized mean-rank difference of group `hi` versus
        # each candidate group in {lo, ..., hi-1}.
        diffs = [
            (Ri.loc[groups[hi]] - Ri.loc[groups[m]])
            / (sigma / np.sqrt(2) * np.sqrt(1. / ni.loc[groups[m]] + 1. / ni.loc[groups[hi]]))
            for m in range(lo, hi)
        ]
        return max(diffs)

    stat = np.zeros((k, k))
    for i in range(k - 1):
        for j in range(i + 1, k):
            stat[j, i] = _max_std_diff(i, j)
    stat[stat < 0] = 0

    p_values = psturng(stat, k, np.inf)
    # NOTE(review): statistics are stored in the lower triangle, yet the
    # mirroring below copies the upper triangle over it — preserved verbatim
    # from the original implementation; verify against the reference.
    tri_lower = np.tril_indices(k, -1)
    p_values[tri_lower] = p_values.T[tri_lower]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
        NOTE: sorting is currently disabled in the implementation.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic: standardized difference of mean within-block ranks.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size                     # number of treatment groups
    n = x[block_col].unique().size      # number of blocks

    # Friedman-type ranking: rank within each block, average per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Two-sided p values from the standard normal, clipped at 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
        NOTE: sorting is currently disabled in the implementation.
    Returns
    -------
    Pandas DataFrame containing p values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springer.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # q statistic: standardized difference of mean within-block ranks.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size                     # number of treatment groups
    n = x[block_col].unique().size      # number of blocks

    # Friedman-type ranking: rank within each block, average per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # p values: squared q statistics referred to the chi-square distribution.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    Pandas DataFrame containing p values.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Two-sided p value from Student's t for the rank-sum difference.
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    if not sort:
        # Preserve the original appearance order of groups and blocks.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)                     # number of treatments
    b = x[block_col].unique().size      # number of blocks
    r = b                               # replications per treatment (balanced design)
    k = t                               # treatments per block (balanced design)

    # Within-block ranks and per-group rank sums (Durbin's statistic terms).
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1              # degrees of freedom for the t statistic

    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.
    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        # Preserve the original appearance order of groups.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    # Pairwise two-sample Anderson-Darling tests; element [2] of the result
    # is the approximate significance level. Fix: forward the `midrank`
    # argument, which was previously accepted but silently ignored.
    for i, j in combs:
        vs[i, j] = ss.anderson_ksamp(
            [x.loc[x[_group_col] == groups[i], _val_col],
             x.loc[x[_group_col] == groups[j], _val_col]],
            midrank=midrank)[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    Pandas DataFrame containing p values.
    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # Two-sided p value from Student's t for weighted-rank sums.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Two-sided p value from the standard normal for scaled W sums.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    if not sort:
        # Preserve the original appearance order of groups and blocks.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)                     # number of treatment groups
    b = x[block_col].unique().size      # number of blocks

    # Within-block ranks; q ranks blocks by their range (Quade's weights).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1)/2
    # Weighted (centered) ranks per observation. Fix: the original code
    # hard-coded the column name 'blocks' inside these lambdas, which fails
    # whenever the block column has a different name; use block_col instead.
    x['s'] = x.apply(lambda row: row['rr'] * q[row[block_col]], axis=1)
    x['w'] = x.apply(lambda row: row['r'] * q[row[block_col]], axis=1)

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1)/2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.
    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decresing treatment
    effects [1]_, [2]_.
    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
    n_perm : int, optional
        Number of permutations used to obtain the p value when the peak `p`
        is unknown. Defaults to 100.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.
    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.
    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # BUG FIX: __convert_to_df returns (DataFrame, val_col, group_col) — the
    # original assigned the whole tuple to `x`, breaking every x[...] access.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    k = x[_group_col].unique().size
    if p:
        # p is 1-based (ordinal number of the peak group); validate the range.
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False
    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()
    def _fn(Ri, Rj):
        # Mann-Whitney-style count: number of pairs with an Rj value above Ri.
        return np.sum(Ri.apply(lambda z: Rj[Rj > z].size))
    def _ustat(Rij, g, k):
        # Pairwise U-statistic matrix between all group levels.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i,j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j,i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U
    def _ap(p, U):
        # Mack-Wolfe A_p statistic: U sums over the ascending slope up to the
        # peak plus the descending slope after it.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i+1, p+1):
                    tmp1 += U[i,j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i+1, k):
                    tmp2 += U[j,i]
        return tmp1 + tmp2
    def _n1(p, n):
        return np.sum(n[:p+1])
    def _n2(p, n):
        return np.sum(n[p:k])
    def _mean_at(p, n):
        # Null mean of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2)/4
    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -\
               np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +\
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var
    if p:
        # Peak known a priori: normal approximation for the standardized A_p.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean)/sd
        p_value = ss.norm.sf(stat)
    else:
        # Peak unknown: take the maximum standardized A_p over all candidate
        # peaks and obtain its null distribution by permutation of the ranks.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)
        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))
        mt = np.array(mt)
        # BUG FIX: the p value is the *fraction* of permutation maxima that
        # exceed the observed statistic — the original divided the raw array
        # of exceedances by n_perm instead of their count.
        p_value = mt[mt > stat].size / n_perm
    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.
    There is no tie correction applied in this function.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.
    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size
    # Normal scores: inverse-normal transform of the ranks.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))
    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    param = k - 1
    A = aj / nj
    # FIX: np.float alias was removed in NumPy 1.24 — use the builtin.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    def compare_stats(i, j):
        # Conover-Iman style t statistic on average normal scores.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts)/(n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df = n - k)
        return pval
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # FIX: keep groups in a plain list — wrapping unequal-length slices in
        # np.array() creates a ragged array, which raises on NumPy >= 1.24.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = np.array(a)
        # Drop NaN padding from each row; a plain list tolerates unequal sizes.
        x_grouped = [np.asarray(row)[~np.isnan(row)] for row in x]
        x_lens = np.asarray([len(g) for g in x_grouped])
    if any(x_lens == 0):
        raise ValueError("All groups must contain data")
    x_len = len(x_grouped)
    # FIX: np.float alias was removed in NumPy 1.24 — use the builtin.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    def compare_pooled(i, j):
        # t test using a single pooled SD across all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)
    combs = it.combinations(range(x_len), 2)
    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i,j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle and mark the diagonal with -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.
    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.
    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.
    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # BUG FIX: the `alpha` argument was silently ignored (hard-coded 0.05).
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # FIX: np.str / np.int aliases were removed in NumPy 1.24.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)
    vs = np.zeros((groups_len, groups_len), dtype=int)
    # Walk the summary table rows (skipping the header) and mark significant
    # pairs with 1.
    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0
    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    # Mirror the upper triangle into the lower one for a symmetric matrix.
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons of independent groups with the Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Input data; must be two-dimensional if not a DataFrame.
    val_col : str, optional
        DataFrame column with dependent (response) variable values.
        Required when `a` is a DataFrame.
    group_col : str, optional
        DataFrame column with independent (grouping) variable values.
        Required when `a` is a DataFrame.
    use_continuity : bool, optional
        Apply the 1/2 continuity correction. Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Alternative hypothesis for each pairwise test. Default 'two-sided'.
    p_adjust : str, optional
        P value adjustment method, passed to statsmodels `multipletests`
        (e.g. 'bonferroni', 'holm', 'fdr_bh', ...).
    sort : bool, optional
        Whether to sort the data by group and value columns.

    Returns
    -------
    result : pandas DataFrame
        Matrix of pairwise p values (-1 on the diagonal).

    Notes
    -----
    See `scipy.stats.mannwhitneyu` for details of the underlying test.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    groups = np.unique(df[_group_col])
    n_groups = groups.size
    pvals = np.zeros((n_groups, n_groups))
    # Run the U test on every unordered pair of groups; fill upper triangle.
    for i, j in it.combinations(range(n_groups), 2):
        sample_i = df.loc[df[_group_col] == groups[i], _val_col]
        sample_j = df.loc[df[_group_col] == groups[j], _val_col]
        pvals[i, j] = ss.mannwhitneyu(sample_i,
                                      sample_j,
                                      use_continuity=use_continuity,
                                      alternative=alternative)[1]
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)
    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]
    # Symmetrize and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with the Wilcoxon signed-rank test, a
    non-parametric analogue of the paired T-test for use after a
    non-parametric ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Input data; must be two-dimensional if not a DataFrame.
    val_col : str, optional
        DataFrame column with dependent (response) variable values.
        Required when `a` is a DataFrame.
    group_col : str, optional
        DataFrame column with independent (grouping) variable values.
        Required when `a` is a DataFrame.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        Treatment of zero differences: "pratt" keeps them in the ranking,
        "wilcox" discards them, "zsplit" splits their rank between the
        positive and negative sides.
    correction : bool, optional
        If True, apply the 0.5 continuity correction when computing the
        z-statistic. Default is False.
    p_adjust : str, optional
        P value adjustment method, passed to statsmodels `multipletests`
        (e.g. 'bonferroni', 'holm', 'fdr_bh', ...).
    sort : bool, optional
        Whether to treat groups as ordered categories. Default is False.

    Returns
    -------
    result : pandas DataFrame
        Matrix of pairwise p values (-1 on the diagonal).

    Notes
    -----
    See `scipy.stats.wilcoxon` for details of the underlying test.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    # NOTE: values are deliberately left in input order — the signed-rank
    # test pairs observations positionally.
    groups = np.unique(df[_group_col])
    n_groups = groups.size
    pvals = np.zeros((n_groups, n_groups))
    # Test every unordered pair of groups; fill the upper triangle.
    for i, j in it.combinations(range(n_groups), 2):
        first = df.loc[df[_group_col] == groups[i], _val_col]
        second = df.loc[df[_group_col] == groups[j], _val_col]
        pvals[i, j] = ss.wilcoxon(first, second,
                                  zero_method=zero_method, correction=correction)[1]
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)
    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]
    # Symmetrize and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are computed from the F-distribution.
    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.
    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))
    def compare(i, j):
        # Scheffe F statistic for the pair (i, j).
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val
    # FIX: np.float alias was removed in NumPy 1.24 — use the builtin.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    combs = it.combinations(range(groups.size), 2)
    for i,j in combs:
        vs[i, j] = compare(groups[i], groups[j])
    # Symmetrize the F statistics, convert to p values, mark the diagonal.
    vs[tri_lower] = vs.T[tri_lower]
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tukey(a, val_col = None, group_col = None, sort = False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are computed from the Tukey-distribution.
    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.
    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]
    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))
    def compare(i, j):
        # Studentized range statistic q for the pair (i, j).
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val
    # FIX: np.float alias was removed in NumPy 1.24 — use the builtin.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    combs = it.combinations(range(groups.size), 2)
    for i,j in combs:
        vs[i, j] = compare(groups[i], groups[j])
    # Convert q statistics to p values via the studentized range distribution,
    # then symmetrize and mark the diagonal.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner (DSCF) all-pairs comparison test
    for a one-factorial layout with non-normally distributed residuals.
    Unlike all-pairs procedures based on Kruskal ranks, the DSCF test extends
    the U-test by re-ranking the data separately for each pair [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Input data.
    val_col : str, optional
        DataFrame column with dependent (response) variable values.
        Required when `a` is a DataFrame.
    group_col : str, optional
        DataFrame column with independent (grouping) variable values.
        Required when `a` is a DataFrame.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(df[_group_col])
    counts = df.groupby(_group_col)[_val_col].count()
    k = groups.size
    def _tie_term(ranks):
        # Tie-correction term: sum of (t^3 - t) / 12 over tied-value counts.
        t = ranks.value_counts().values
        return np.sum((t ** 3 - t) / 12.)
    def _pair_stat(gi, gj):
        # Re-rank the pooled pair, compute the smaller U statistic, and return
        # the standardized (sqrt(2)-scaled) pair statistic.
        ni = counts.loc[gi]
        nj = counts.loc[gj]
        pair = df.loc[(df[_group_col] == gi) | (df[_group_col] == gj)].copy()
        pair['ranks'] = pair.loc[:, _val_col].rank()
        rank_sums = pair.groupby(_group_col)['ranks'].sum().loc[[gi, gj]]
        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - rank_sums
        u_min = np.min(u)
        s = ni + nj
        var = (nj * ni / (s * (s - 1.))) * ((s ** 3 - s) / 12. - _tie_term(pair['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)
    stats = np.zeros((k, k))
    for i, j in it.combinations(range(k), 2):
        stats[i, j] = _pair_stat(groups[i], groups[j])
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    # Convert to p values via the studentized range distribution, symmetrize,
    # and mark the diagonal.
    stats[upper] = psturng(np.abs(stats[upper]), k, np.inf)
    stats[lower] = stats.T[lower]
    np.fill_diagonal(stats, -1)
    return DataFrame(stats, index=groups, columns=groups)
|
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_tukey | python | def posthoc_tukey(a, val_col = None, group_col = None, sort = False):
'''Performs Tukey's all-pairs comparisons test for normally distributed data
with equal group variances. For all-pairs comparisons in an
one-factorial layout with normally distributed residuals and equal variances
Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
tested. The null hypothesis is tested in the two-tailed test against
the alternative hypothesis [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
.. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
Biometrics 5, 99-114.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_grouped = x.groupby(_group_col)[_val_col]
ni = x_grouped.count()
n = ni.sum()
xi = x_grouped.mean()
si = x_grouped.var()
sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))
def compare(i, j):
dif = xi[i] - xi[j]
A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
q_val = dif / np.sqrt(A)
return q_val
vs = np.zeros((groups.size, groups.size), dtype=np.float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(groups.size), 2)
for i,j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | Performs Tukey's all-pairs comparisons test for normally distributed data
with equal group variances. For all-pairs comparisons in an
one-factorial layout with normally distributed residuals and equal variances
Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
tested. The null hypothesis is tested in the two-tailed test against
the alternative hypothesis [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
.. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
Biometrics 5, 99-114.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_tukey(x, val_col='values', group_col='groups') | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L2297-L2385 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):

    '''Hidden helper method to create a DataFrame with input data for further
    processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.

    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as
        an input. It will be inferred from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.

    val_col : str
        Name of the column that contains dependent variable values.

    group_col : str
        Name of the column that contains independent variable values.

    Notes
    -----
    Inference algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''

    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        # Ragged input: one sub-sequence per group. Groups are labelled 1..k.
        grps_len = map(len, a)
        grps = list(it.chain(*[[i + 1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):

        # Column ids not defined — try to infer them. Heuristic: the column
        # with fewer unique values is assumed to hold the group labels.
        # NOTE(review): `all([val_id, group_id])` treats a legitimate column
        # index 0 as "not specified" — confirm whether that is intended.
        if not(all([val_id, group_id])):

            if np.argmax(a.shape):
                a = a.T

            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]

            # BUG FIX: np.asscalar() was removed in NumPy >= 1.23; testing the
            # two unique-counts for inequality is equivalent to checking that
            # their difference is nonzero.
            if ax[0] != ax[1]:
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')

            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}

        # Order the new column names by their positional index in `a`.
        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    '''Hidden helper that normalizes block-design input into a melted
    DataFrame with the fixed column names 'y', 'groups' and 'blocks'.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        Input data. If `melted` is False, `a` is a block-design matrix
        (rows are blocks, columns are groups). If `melted` is True,
        `y_col`, `group_col` and `block_col` identify the already-melted
        columns.
    y_col, block_col, group_col : str or int, optional
        Column names (DataFrame) or keys/indices of the y, block and group
        columns. Required when `melted` is True.
    melted : bool, optional
        Whether the data are already in long ("melted") format.

    Returns
    -------
    x : pandas DataFrame
        Melted data with columns 'y', 'groups' and 'blocks'.
    y_col, group_col, block_col : str
        Always the literal names 'y', 'groups', 'blocks'.
    '''
    if isinstance(a, DataFrame) and not melted:
        x = a.copy(deep=True)
        group_col = 'groups'
        block_col = 'blocks'
        y_col = 'y'
        # Melt the block-design matrix: columns become groups, index becomes blocks.
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)

    elif melted:
        x = DataFrame.from_dict({'groups': a[group_col],
                                 'blocks': a[block_col],
                                 'y': a[y_col]})

    elif not isinstance(a, DataFrame):
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))

        if not melted:
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'
            x.columns.name = group_col
            x.index.name = block_col
            x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
        else:
            # BUG FIX: a pandas Index does not support item assignment
            # (`x.columns[i] = ...` raises TypeError); rename columns instead.
            x = x.rename(columns={group_col: 'groups',
                                  block_col: 'blocks',
                                  y_col: 'y'})
            group_col = 'groups'
            block_col = 'blocks'
            y_col = 'y'

    return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):

    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Conover's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    sort : bool, optional
        Specifies whether to sort DataFrame by `group_col` or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction is employed according to Conover [1]_.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_conover(x, p_adjust = 'holm')
    '''

    def compare_conover(i, j):
        # Conover-Iman t statistic for the pair (i, j); two-sided p value
        # from Student's t distribution with n - k degrees of freedom.
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        D = (n - 1. - H_cor) / (n - x_len)
        t_value = diff / np.sqrt(S2 * B * D)
        p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
        return p_value

    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve order of first appearance instead of alphabetical order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)                                  # total number of observations
    x_groups_unique = np.unique(x[_group_col])        # group labels
    x_len = x_groups_unique.size                      # number of groups (k)
    x_lens = x.groupby(_group_col)[_val_col].count()  # per-group sample sizes

    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    x_ranks_sum = x.groupby(_group_col)['ranks'].sum()

    # Tie correction factor: 1 - sum(t^3 - t) / (n^3 - n), capped at 1,
    # where t is the multiplicity of each tied rank value.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    # Kruskal-Wallis H statistic, corrected for ties.
    H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
    H_cor = H / x_ties

    # Variance estimate of the ranks: closed form when there are no ties,
    # otherwise computed from the actual squared ranks.
    if x_ties == 1:
        S2 = n * (n + 1.) / 12.
    else:
        S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))

    vs = np.zeros((x_len, x_len))
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    # Fill the upper triangle with pairwise p values.
    combs = it.combinations(range(x_len), 2)
    for i, j in combs:
        vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])

    if p_adjust:
        # Adjust the upper triangle only, then mirror it to the lower one.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)   # diagonal marker: group compared with itself

    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):

    '''Dunn's post hoc test for pairwise multiple comparisons of mean rank
    sums, typically applied after a significant Kruskal-Wallis test [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional; the second dimension may vary, i.e.
        groups may have different lengths.

    val_col : str, optional
        Name of the DataFrame column with dependent variable values (response).
        Required when `a` is a pandas DataFrame.

    group_col : str, optional
        Name of the DataFrame column with the grouping (predictor) variable.
        Required when `a` is a pandas DataFrame.

    p_adjust : str, optional
        P value adjustment method; see `statsmodels.sandbox.stats.multicomp`.
        One of: 'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.

    sort : bool, optional
        Whether to sort the DataFrame by `group_col`. Recommended unless the
        data are sorted manually.

    Returns
    -------
    result : pandas DataFrame
        Matrix of pairwise p values (-1 on the diagonal).

    Notes
    -----
    A tie correction is applied following Glantz (2012).

    References
    ----------
    .. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
        Technometrics, 6, 241-252.
    .. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_dunn(x, p_adjust = 'holm')
    '''

    frame, v_col, g_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        frame[g_col] = Categorical(frame[g_col], categories=frame[g_col].unique(), ordered=True)
    frame.sort_values(by=[g_col, v_col], ascending=True, inplace=True)

    n_total = len(frame.index)
    uniq_groups = np.unique(frame[g_col])
    n_groups = uniq_groups.size
    counts = frame.groupby(g_col)[v_col].count()

    frame['ranks'] = frame[v_col].rank()
    mean_ranks = frame.groupby(g_col)['ranks'].mean()

    # Glantz tie correction term: sum(t^3 - t) / (12 * (n - 1)).
    rank_counts = frame.groupby('ranks').count()[v_col].values
    ties = rank_counts[rank_counts != 1]
    tie_sum = np.sum(ties ** 3 - ties)
    tie_sum = 0 if not tie_sum else tie_sum
    tie_term = tie_sum / (12. * (n_total - 1))

    def z_pvalue(gi, gj):
        # Two-sided normal p value of the standardized mean-rank difference.
        rank_diff = np.abs(mean_ranks.loc[gi] - mean_ranks.loc[gj])
        var_term = n_total * (n_total + 1.) / 12.
        size_term = 1. / counts.loc[gi] + 1. / counts.loc[gj]
        z = rank_diff / np.sqrt((var_term - tie_term) * size_term)
        return 2. * ss.norm.sf(np.abs(z))

    pvals = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    for gi, gj in it.combinations(range(n_groups), 2):
        pvals[gi, gj] = z_pvalue(uniq_groups[gi], uniq_groups[gj])

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method = p_adjust)[1]

    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=uniq_groups, columns=uniq_groups)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):

    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).

    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A tie correction will be employed according to Glantz (2012).

    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''

    def compare_stats_chi(i, j):
        # Chi-squared-type statistic for the pair (i, j).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi

    def compare_stats_tukey(i, j):
        # Studentized-range-type statistic for the pair (i, j).
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q

    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve order of first appearance instead of alphabetical order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    n = len(x.index)                                  # total number of observations
    x_groups_unique = np.unique(x[_group_col])        # group labels
    x_len = x_groups_unique.size                      # number of groups (k)
    x_lens = x.groupby(_group_col)[_val_col].count()  # per-group sample sizes

    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()

    # Tie correction (Glantz): 1 - sum(t^3 - t) / (n^3 - n), capped at 1.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])

    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    if dist == 'chi':
        # P values from chi-squared with k - 1 degrees of freedom; the
        # statistic is divided by the tie correction factor.
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties

        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)

    elif dist == 'tukey':
        # P values from the studentized range distribution; the sqrt(2)
        # factor converts the statistic to psturng's q convention.
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)

        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)   # diagonal marker: group compared with itself
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):

    '''Nemenyi all-pairs post hoc test for unreplicated blocked data, usually
    run after a significant Friedman test. P values come from the upper
    quantiles of the studentized range (Tukey) distribution [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is False (default), `a` is a block-design
        matrix: rows are blocks, columns are groups, and the col arguments
        are not needed. If `melted` is True, `y_col`, `block_col` and
        `group_col` must name (DataFrame) or index (array) the respective
        columns.

    y_col : str or int
        Column with y data; required for melted input.

    block_col : str or int
        Column with blocking factor values; required for melted input.

    group_col : str or int
        Column with treatment (group) factor values; required for melted input.

    melted : bool, optional
        Whether data are given as melted columns "y", "blocks", "groups".

    sort : bool, optional
        Accepted for interface compatibility; currently unused.

    Returns
    -------
    result : pandas DataFrame
        Matrix of pairwise p values (-1 on the diagonal).

    Notes
    -----
    This is the post hoc companion of a Friedman test (one-way ANOVA with
    repeated measures / unreplicated block design). Ties are not tested for.

    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''

    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    data, _y, _g, _b = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    data.sort_values(by=[_g, _b], ascending=True, inplace=True)
    data.dropna(inplace=True)

    groups = data[_g].unique()
    k = groups.size                   # number of groups
    n = data[_b].unique().size        # number of blocks

    # Mean within-block rank per group.
    data['mat'] = data.groupby(_b)[_y].rank()
    avg_ranks = data.groupby(_g)['mat'].mean()

    denom = np.sqrt(k * (k + 1.) / (6. * n))
    qmat = np.zeros((k, k))
    for gi, gj in it.combinations(range(k), 2):
        qmat[gi, gj] = np.abs(avg_ranks[groups[gi]] - avg_ranks[groups[gj]]) / denom

    # Scale to psturng's q convention, then map the upper triangle to p values.
    qmat *= np.sqrt(2.)
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)
    qmat[upper] = psturng(qmat[upper], k, np.inf)
    qmat[lower] = qmat.T[lower]
    np.fill_diagonal(qmat, -1)

    return DataFrame(qmat, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):

    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).

    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.

    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.

    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.

    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''

    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Conover t-type statistic; two-sided p value from Student's t with
        # (m*n*k - k - n + 1) degrees of freedom.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval

    def compare_tukey(i, j):
        # Studentized-range version, used only with p_adjust == 'single-step'.
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval

    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    #if not sort:
    #    x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    #    x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = np.unique(x[_group_col])
    k = groups.size                    # number of treatments (groups)
    n = x[_block_col].unique().size    # number of blocks

    # Within-block ranks and per-group rank sums.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()         # sum of squared ranks
    m = 1                              # replicates per cell (unreplicated design)

    # Conover (1999) variance components.
    # NOTE(review): Conover's T2 is usually based on squared rank sums;
    # `np.sum(R)` here (not `np.sum(R**2)` or a squared deviation) looks
    # suspect — verify against the reference before relying on p values.
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    T2 = 1 / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))

    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0

    if p_adjust == 'single-step':
        # Tukey-based p values: already adjusted, no further correction.
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)

        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)   # diagonal marker: group compared with itself
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):

    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    sort : bool, optional
        If True, sort data by block and group columns.
        NOTE(review): currently ignored — data are always sorted by group.

    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        NOTE(review): currently ignored — the adjustment code is commented out.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test

    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.

    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
                      [110,112,123,130,145],
                      [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''

    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()       # mean rank per group
    ni = x.groupby(_group_col)[_val_col].count()     # per-group sample sizes
    k = groups.size                                  # number of groups
    n = x.shape[0]                                   # total observations
    sigma = np.sqrt(n * (n + 1) / 12.)               # rank std. dev. under H0
    df = np.inf                                      # NOTE(review): unused variable

    def compare(m, u):
        # Standardized mean-rank differences between group index u and every
        # group index in m.
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)

    stat = np.zeros((k, k))

    # For each ordered pair (i, j), i < j, take the maximum standardized
    # difference over all intermediate groups (ordered alternative).
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)

            stat[j, i] = np.max(tmp)

    stat[stat < 0] = 0
    p_values = psturng(stat, k, np.inf)

    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    p_values[tri_lower] = p_values.T[tri_lower]

    # NOTE(review): `p_adjust` is accepted but ignored — the adjustment code
    # below is commented out. Confirm the intended behavior with the authors.
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):

    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).

    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.

    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.

    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.

    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".

    sort : bool, optional
        If True, sort data by block and group columns.

    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''

    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # z statistic for the pair (i, j) based on mean within-block ranks.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size                    # number of groups
    n = x[block_col].unique().size     # number of blocks

    # Mean within-block rank per group.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # BUG FIX: the `np.float` alias was deprecated in NumPy 1.20 and removed
    # in NumPy 1.24; the builtin `float` is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Convert z statistics to two-sided normal p values, capped at 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.

    if p_adjust:
        # Adjust the upper triangle only, then mirror it to the lower one.
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]

    np.fill_diagonal(vs, -1)   # diagonal marker: group compared with itself
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.

    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks and columns are groups. If `melted`
        is True, `y_col`, `block_col` and `group_col` identify the melted
        columns (names for a DataFrame, indices for an array).
    y_col : str or int
        Name/index of the column that contains y data (melted data only).
    block_col : str or int
        Name/index of the column that contains blocking factor values
        (melted data only).
    group_col : str or int
        Name/index of the column that contains treatment (group) factor
        values (melted data only).
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    Pandas DataFrame containing p values.

    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.

    References
    ----------
    .. [1] J. Bortz, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springer.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference.
        New York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for
        Psychology Students. New Brunswick: Aldine Transaction.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # Difference of mean ranks scaled by the Friedman standard error.
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size

    # Within-block ranks and their per-group means.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    # Squared statistics follow a chi-square distribution with k - 1 df.
    vs = ss.chi2.sf(vs ** 2, k - 1)
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD).
    See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks and columns are groups. If `melted`
        is True, `y_col`, `block_col` and `group_col` identify the melted
        columns (names for a DataFrame, indices for an array).
    y_col : str or int
        Name/index of the column that contains y data (melted data only).
    block_col : str or int
        Name/index of the column that contains blocking factor values
        (melted data only).
    group_col : str or int
        Name/index of the column that contains treatment (group) factor
        values (melted data only).
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values, passed to statsmodels' multipletests:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', or 'fdr_tsbky'.

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons
        procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats(i, j):
        # t-statistic on rank-sum differences (Conover-Iman style).
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=df)
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    t = len(groups)                   # number of treatments
    b = x[block_col].unique().size    # number of blocks
    r = b                             # replications per treatment (balanced design)
    k = t                             # treatments per block (complete blocks)

    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
    df = b * k - b - t + 1

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If True (default),
        the midrank test applicable to continuous and discrete populations is
        performed. If False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values, passed to statsmodels' multipletests:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', or 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    k = groups.size

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((k, k), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(k), 2):
        # Forward the documented `midrank` option (it was previously ignored).
        vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col],
                                      x.loc[x[_group_col] == groups[j], _val_col]],
                                     midrank=midrank)[2]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. If `melted` is False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks and columns are groups. If `melted`
        is True, `y_col`, `block_col` and `group_col` identify the melted
        columns (names for a DataFrame, indices for an array).
    y_col : str or int, optional
        Name/index of the column that contains y data (melted data only).
    block_col : str or int
        Name/index of the column that contains blocking factor values
        (melted data only).
    group_col : str or int
        Name/index of the column that contains treatment (group) factor
        values (melted data only).
    dist : str, optional
        Method for determining p values. The default distribution is "t",
        else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values, passed to statsmodels' multipletests:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', or 'fdr_tsbky'.

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. Edition, Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148:
        Dataplot Reference Manual, Volume 2. NIST Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of
        complete blocks with additive block effects. Journal of the American
        Statistical Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')

    def compare_stats_t(i, j):
        # t-distributed statistic with (b - 1)(k - 1) degrees of freedom.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval

    def compare_stats_norm(i, j):
        # Standard-normal statistic on scaled W sums.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval

    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)

    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)

    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size

    # Within-block ranks and block weights q (ranks of the block ranges).
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1)/2
    # NOTE(review): the hard-coded 'blocks' key assumes __convert_to_block_df
    # renames the block column to 'blocks' -- verify against that helper.
    # (Lambda parameter renamed to `row` to avoid shadowing the DataFrame `x`.)
    x['s'] = x.apply(lambda row, y: row['rr'] * y[row['blocks']], axis=1, args=(q,))
    x['w'] = x.apply(lambda row, y: row['r'] * y[row['blocks']], axis=1, args=(q,))

    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1)/2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decreasing treatment
    effects [1]_, [2]_.

    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical).
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None
        (peak unknown; a permutation p value is computed).
    n_perm : int, optional
        Number of permutations used when the peak is unknown.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Currently unused; kept for interface compatibility.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # __convert_to_df returns (DataFrame, val_col, group_col); the original
    # assigned the whole tuple to x, which broke every later x[...] lookup.
    x, val_col, group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
    x.sort_values(by=[group_col], ascending=True, inplace=True)

    k = x[group_col].unique().size

    if p:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[val_col].rank()
    n = x.groupby(group_col)[val_col].count()

    def _fn(Ri, Rj):
        # Number of (i, j) pairs with Rj element strictly greater.
        return np.sum(Ri.apply(lambda v: Rj[Rj > v].size))

    def _ustat(Rij, g, k):
        # Pairwise Mann-Whitney U counts for all group pairs.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[group_col] == levels[i]], Rij[x[group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[group_col] == levels[j]], Rij[x[group_col] == levels[i]])
        return U

    def _ap(p, U):
        # Mack-Wolfe A_p statistic for peak index p.
        tmp1 = 0
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p+1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Null mean of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2)/4

    def _var_at(p, n):
        # Null variance of A_p.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -\
               np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +\
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
        return var

    if p:
        # Known peak: asymptotic normal p value.
        if (x.groupby(val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean)/sd
        p_value = ss.norm.sf(stat)
    else:
        # Unknown peak: maximize the standardized A_p and compare against a
        # permutation distribution of maxima.
        U = _ustat(Rij, x[group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)
        p = A == stat
        est = None

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # Share of permutation maxima exceeding the observed statistic; the
        # original divided the filtered array itself rather than its count.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values, passed to statsmodels' multipletests:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', or 'fdr_tsbky'.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions
    for one-way-ANOVA and subsequent post hoc tests, the van der Waerden test
    using normal scores can be employed. There is no tie correction applied
    in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons
        procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample
        problem and their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size

    # Van der Waerden normal scores: ranks mapped through the normal quantile.
    r = ss.rankdata(x[_val_col])
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    def compare_stats(i, j):
        # t test on normal-score group means with n - k degrees of freedom.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts)/(n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((k, k), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional; groups may have unequal
        lengths and may contain NaN values (ignored).
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Must be specified if `a` is a DataFrame.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Must be specified if `a` is a
        DataFrame.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all comparisons
        (useful if some groups are small). This method does not call scipy's
        ttest_ind(), so `equal_var` is ignored. Default is False.
    equal_var : bool, optional
        If True (default), perform a standard independent test that assumes
        equal population variances [1]_. If False, perform Welch's t-test [2]_.
    p_adjust : str, optional
        Method for adjusting p values, passed to statsmodels' multipletests:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', or 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input data
    type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # A plain list avoids building a (possibly ragged) object ndarray.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = a
        # Convert each group separately so ragged inputs (groups of unequal
        # length, as in the docstring example) do not crash np.array() on
        # modern NumPy, then drop NaNs per group.
        x_grouped = [np.asarray(row, dtype=float) for row in a]
        x_grouped = [row[~np.isnan(row)] for row in x_grouped]
        x_lens = np.asarray([row.size for row in x_grouped])

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # np.float was removed in NumPy 1.24; use the builtin float instead.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(x_len, 1)
    tri_lower = np.tril_indices(x_len, -1)

    def compare_pooled(i, j):
        # t test using a single pooled SD estimate across all groups.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1 / x_lens[i] + 1 / x_lens[j])
        t_value = diff / se_diff
        return 2 * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)
        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    else:
        return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        dependent variable values (test or response variable). Values should
        have a non-nominal scale. NaN values will cause an error (please
        handle manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not
        significant), 1 is True (significant), and -1 is for diagonal
        elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # Pass the user-supplied alpha through (it was previously hard-coded
    # to 0.05, silently ignoring the parameter).
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # np.str / np.int were removed in NumPy 1.24; use the builtins.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)

    vs = np.zeros((groups_len, groups_len), dtype=int)

    for row in result.summary()[1:]:
        group_a = str(row[0])
        group_b = str(row[1])
        a_idx = np.where(groups == group_a)[0][0]
        b_idx = np.where(groups == group_b)[0][0]
        # Column 5 of the summary is the 'reject' flag.
        vs[a_idx, b_idx] = 1 if str(row[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]

    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values
        (test or response variable). Values should have a non-nominal scale.
        Must be specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into account.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Whether to get the p-value for the one-sided hypothesis ('less' or
        'greater') or for the two-sided hypothesis ('two-sided'). Defaults
        to 'two-sided'.
    p_adjust : str, optional
        Method for adjusting p values, passed to statsmodels' multipletests:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', or 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    groups = np.unique(df[_group_col])
    n_groups = groups.size

    pvals = np.zeros((n_groups, n_groups))
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)

    # Slice each group's observations once instead of inside the loop.
    samples = {g: df.loc[df[_group_col] == g, _val_col] for g in groups}

    for i, j in it.combinations(range(n_groups), 2):
        pvals[i, j] = ss.mannwhitneyu(samples[groups[i]],
                                      samples[groups[j]],
                                      use_continuity=use_continuity,
                                      alternative=alternative)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with the Wilcoxon signed-rank test, a non-parametric
    version of the paired T-test for use with non-parametric ANOVA.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": Pratt treatment, includes zero-differences in the ranking
        process (more conservative);
        "wilcox": Wilcox treatment, discards all zero-differences;
        "zsplit": zero rank split, like Pratt, but splits the zero rank
        between positive and negative ones.
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the z-statistic.
        Default is False.
    p_adjust : str, optional
        Method for adjusting p values (see
        `statsmodels.sandbox.stats.multicomp` for details). Available methods:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col and val_col or not.
        Default is False.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.
    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Preserve the order of first appearance instead of lexicographic order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    groups = np.unique(x[_group_col])
    k = groups.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    # One signed-rank test per unordered pair of groups; only the upper
    # triangle is filled directly, the lower one is mirrored afterwards.
    for gi, gj in it.combinations(range(k), 2):
        sample_i = x.loc[x[_group_col] == groups[gi], _val_col]
        sample_j = x.loc[x[_group_col] == groups[gj], _val_col]
        pvals[gi, gj] = ss.wilcoxon(sample_i, sample_j,
                                    zero_method=zero_method,
                                    correction=correction)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)
    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.
    A total of m = k(k-1)/2 hypotheses can be tested.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Accepted for signature compatibility with the other posthoc functions
        but currently ignored: Scheffe's procedure already controls the
        family-wise error rate through the F-distribution.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are computed from the F-distribution.
    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.
    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in order of first appearance rather than sorted order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])

    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # group sizes
    n = ni.sum()             # total sample size
    xi = x_grouped.mean()    # group means
    si = x_grouped.var()     # group variances
    # Pooled (within-group) variance estimate.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # F statistic for the contrast between groups i and j.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    combs = it.combinations(range(groups.size), 2)

    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_lower] = vs.T[tri_lower]

    # Convert the symmetric F-statistic matrix to p values at once.
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.
    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.
    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in order of first appearance rather than sorted order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = x[_group_col].unique()

    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()   # group sizes
    xi = x_grouped.mean()    # group means
    si = x_grouped.var()     # group variances

    def compare(i, j):
        # Two-sample t statistic with separate (unpooled) group variances.
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)

        if welch:
            # Welch-Satterthwaite approximation of the degrees of freedom.
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474).
            # BUG FIX: the original referenced an undefined name `s2i`
            # (NameError whenever welch=False); the group variances live in `si`.
            ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
            ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
            ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
            # BUG FIX: any() takes a single iterable, not four positional args
            # (the original call raised TypeError).
            OK = any((ok1, ok2, ok3, ok4))
            if not OK:
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.

        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    combs = it.combinations(range(groups.size), 2)

    for i, j in combs:
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the raw p values, then clip to [0, 1].
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
    '''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
    one-factorial layout with non-normally distributed residuals. As opposed to
    the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
    test is basically an extension of the U-test as re-ranking is conducted for
    each pairwise test [1]_, [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    Pandas DataFrame containing p values.
    Notes
    -----
    The p values are computed from the Tukey-distribution.
    References
    ----------
    .. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
        comparisons in the one-way analysis of variance, Communications in
        Statistics - Theory and Methods, 20, 127-139.
    .. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
        Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
        University Press.
    .. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
        treatments, Technometrics, 2, 197-207.
    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        # Keep groups in order of first appearance rather than sorted order.
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)

    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    counts = x.groupby(_group_col)[_val_col].count()
    k = groups.size

    def _tie_term(ranks):
        # Tie correction term: sum of (t^3 - t)/12 over tied-rank multiplicities.
        freq = ranks.value_counts().values
        return np.sum((freq ** 3 - freq) / 12.)

    def _statistic(gi, gj):
        # Re-rank the pooled pair of groups, then form the standardized
        # Mann-Whitney-type statistic for this pair.
        ni = counts.loc[gi]
        nj = counts.loc[gj]
        pair = x.loc[(x[_group_col] == gi) | (x[_group_col] == gj)].copy()
        pair['ranks'] = pair.loc[:, _val_col].rank()
        rank_sums = pair.groupby(_group_col)['ranks'].sum().loc[[gi, gj]]
        u = np.array([nj * ni + (nj * (nj + 1) / 2),
                      nj * ni + (ni * (ni + 1) / 2)]) - rank_sums
        u_min = np.min(u)
        s = ni + nj
        var = (nj * ni / (s * (s - 1.))) * ((s ** 3 - s) / 12. - _tie_term(pair['ranks']))
        return np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)

    vs = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    for i, j in it.combinations(range(k), 2):
        vs[i, j] = _statistic(groups[i], groups[j])

    # Map |statistic| through the studentized range distribution, then mirror.
    vs[upper] = psturng(np.abs(vs[upper]), k, np.inf)
    vs[lower] = vs.T[lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
|
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_dscf | python | def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
'''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
one-factorial layout with non-normally distributed residuals. As opposed to
the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
test is basically an extension of the U-test as re-ranking is conducted for
each pairwise test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
comparisons in the one-way analysis of variance, Communications in
Statistics - Theory and Methods, 20, 127-139.
.. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
University Press.
.. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
treatments, Technometrics, 2, 197-207.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_grouped = x.groupby(_group_col)[_val_col]
n = x_grouped.count()
k = groups.size
def get_ties(x):
t = x.value_counts().values
c = np.sum((t ** 3 - t) / 12.)
return c
def compare(i, j):
ni = n.loc[i]
nj = n.loc[j]
x_raw = x.loc[(x[_group_col] == i) | (x[_group_col] == j)].copy()
x_raw['ranks'] = x_raw.loc[:, _val_col].rank()
r = x_raw.groupby(_group_col)['ranks'].sum().loc[[i, j]]
u = np.array([nj * ni + (nj * (nj + 1) / 2),
nj * ni + (ni * (ni + 1) / 2)]) - r
u_min = np.min(u)
s = ni + nj
var = (nj*ni/(s*(s - 1.))) * ((s**3 - s)/12. - get_ties(x_raw['ranks']))
p = np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)
return p
vs = np.zeros((k, k))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(k), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = psturng(np.abs(vs[tri_upper]), k, np.inf)
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
one-factorial layout with non-normally distributed residuals. As opposed to
the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
test is basically an extension of the U-test as re-ranking is conducted for
each pairwise test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
comparisons in the one-way analysis of variance, Communications in
Statistics - Theory and Methods, 20, 127-139.
.. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
University Press.
.. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
treatments, Technometrics, 2, 197-207.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_dscf(x, val_col='values', group_col='groups') | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L2388-L2490 | [
"def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):\n\n '''Hidden helper method to create a DataFrame with input data for further\n processing.\n\n Parameters\n ----------\n a : array_like or pandas DataFrame object\n An array, any object exposing the array inter... | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
'''Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
Inferrence algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors.
'''
if not group_col:
group_col = 'groups'
if not val_col:
val_col = 'vals'
if isinstance(a, DataFrame):
x = a.copy()
if not {group_col, val_col}.issubset(a.columns):
raise ValueError('Specify correct column names using `group_col` and `val_col` args')
return x, val_col, group_col
elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
grps_len = map(len, a)
grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
vals = list(it.chain(*a))
return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
elif isinstance(a, np.ndarray):
# cols ids not defined
# trying to infer
if not(all([val_id, group_id])):
if np.argmax(a.shape):
a = a.T
ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
if np.asscalar(np.diff(ax)):
__val_col = np.argmax(ax)
__group_col = np.argmin(ax)
else:
raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
cols = {__val_col: val_col,
__group_col: group_col}
else:
cols = {val_id: val_col,
group_id: group_col}
cols_vals = dict(sorted(cols.items())).values()
return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
if isinstance(a, DataFrame) and not melted:
x = a.copy(deep=True)
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
x.columns.name = group_col
x.index.name = block_col
x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
elif melted:
x = DataFrame.from_dict({'groups': a[group_col],
'blocks': a[block_col],
'y': a[y_col]})
elif not isinstance(a, DataFrame):
x = np.array(a)
x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
if not melted:
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
x.columns.name = group_col
x.index.name = block_col
x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
else:
x.columns[group_col] = 'groups'
x.columns[block_col] = 'blocks'
x.columns[y_col] = 'y'
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Conover's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by `group_col` or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction are employed according to Conover [1]_.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_conover(x, p_adjust = 'holm')
'''
def compare_conover(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
D = (n - 1. - H_cor) / (n - x_len)
t_value = diff / np.sqrt(S2 * B * D)
p_value = 2. * ss.t.sf(np.abs(t_value), df = n - x_len)
return p_value
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
n = len(x.index)
x_groups_unique = np.unique(x[_group_col])
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
x_ranks_sum = x.groupby(_group_col)['ranks'].sum()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
H = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
H_cor = H / x_ties
if x_ties == 1:
S2 = n * (n + 1.) / 12.
else:
S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i, j in combs:
vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
'''
def compare_dunn(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
A = n * (n + 1.) / 12.
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
z_value = diff / np.sqrt((A - x_ties) * B)
p_value = 2. * ss.norm.sf(np.abs(z_value))
return p_value
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
n = len(x.index)
x_groups_unique = np.unique(x[_group_col])
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = tie_sum / (12. * (n - 1))
vs = np.zeros((x_len, x_len))
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i,j in combs:
vs[i, j] = compare_dunn(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
    '''Post hoc pairwise test for multiple comparisons of mean rank sums
    (Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
    variance by ranks to do pairwise comparisons [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional. Second dimension may vary,
        i.e. groups may have different lengths.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared), else "tukey" (studentized range).
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A tie correction will be employed according to Glantz (2012).
    References
    ----------
    .. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 395-397, 662-664.
    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_nemenyi(x)
    '''
    # Chi-squared form of the pairwise statistic: squared difference of
    # mean rank sums scaled by the rank-variance term A * B.
    def compare_stats_chi(i, j):
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        A = n * (n + 1.) / 12.
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        chi = diff ** 2. / (A * B)
        return chi
    # Studentized-range (Tukey) form of the same pairwise statistic.
    def compare_stats_tukey(i, j):
        diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
        B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
        q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
        return q
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    # When sort is disabled, freeze the original group order with an ordered
    # Categorical so groupby/unique keep appearance order after sorting.
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
    n = len(x.index)  # total number of observations in the pooled sample
    x_groups_unique = np.unique(x[_group_col])
    x_len = x_groups_unique.size
    x_lens = x.groupby(_group_col)[_val_col].count()
    # Ranks are computed over the pooled sample (Kruskal-Wallis style).
    x['ranks'] = x[_val_col].rank()
    x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
    # ties
    # Tie correction (Glantz 2012): 1 - sum(t^3 - t) / (n^3 - n),
    # where t are the sizes of groups of tied ranks; capped at 1.
    vals = x.groupby('ranks').count()[_val_col].values
    tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
    tie_sum = 0 if not tie_sum else tie_sum
    x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
    vs = np.zeros((x_len, x_len))
    combs = it.combinations(range(x_len), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if dist == 'chi':
        # Tie-corrected chi-squared statistics -> survival-function p values
        # with k - 1 degrees of freedom.
        for i,j in combs:
            vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
        vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
    elif dist == 'tukey':
        # Studentized-range statistics; sqrt(2) rescales to the form
        # expected by psturng (studentized range quantile function).
        for i,j in combs:
            vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
        vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
    # Mirror the upper triangle into the lower one; diagonal is marked -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Nemenyi all-pairs comparison test for unreplicated blocked data.
    Typically run after a significant Friedman test. P values are taken
    from the upper quantiles of the studentized range (Tukey)
    distribution [1]_, [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. With `melted=False` (default), `a` is a block-design
        matrix: rows are blocks, columns are groups, and the col arguments
        are not needed. With `melted=True`, y_col, block_col and group_col
        must name (DataFrame) or index (array) the columns holding the
        response, blocking and grouping values respectively.
    y_col : str or int
        Column with y data; required for DataFrame input.
    block_col : str or int
        Column with blocking factor values; required for DataFrame input.
    group_col : str or int
        Column with treatment (group) factor values; required for
        DataFrame input.
    melted : bool, optional
        True if data come as melted columns "y", "blocks", "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    Ties are not tested for.
    References
    ----------
    .. [1] J. Demsar (2006), Statistical comparisons of classifiers over
        multiple data sets, Journal of Machine Learning Research, 7, 1-30.
    .. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
        thesis, Princeton University.
    .. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
        Pages: 668-675.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_nemenyi_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[_group_col, _block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    labels = x[_group_col].unique()
    n_groups = labels.size
    n_blocks = x[_block_col].unique().size
    # Rank the response within each block, then average per group.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    mean_ranks = x.groupby(_group_col)['mat'].mean()
    # Pairwise studentized-range statistics in the upper triangle;
    # the denominator is the same for every pair, so hoist it.
    denom = np.sqrt(n_groups * (n_groups + 1.) / (6. * n_blocks))
    pmat = np.zeros((n_groups, n_groups))
    for i in range(n_groups - 1):
        for j in range(i + 1, n_groups):
            pmat[i, j] = np.abs(mean_ranks[labels[i]] - mean_ranks[labels[j]]) / denom
    pmat *= np.sqrt(2.)
    upper = np.triu_indices(n_groups, 1)
    lower = np.tril_indices(n_groups, -1)
    # p values from the studentized range distribution; mirror the upper
    # triangle into the lower one and mark the diagonal with -1.
    pmat[upper] = psturng(pmat[upper], n_groups, np.inf)
    pmat[lower] = pmat.T[lower]
    np.fill_diagonal(pmat, -1)
    return DataFrame(pmat, index=labels, columns=labels)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Conover post hoc test for unreplicated
    blocked data. This test is usually conducted post hoc after
    significant results of the Friedman test. The statistics refer to
    the Student t distribution [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        'single-step' : uses Tukey distribution for multiple comparisons
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    A one-way ANOVA with repeated measures that is also referred to as ANOVA
    with unreplicated block design can also be conducted via the
    friedman.test. The consequent post hoc pairwise multiple comparison test
    according to Conover is conducted with this function.
    If y is a matrix, than the columns refer to the treatment and the rows
    indicate the block.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_conover_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    # Two-sided t test on the difference of rank sums (Conover 1999).
    def compare_stats(i, j):
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        tval = dif / np.sqrt(A) / np.sqrt(B)
        pval = 2. * ss.t.sf(np.abs(tval), df = (m*n*k - k - n + 1))
        return pval
    # Studentized-range ('single-step') variant of the same comparison.
    def compare_tukey(i, j):
        dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
        qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
        pval = psturng(qval, k, np.inf)
        return pval
    x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[_group_col,_block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = np.unique(x[_group_col])
    k = groups.size
    n = x[_block_col].unique().size
    # Within-block ranks and per-group rank sums.
    x['mat'] = x.groupby(_block_col)[_y_col].rank()
    R = x.groupby(_group_col)['mat'].sum()
    A1 = (x['mat'] ** 2).sum()
    m = 1  # number of replicates per block/group cell (unreplicated design)
    S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    # Conover's T2 statistic: sum of squared deviations of the group rank
    # sums from their expectation n*m*(m*k + 1)/2. (The previous form,
    # np.sum(R) - n*m*((m*k + 1)/2)**2, was data-independent — sum(R) is
    # always n*k*(k + 1)/2 — and therefore incorrect.)
    T2 = 1. / S2 * np.sum((R - n * m * (m * k + 1.) / 2.) ** 2.)
    A = S2 * (2. * n * (m * k - 1.)) / ( m * n * k - k - n + 1.)
    B = 1. - T2 / (n * (m * k - 1.))
    vs = np.zeros((k, k))
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if p_adjust == 'single-step':
        for i, j in combs:
            vs[i, j] = compare_tukey(i, j)
    else:
        for i, j in combs:
            vs[i, j] = compare_stats(i, j)
        if p_adjust is not None:
            vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle; diagonal entries are marked -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
    comparison procedure (NPM test) for simply ordered mean ranksums.
    NPM test is basically an extension of Nemenyi's procedure for testing
    increasingly ordered alternatives [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
        NOTE(review): this parameter is currently a no-op — the adjustment
        code below is commented out. Confirm intent before relying on it.
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, than the NPM-test
    simplifies to the ordinary Nemenyi test
    References
    ----------
    .. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
        detecting differences in simply ordered means. Comput. Statist. Data
        Anal. 48, 291--306.
    Examples
    --------
    >>> x = np.array([[102,109,114,120,124],
        [110,112,123,130,145],
        [132,141,156,160,172]])
    >>> sp.posthoc_npm_test(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    #if not sort:
    #    x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = np.unique(x[_group_col])
    # Pooled-sample ranks; per-group mean ranks and group sizes.
    x['ranks'] = x[_val_col].rank()
    Ri = x.groupby(_group_col)['ranks'].mean()
    ni = x.groupby(_group_col)[_val_col].count()
    k = groups.size
    n = x.shape[0]
    # Standard deviation of pooled ranks under the null hypothesis.
    sigma = np.sqrt(n * (n + 1) / 12.)
    df = np.inf
    # Standardized mean-rank differences of group u against each group in m
    # (all groups between the pair, per the simply-ordered alternative).
    def compare(m, u):
        a = [(Ri.loc[groups[u]]-Ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
        return np.array(a)
    stat = np.zeros((k, k))
    # NOTE: statistics are stored in the LOWER triangle (stat[j, i], j > i).
    for i in range(k-1):
        for j in range(i+1, k):
            u = j
            m = np.arange(i, u)
            tmp = compare(m, u)
            stat[j, i] = np.max(tmp)
    stat[stat < 0] = 0
    p_values = psturng(stat, k, np.inf)
    tri_upper = np.triu_indices(p_values.shape[0], 1)
    tri_lower = np.tril_indices(p_values.shape[0], -1)
    # NOTE(review): this copies the UPPER triangle (where stat stayed zero)
    # over the lower triangle that actually holds the computed statistics —
    # verify the mirroring direction against the reference implementation.
    p_values[tri_lower] = p_values.T[tri_lower]
    #if p_adjust:
    #    p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
    Data. See authors' paper for additional information [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block design
    with non-normally distributed residuals, Siegel and Castellan's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
        Behavioral Sciences. 2nd ed. New York: McGraw-Hill.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_siegel_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    # Standard-normal z statistic for a pair of mean rank sums.
    def compare_stats(i, j):
        dif = np.abs(R[groups[i]] - R[groups[j]])
        zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return zval
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size
    # Within-block ranks, then per-group mean ranks.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()
    # `np.float` was removed from NumPy (deprecated 1.20, removed 1.24);
    # the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Two-sided normal p values, clipped at 1.
    vs = 2. * ss.norm.sf(np.abs(vs))
    vs[vs > 1] = 1.
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle; diagonal entries are marked -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
    '''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
    The p-values are computed from the chi-square distribution [1]_, [2]_,
    [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (strings).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    Returns
    -------
    Pandas DataFrame containing p values.
    Notes
    -----
    For all-pairs comparisons in a two factorial unreplicated complete block
    design with non-normally distributed residuals, Miller's test can be
    performed on Friedman-type ranked data.
    References
    ----------
    .. [1] J. Bortz J, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springerself.
    .. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
        York: McGraw-Hill.
    .. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
        Students. New Brunswick: Aldine Transaction.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_miller_friedman(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    # Standardized difference of mean rank sums for a pair of groups.
    def compare_stats(i, j):
        dif = np.abs(R[groups[i]] - R[groups[j]])
        qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
        return qval
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = groups.size
    n = x[block_col].unique().size
    # Within-block ranks, then per-group mean ranks.
    x['mat'] = x.groupby(block_col)[y_col].rank()
    R = x.groupby(group_col)['mat'].mean()
    # `np.float` was removed from NumPy (deprecated 1.20, removed 1.24);
    # the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    # Squared statistics follow chi-squared with k - 1 degrees of freedom.
    vs = vs ** 2
    vs = ss.chi2.sf(vs, k - 1)
    # Mirror the upper triangle; diagonal entries are marked -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
    '''Pairwise post hoc test for multiple comparisons of rank sums according to
    Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
    references for additional information [1]_, [2]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of block design,
        i.e. rows are blocks, and columns are groups. In this case you do
        not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
        for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    Pandas DataFrame containing p values.
    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] W. J. Conover (1999), Practical nonparametric Statistics,
        3rd. edition, Wiley.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_durbin(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    # Two-sided t test on the difference of rank sums; `denom` and `df`
    # are computed once below and shared by all pairs.
    def compare_stats(i, j):
        dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df = df)
        return pval
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    # When sort is disabled, freeze the original group/block order with
    # ordered Categoricals so unique()/groupby keep appearance order.
    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    t = len(groups)   # number of treatments
    b = x[block_col].unique().size  # number of blocks
    r = b   # replications per treatment (complete design assumed)
    k = t   # treatments per block (complete design assumed)
    # Within-block ranks and per-group rank sums (Durbin's statistic parts).
    x['y_ranked'] = x.groupby(block_col)[y_col].rank()
    Rj = x.groupby(group_col)['y_ranked'].sum()
    A = (x['y_ranked'] ** 2).sum()
    C = (b * k * (k + 1) ** 2) / 4.
    D = (Rj ** 2).sum() - r * C
    T1 = (t - 1) / (A - C) * D
    denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k -1))))
    df = b * k - b - t + 1
    # `np.float` was removed from NumPy (deprecated 1.20, removed 1.24);
    # the builtin float is the documented replacement.
    vs = np.zeros((t, t), dtype=float)
    combs = it.combinations(range(t), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        vs[i, j] = compare_stats(i, j)
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle; diagonal entries are marked -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
    '''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
    that k-samples are drawn from the same population without having to specify
    the distribution function of that population [1]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. If set to True (default), the
        midrank test applicable to continuous and discrete populations is performed. If
        False, the right side empirical distribution is used.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    result : pandas DataFrame
        P values.
    References
    ----------
    .. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
        Journal of the American Statistical Association, Vol. 82, pp. 918-924.
    Examples
    --------
    >>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
    >>> sp.posthoc_anderson(x)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
    # When sort is disabled, freeze the original group order with an ordered
    # Categorical so unique() keeps appearance order after sorting.
    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)
    groups = x[_group_col].unique()
    k = groups.size
    # `np.float` was removed from NumPy (deprecated 1.20, removed 1.24);
    # the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    for i, j in combs:
        # anderson_ksamp returns (statistic, critical values, significance);
        # index [2] is the approximate p value. Forward `midrank` — it was
        # previously accepted but silently ignored.
        vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col], x.loc[x[_group_col] == groups[j], _val_col]], midrank=midrank)[2]
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle; diagonal entries are marked -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.
    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True,
        y_col, block_col and group_col must specify the indices of columns
        containing elements of correspondary type.
        If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify columns names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values.
        The default distribution is "t", else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    Returns
    -------
    Pandas DataFrame containing p values.
    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
        Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
        Reference Manual, Volume 2: Let Subcommands and Library Functions.
        National Institute of Standards and Technology Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
        blocks with additive block effects. Journal of the American Statistical
        Association, 74, 680-683.
    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    # Two-sided t test on the difference of weighted score sums S.
    def compare_stats_t(i, j):
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df = (b - 1) * (k - 1))
        return pval
    # Normal approximation using weighted rank sums W scaled by ff.
    def compare_stats_norm(i, j):
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    # When sort is disabled, freeze the original group/block order with
    # ordered Categoricals so unique()/groupby keep appearance order.
    if not sort:
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = len(groups)
    b = x[block_col].unique().size
    # Within-block ranks of y, and block weights q = rank of block ranges.
    x['r'] = x.groupby(block_col)[y_col].rank()
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1)/2
    # Weighted scores: each row's (centered) rank times its block's weight.
    # Use the resolved `block_col` name — the previous hard-coded 'blocks'
    # key raised KeyError for melted input with a custom block column.
    x['s'] = x.apply(lambda row: row['rr'] * q[row[block_col]], axis=1)
    x['w'] = x.apply(lambda row: row['r'] * q[row[block_col]], axis=1)
    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()
    # `np.float` was removed from NumPy (deprecated 1.20, removed 1.24);
    # the builtin float is the documented replacement.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:,:] = 0
    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k-1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1)/2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)
    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
    # Mirror the upper triangle; diagonal entries are marked -1.
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p=None, n_perm=100, sort=False, p_adjust=None):
    '''Mack-Wolfe Test for Umbrella Alternatives.

    In dose-finding studies one may assume an increasing treatment effect with
    increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decreasing treatment
    effects [1]_, [2]_.

    The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
    either a known or unknown point P (i.e. dose-level), where the peak
    (umbrella point) is present.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale.
    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical).
    p : int, optional
        The a-priori known peak as an ordinal number of the treatment group
        including the zero dose level, i.e. p = {1, ..., k}. Defaults to None,
        in which case the peak is estimated and a permutation p value is
        computed.
    n_perm : int, optional
        Number of permutations used to derive the p value when `p` is unknown.
        Default is 100.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Accepted for interface consistency with the other post hoc functions
        in this module, but currently unused: this test yields a single
        global p value, so there is nothing to adjust.

    Returns
    -------
    p : float
        P value.
    stat : float
        Statistic.

    References
    ----------
    .. [1] Chen, I.Y. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
        Umbrella Alternatives. Biom. J., 33, 281-290.
    .. [2] Mack, G.A., Wolfe, D. A. (1981) K-sample rank tests for umbrella
        alternatives. J. Amer. Statist. Assoc., 76, 175-181.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
    '''
    # __convert_to_df returns (DataFrame, value column name, group column name)
    # and is unpacked that way everywhere else in this module; the previous
    # single-name assignment left `x` bound to the tuple, which broke every
    # subsequent DataFrame operation.
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    k = x[_group_col].unique().size

    if p is not None:
        if p > k:
            print("Selected 'p' > number of groups:", str(p), " > ", str(k))
            return False
        elif p < 1:
            print("Selected 'p' < 1: ", str(p))
            return False

    Rij = x[_val_col].rank()
    n = x.groupby(_group_col)[_val_col].count()

    def _fn(Ri, Rj):
        # Mann-Whitney-style count: number of (ri, rj) pairs with rj > ri.
        return np.sum(Ri.apply(lambda v: Rj[Rj > v].size))

    def _ustat(Rij, g, k):
        # Pairwise U statistics between all group levels.
        levels = np.unique(g)
        U = np.identity(k)
        for i in range(k):
            for j in range(i):
                U[i, j] = _fn(Rij[x[_group_col] == levels[i]], Rij[x[_group_col] == levels[j]])
                U[j, i] = _fn(Rij[x[_group_col] == levels[j]], Rij[x[_group_col] == levels[i]])
        return U

    def _ap(p, U):
        # A_p statistic: upward trend up to the peak, downward trend after it.
        tmp1 = 0.
        if p > 0:
            for i in range(p):
                for j in range(i + 1, p + 1):
                    tmp1 += U[i, j]
        tmp2 = 0.
        if p < k:
            for i in range(p, k):
                for j in range(i + 1, k):
                    tmp2 += U[j, i]
        return tmp1 + tmp2

    def _n1(p, n):
        return np.sum(n[:p+1])

    def _n2(p, n):
        return np.sum(n[p:k])

    def _mean_at(p, n):
        # Expectation of A_p under the null hypothesis.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2) / 4.

    def _var_at(p, n):
        # Variance of A_p under the null hypothesis.
        N1 = _n1(p, n)
        N2 = _n2(p, n)
        N = np.sum(n)
        var = (2. * (N1**3 + N2**3) + 3. * (N1**2 + N2**2) -
               np.sum(n**2 * (2. * n + 3.)) - n.iloc[p]**2 * (2. * n.iloc[p] + 3.) +
               12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p]**2 * N) / 72.
        return var

    if p is not None:
        # NOTE(review): `p` is documented as 1-based ({1, ..., k}) but is used
        # directly as an index below, as in the original implementation —
        # confirm the intended indexing against the reference R code.
        if (x.groupby(_val_col).count() > 1).any().any():
            print("Ties are present")
        U = _ustat(Rij, x[_group_col], k)
        est = _ap(p, U)
        mean = _mean_at(p, n)
        sd = np.sqrt(_var_at(p, n))
        stat = (est - mean) / sd
        p_value = ss.norm.sf(stat)
    else:
        # Peak unknown: take the maximum standardized A_p over all candidate
        # peaks and compare it to the permutation distribution of that maximum.
        U = _ustat(Rij, x[_group_col], k)
        Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
        mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
        var = np.array([_var_at(i, n) for i in range(k)]).ravel()
        A = (Ap - mean) / np.sqrt(var)
        stat = np.max(A)

        mt = []
        for _ in range(n_perm):
            ix = Series(np.random.permutation(Rij))
            Uix = _ustat(ix, x[_group_col], k)
            Apix = np.array([_ap(i, Uix) for i in range(k)])
            Astarix = (Apix - mean) / np.sqrt(var)
            mt.append(np.max(Astarix))

        mt = np.array(mt)
        # P value is the proportion of permutation maxima exceeding the
        # observed statistic; the original divided the exceeding *values*
        # by n_perm, returning an array instead of a probability.
        p_value = mt[mt > stat].size / n_perm

    return p_value, stat
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None):
    '''Van der Waerden's test for pairwise multiple comparisons between group
    levels. See references for additional information [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    For one-factorial designs with samples that do not meet the assumptions for
    one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
    normal scores can be employed. Provided that significant differences were
    detected by this global test, one may be interested in applying post hoc
    tests according to van der Waerden for pairwise multiple comparisons of the
    group levels.

    There is no tie correction applied in this function.

    References
    ----------
    .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
        Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
    .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
        their power, Indagationes Mathematicae, 14, 453-458.

    Examples
    --------
    >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
    >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    n = x[_val_col].size
    k = groups.size
    r = ss.rankdata(x[_val_col])
    # Normal scores: inverse normal CDF of the scaled ranks.
    x['z_scores'] = ss.norm.ppf(r / (n + 1))

    aj = x.groupby(_group_col)['z_scores'].sum()
    nj = x.groupby(_group_col)['z_scores'].count()
    s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
    sts = (1. / s2) * np.sum(aj ** 2. / nj)
    A = aj / nj

    # `np.float` was removed from NumPy; the builtin float is equivalent.
    vs = np.zeros((k, k), dtype=float)
    combs = it.combinations(range(k), 2)

    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    def compare_stats(i, j):
        # Two-sided t test on the normal-score group means.
        dif = np.abs(A[groups[i]] - A[groups[j]])
        B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
        tval = dif / np.sqrt(s2 * (n - 1. - sts) / (n - k) * B)
        pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
        return pval

    for i, j in combs:
        vs[i, j] = compare_stats(i, j)

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=True):
    '''Pairwise T test for multiple comparisons of independent groups. May be
    used after a parametric ANOVA to do pairwise comparisons.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    equal_var : bool, optional
        If True (default), perform a standard independent test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
    pool_sd : bool, optional
        Calculate a common SD for all groups and use that for all
        comparisons (this can be useful if some groups are small).
        This method does not actually call scipy ttest_ind() function,
        so extra arguments are ignored. Default is False.
    p_adjust : str, optional
        Method for adjusting p values.
        See statsmodels.sandbox.stats.multicomp for details.
        Available methods are:
        'bonferroni' : one-step correction
        'sidak' : one-step correction
        'holm-sidak' : step-down method using Sidak adjustments
        'holm' : step-down method using Bonferroni adjustments
        'simes-hochberg' : step-up method (independent)
        'hommel' : closed method based on Simes tests (non-negative)
        'fdr_bh' : Benjamini/Hochberg (non-negative)
        'fdr_by' : Benjamini/Yekutieli (negative)
        'fdr_tsbh' : two stage fdr correction (non-negative)
        'fdr_tsbky' : two stage fdr correction (non-negative)
    sort : bool, optional
        Specifies whether to sort DataFrame by group_col or not. Recommended
        unless you sort your data manually.

    Returns
    -------
    Numpy ndarray or pandas DataFrame of p values depending on input
    data type.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test

    Examples
    --------
    >>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
    array([[-1.        ,  0.04600899,  0.31269089],
           [ 0.04600899, -1.        ,  0.6327077 ],
           [ 0.31269089,  0.6327077 , -1.        ]])
    '''
    if isinstance(a, DataFrame):
        x = a.copy()
        if not sort:
            x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
        x_lens = x.groupby(by=group_col)[val_col].count().values
        x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
        # Keep a plain list: groups may have unequal sizes, and constructing a
        # ragged np.array() is an error in modern NumPy.
        x_grouped = [x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)]
    else:
        x = a
        # Drop NaNs per group; each row may have its own length, so convert
        # rows individually rather than building one ragged ndarray.
        x_grouped = [np.asarray(row, dtype=float) for row in a]
        x_grouped = [row[~np.isnan(row)] for row in x_grouped]
        x_lens = np.asarray([row.size for row in x_grouped])

    if any(x_lens == 0):
        raise ValueError("All groups must contain data")

    x_len = len(x_grouped)
    # `np.float` was removed from NumPy; the builtin float is equivalent.
    vs = np.zeros((x_len, x_len), dtype=float)
    tri_upper = np.triu_indices(x_len, 1)
    tri_lower = np.tril_indices(x_len, -1)

    def compare_pooled(i, j):
        # Two-sided t test with a common (pooled) standard deviation.
        diff = x_means[i] - x_means[j]
        se_diff = pooled_sd * np.sqrt(1. / x_lens[i] + 1. / x_lens[j])
        t_value = diff / se_diff
        return 2. * ss.t.cdf(-np.abs(t_value), x_totaldegf)

    combs = it.combinations(range(x_len), 2)

    if pool_sd:
        x_means = np.asarray([np.mean(xi) for xi in x_grouped])
        x_sd = np.asarray([np.std(xi, ddof=1) for xi in x_grouped])
        x_degf = x_lens - 1
        x_totaldegf = np.sum(x_degf)
        pooled_sd = np.sqrt(np.sum(x_sd ** 2 * x_degf) / x_totaldegf)

        for i, j in combs:
            vs[i, j] = compare_pooled(i, j)
    else:
        for i, j in combs:
            vs[i, j] = ss.ttest_ind(x_grouped[i], x_grouped[j], equal_var=equal_var)[1]

    if p_adjust:
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]

    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)

    if isinstance(x, DataFrame):
        groups_unique = x[group_col].unique()
        return DataFrame(vs, index=groups_unique, columns=groups_unique)
    return vs
def posthoc_tukey_hsd(x, g, alpha=0.05):
    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels `pairwise_tukeyhsd` method more
    applicable for further use.

    Parameters
    ----------
    x : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing dependent
        variable values (test or response variable). Values should have a
        non-nominal scale. NaN values will cause an error (please handle
        manually).
    g : array_like or pandas Series object, 1d
        An array, any object exposing the array interface, containing
        independent variable values (grouping or predictor variable). Values
        should have a nominal scale (categorical).
    alpha : float, optional
        Significance level for the test. Default is 0.05.

    Returns
    -------
    result : pandas DataFrame
        DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
        1 is True (significant), and -1 is for diagonal elements.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
    >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
    '''
    # `alpha` is now actually forwarded; it used to be hard-coded to 0.05,
    # silently ignoring the caller's choice.
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    # `np.str`/`np.int` aliases were removed from NumPy; builtins are equivalent.
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)

    vs = np.zeros((groups_len, groups_len), dtype=int)

    # Translate the summary table's "reject" column into a 0/1 matrix.
    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]

    return DataFrame(vs, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
    '''Pairwise comparisons with Mann-Whitney rank test.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of the column holding the dependent (response) variable.
        Values should have a non-nominal scale. Must be specified if `a`
        is a pandas DataFrame object.
    group_col : str, optional
        Name of the column holding the grouping (predictor) variable.
        Values should have a nominal scale (categorical). Must be
        specified if `a` is a pandas DataFrame object.
    use_continuity : bool, optional
        Whether the 1/2 continuity correction should be applied.
        Default is True.
    alternative : ['two-sided', 'less', or 'greater'], optional
        Alternative hypothesis: one-sided ('less' or 'greater') or
        two-sided ('two-sided', the default).
    p_adjust : str, optional
        P value correction method passed to statsmodels' `multipletests`
        (e.g. 'bonferroni', 'sidak', 'holm-sidak', 'holm',
        'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh',
        'fdr_tsbky'). No adjustment is applied when None.
    sort : bool, optional
        Whether to sort the data by group and value columns. Recommended
        unless the data is sorted manually.

    Returns
    -------
    result : pandas DataFrame
        Matrix of p values; diagonal elements are set to -1.

    Notes
    -----
    Refer to `scipy.stats.mannwhitneyu` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    df.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)

    groups = np.unique(df[_group_col])
    k = groups.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    # Run the U test on every unordered pair of groups.
    for gi, gj in it.combinations(range(k), 2):
        sample_i = df.loc[df[_group_col] == groups[gi], _val_col]
        sample_j = df.loc[df[_group_col] == groups[gj], _val_col]
        pvals[gi, gj] = ss.mannwhitneyu(
            sample_i, sample_j,
            use_continuity=use_continuity,
            alternative=alternative)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)

    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
    '''Pairwise comparisons with Wilcoxon signed-rank test.

    A non-parametric counterpart of the paired T-test for use with
    non-parametric ANOVA.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame. Array must be two-dimensional.
    val_col : str, optional
        Name of the column holding the dependent (response) variable.
        Values should have a non-nominal scale. Must be specified if `a`
        is a pandas DataFrame object.
    group_col : str, optional
        Name of the column holding the grouping (predictor) variable.
        Values should have a nominal scale (categorical). Must be
        specified if `a` is a pandas DataFrame object.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt": includes zero-differences in the ranking process
        (more conservative); "wilcox": discards all zero-differences;
        "zsplit": splits the zero rank between positive and negative ones.
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the
        z-statistic. Default is False.
    p_adjust : str, optional
        P value correction method passed to statsmodels' `multipletests`
        (e.g. 'bonferroni', 'sidak', 'holm-sidak', 'holm',
        'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh',
        'fdr_tsbky'). No adjustment is applied when None.
    sort : bool, optional
        Specifies whether to order group levels by appearance (False,
        default) or leave them as-is.

    Returns
    -------
    result : pandas DataFrame
        Matrix of p values; diagonal elements are set to -1.

    Notes
    -----
    Refer to `scipy.stats.wilcoxon` reference page for further details.

    Examples
    --------
    >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
    >>> sp.posthoc_wilcoxon(x)
    '''
    df, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        df[_group_col] = Categorical(df[_group_col], categories=df[_group_col].unique(), ordered=True)
    # NOTE(review): rows are deliberately not reordered by value here (the
    # sort call was disabled upstream) — presumably to preserve the pairing
    # required by the signed-rank test; confirm before re-enabling.

    groups = np.unique(df[_group_col])
    k = groups.size

    pvals = np.zeros((k, k))
    upper = np.triu_indices(k, 1)
    lower = np.tril_indices(k, -1)

    # Run the signed-rank test on every unordered pair of groups.
    for gi, gj in it.combinations(range(k), 2):
        pvals[gi, gj] = ss.wilcoxon(
            df.loc[df[_group_col] == groups[gi], _val_col],
            df.loc[df[_group_col] == groups[gj], _val_col],
            zero_method=zero_method, correction=correction)[1]

    if p_adjust:
        pvals[upper] = multipletests(pvals[upper], method=p_adjust)[1]

    # Mirror the upper triangle into the lower one and mark the diagonal.
    pvals[lower] = pvals.T[lower]
    np.fill_diagonal(pvals, -1)

    return DataFrame(pvals, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
    '''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in an one-factorial layout with
    normally distributed residuals and equal variances Scheffe's test can be
    performed with parametric ANOVA [1]_, [2]_, [3]_.

    A total of m = k(k-1)/2 hypotheses can be tested.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Accepted for interface consistency with the other post hoc functions
        in this module, but currently unused: p values are taken directly
        from the F-distribution.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the F-distribution.

    References
    ----------
    .. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
        Springer.
    .. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
        of Variance. Biometrika 40, 87-110.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))

    def compare(i, j):
        # F statistic for the (i, j) contrast.
        dif = xi.loc[i] - xi.loc[j]
        A = sin * (1. / ni[i] + 1. / ni[j]) * (groups.size - 1.)
        f_val = dif ** 2. / A
        return f_val

    # `np.float` was removed from NumPy; the builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    vs[tri_lower] = vs.T[tri_lower]
    p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)

    np.fill_diagonal(p_values, -1)
    return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
    '''Tamhane's T2 all-pairs comparison test for normally distributed data with
    unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in an one-factorial layout with normally distributed residuals
    but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against the
    alternative hypothesis [1]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    welch : bool, optional
        If True, use Welch's approximate solution for calculating the degree of
        freedom. T2 test uses the usual df = N - 2 approximation.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the t-distribution and adjusted according to
    Dunn-Sidak.

    References
    ----------
    .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
        Means with Unequal Variances. Journal of the American Statistical Association,
        74, 471-480.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = x[_group_col].unique()
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    xi = x_grouped.mean()
    si = x_grouped.var()

    def compare(i, j):
        dif = xi[i] - xi[j]
        A = si[i] / ni[i] + si[j] / ni[j]
        t_val = dif / np.sqrt(A)
        if welch:
            df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
        else:
            # Balance checks according to Tamhane (1979, p. 474). The original
            # referenced an undefined name `s2i` here (NameError at runtime);
            # `si` holds the group variances used in the t statistic above.
            ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
            ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
            ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
            ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (si[i] / ni[i]) / (si[j] / ni[j])) and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
            # any() takes a single iterable; any(ok1, ok2, ...) raised TypeError.
            OK = any([ok1, ok2, ok3, ok4])
            if not OK:
                print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
            df = ni[i] + ni[j] - 2.
        p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
        return p_val

    # `np.float` was removed from NumPy; the builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # Dunn-Sidak adjustment of the pairwise p values.
    vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
    vs[tri_lower] = vs.T[tri_lower]
    vs[vs > 1] = 1

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col = None, group_col = None, sort = False):
    '''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in an
    one-factorial layout with normally distributed residuals and equal variances
    Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
    tested. The null hypothesis is tested in the two-tailed test against
    the alternative hypothesis [1]_, [2]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.
    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.
    sort : bool, optional
        If True, sort data by block and group columns.

    Returns
    -------
    result : pandas DataFrame
        P values.

    Notes
    -----
    The p values are computed from the Tukey-distribution.

    References
    ----------
    .. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
    .. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
        Biometrics 5, 99-114.

    Examples
    --------
    >>> import scikit_posthocs as sp
    >>> import pandas as pd
    >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
    >>> x = x.melt(var_name='groups', value_name='values')
    >>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
    '''
    x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)

    if not sort:
        x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
    x.sort_values(by=[_group_col], ascending=True, inplace=True)

    groups = np.unique(x[_group_col])
    x_grouped = x.groupby(_group_col)[_val_col]

    ni = x_grouped.count()
    n = ni.sum()
    xi = x_grouped.mean()
    si = x_grouped.var()
    # Pooled within-group variance.
    sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))

    def compare(i, j):
        # Studentized range statistic for the (i, j) pair.
        dif = xi[i] - xi[j]
        A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
        q_val = dif / np.sqrt(A)
        return q_val

    # `np.float` was removed from NumPy; the builtin float is equivalent.
    vs = np.zeros((groups.size, groups.size), dtype=float)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)

    for i, j in it.combinations(range(groups.size), 2):
        vs[i, j] = compare(groups[i], groups[j])

    # P values from the studentized range (Tukey) distribution.
    vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
    vs[tri_lower] = vs.T[tri_lower]

    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
import numpy as np


def sign_array(p_values, alpha=0.05):
    """Significance array.

    Converts an array with p values to a significance array where
    0 is False (not significant), 1 is True (significant),
    and -1 is for diagonal elements.

    Parameters
    ----------
    p_values : array_like or ndarray
        An array, any object exposing the array interface, containing
        p values.
    alpha : float, optional
        Significance level. Default is 0.05.

    Returns
    -------
    Numpy array where 0 is False (not significant), 1 is True (significant),
    and -1 is for diagonal elements.

    Examples
    --------
    >>> p_values = np.array([[ 0.        ,  0.00119517,  0.00278329],
    ...                      [ 0.00119517,  0.        ,  0.18672227],
    ...                      [ 0.00278329,  0.18672227,  0.        ]])
    >>> sign_array(p_values)
    array([[-1.,  1.,  1.],
           [ 1., -1.,  0.],
           [ 1.,  0., -1.]])
    """
    # Work on a copy so the caller's array is left untouched.
    p_values = np.array(p_values)
    # Order matters: zero out non-significant entries first, then flag the
    # remaining entries in (0, alpha) as significant.
    p_values[p_values > alpha] = 0
    p_values[(p_values < alpha) & (p_values > 0)] = 1
    np.fill_diagonal(p_values, -1)
    return p_values
from matplotlib.colors import ListedColormap
from matplotlib.colorbar import ColorbarBase
from seaborn import heatmap
from pandas import DataFrame
def sign_table(p_values, lower=True, upper=True):
    r"""
    Significance table.

    Returns table that can be used in a publication. P values are replaced
    with asterisks: \* - p < 0.05, \*\* - p < 0.01, \*\*\* - p < 0.001.

    Parameters
    ----------
    p_values : array_like, or ndarray, or pandas DataFrame
        An array, any object exposing the array interface, containing
        p values.
    lower : bool, optional
        Defines whether to return the lower triangle.
    upper : bool, optional
        Defines whether to return the upper triangle.

    Returns
    -------
    Numpy array or pandas DataFrame with asterisks masked p values.

    Examples
    --------
    >>> p_values = np.array([[-1. ,  0.00119517,  0.00278329],
    ...                      [ 0.00119517, -1. ,  0.18672227],
    ...                      [ 0.00278329,  0.18672227, -1. ]])
    >>> sign_table(p_values)
    array([['-', '**', '**'],
           ['**', '-', 'NS'],
           ['**', 'NS', '-']], dtype=object)
    """
    if not any([lower, upper]):
        raise ValueError("Either lower or upper triangle must be returned")

    if not isinstance(p_values, DataFrame):
        # `np.float` was removed from NumPy; the builtin float is equivalent.
        p_values = np.array(p_values, dtype=float)

    # Boolean masks for each significance band; the -1 diagonal markers
    # match none of them and are replaced with '-' below.
    ns = p_values > 0.05
    three = (p_values < 0.001) & (p_values >= 0)
    two = (p_values < 0.01) & (p_values >= 0.001)
    one = (p_values < 0.05) & (p_values >= 0.01)

    p_values = p_values.astype(object)
    p_values[ns] = 'NS'
    p_values[three] = '***'
    p_values[two] = '**'
    p_values[one] = '*'

    if not isinstance(p_values, DataFrame):
        np.fill_diagonal(p_values, '-')
        if not lower:
            p_values[np.tril_indices(p_values.shape[0], -1)] = ''
        elif not upper:
            p_values[np.triu_indices(p_values.shape[0], 1)] = ''
    else:
        np.fill_diagonal(p_values.values, '-')
        if not lower:
            p_values.values[np.tril_indices(p_values.shape[0], -1)] = ''
        elif not upper:
            p_values.values[np.triu_indices(p_values.shape[0], 1)] = ''

    return p_values
def sign_plot(x, g=None, flat=False, labels=True, cmap=None,
cbar_ax_bbox=None, ax=None, **kwargs):
"""
Significance plot, a heatmap of p values (based on Seaborn).
Parameters
----------
x : array_like, ndarray or DataFrame
If flat is False (default), x must be an array, any object exposing
the array interface, containing p values. If flat is True, x must be
a sign_array (returned by `scikit_posthocs.sign_array` function)
g : array_like or ndarray, optional
An array, any object exposing the array interface, containing
group names.
flat : bool, optional
If `flat` is True, plots a significance array as a heatmap using
seaborn. If `flat` is False (default), plots an array of p values.
Non-flat mode is useful if you need to differentiate significance
levels visually. It is the preferred mode.
labels : bool, optional
Plot axes labels (default) or not.
cmap : list, optional
1) If flat is False (default):
List consisting of five elements, that will be exported to
ListedColormap method of matplotlib. First is for diagonal
elements, second is for non-significant elements, third is for
p < 0.001, fourth is for p < 0.01, fifth is for p < 0.05.
2) If flat is True:
List consisting of three elements, that will be exported to
ListedColormap method of matplotlib. First is for diagonal
elements, second is for non-significant elements, third is for
significant ones.
3) If not defined, default colormaps will be used.
cbar_ax_bbox : list, optional
Colorbar axes position rect [left, bottom, width, height] where
all quantities are in fractions of figure width and height.
Refer to `matplotlib.figure.Figure.add_axes` for more information.
Default is [0.95, 0.35, 0.04, 0.3].
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active Axes.
kwargs : other keyword arguments
Keyword arguments to be passed to seaborn heatmap method. These
keyword args cannot be used: cbar, vmin, vmax, center.
Returns
-------
Axes object with the heatmap (and ColorBase object of cbar if `flat` is set to
False).
Examples
--------
>>> x = np.array([[-1, 1, 1],
[ 1, -1, 0],
[ 1, 0, -1]])
>>> ph.sign_plot(x, flat = True)
"""
try:
del kwargs['cbar'], kwargs['vmin'], kwargs['vmax'], kwargs['center']
except:
pass
if isinstance(x, DataFrame):
df = x.copy()
else:
x = np.array(x)
g = g or np.arange(x.shape[0])
df = DataFrame(x, index=g, columns=g)
dtype = df.values.dtype
if not np.issubdtype(dtype, np.integer) and flat:
raise ValueError("X should be a sign_array or DataFrame of integer values")
elif not np.issubdtype(dtype, np.floating) and not flat:
raise ValueError("X should be an array or DataFrame of float p values")
if not cmap and flat:
# format: diagonal, non-significant, significant
cmap = ['1', '#fbd7d4', '#1a9641']
elif not cmap and not flat:
# format: diagonal, non-significant, p<0.001, p<0.01, p<0.05
cmap = ['1', '#fbd7d4', '#005a32', '#238b45', '#a1d99b']
if flat:
g = heatmap(df, vmin=-1, vmax=1, cmap=ListedColormap(cmap), cbar=False, ax=ax,
**kwargs)
if not labels:
g.set_xlabel('')
g.set_ylabel('')
return g
else:
df[(x <= 0.001) & (x >= 0)] = 1
df[(x <= 0.01) & (x > 0.001)] = 2
df[(x <= 0.05) & (x > 0.01)] = 3
df[(x > 0.05)] = 0
np.fill_diagonal(df.values, -1)
if len(cmap) != 5:
raise ValueError("Cmap list must contain 5 items")
g = heatmap(df, vmin=-1, vmax=3, cmap=ListedColormap(cmap), center=1, cbar=False,
ax=ax, **kwargs)
if not labels:
g.set_xlabel('')
g.set_ylabel('')
cbar_ax = g.figure.add_axes(cbar_ax_bbox or [0.95, 0.35, 0.04, 0.3])
cbar = ColorbarBase(cbar_ax, cmap=ListedColormap(cmap[2:] + [cmap[1]]),
boundaries=[0, 1, 2, 3, 4])
cbar.set_ticks(np.linspace(0.5, 3.5, 4))
cbar.set_ticklabels(['p < 0.001', 'p < 0.01', 'p < 0.05', 'NS'])
cbar.outline.set_linewidth(1)
cbar.outline.set_edgecolor('0.5')
cbar.ax.tick_params(size=0)
return g, cbar
|
maximtrp/scikit-posthocs | scikit_posthocs/_plotting.py | sign_table | python | def sign_table(p_values, lower=True, upper=True):
if not any([lower, upper]):
raise ValueError("Either lower or upper triangle must be returned")
if not isinstance(p_values, DataFrame):
p_values = np.array(p_values, dtype=np.float)
ns = p_values > 0.05
three = (p_values < 0.001) & (p_values >= 0)
two = (p_values < 0.01) & (p_values >= 0.001)
one = (p_values < 0.05) & (p_values >= 0.01)
p_values = p_values.astype(object)
p_values[ns] = 'NS'
p_values[three] = '***'
p_values[two] = '**'
p_values[one] = '*'
if not isinstance(p_values, DataFrame):
np.fill_diagonal(p_values, '-')
if not lower:
p_values[np.tril_indices(p_values.shape[0], -1)] = ''
elif not upper:
p_values[np.triu_indices(p_values.shape[0], 1)] = ''
else:
np.fill_diagonal(p_values.values, '-')
if not lower:
p_values.values[np.tril_indices(p_values.shape[0], -1)] = ''
elif not upper:
p_values.values[np.triu_indices(p_values.shape[0], 1)] = ''
return p_values | Significance table
Returns table that can be used in a publication. P values are replaced
with asterisks: \* - p < 0.05, \*\* - p < 0.01, \*\*\* - p < 0.001.
Parameters
----------
p_values : array_like, or ndarray, or pandas DataFrame
An array, any object exposing the array interface, containing
p values.
lower : bool, optional
Defines whether to return the lower triangle.
upper : bool, optional
Defines whether to return the upper triangle.
Returns
-------
Numpy array or pandas DataFrame with asterisks masked p values.
Examples
--------
>>> p_values = np.array([[-1. , 0.00119517, 0.00278329],
[ 0.00119517, -1. , 0.18672227],
[ 0.00278329, 0.18672227, -1. ]])
>>> ph.sign_table(p_values)
array([['-', '**', '**'],
['**', '-', 'NS'],
['**', 'NS', '-']], dtype=object) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_plotting.py#L50-L115 | null | import numpy as np
from matplotlib.colors import ListedColormap
from matplotlib.colorbar import ColorbarBase
from seaborn import heatmap
from pandas import DataFrame
def sign_array(p_values, alpha=0.05):
"""
Significance array
Converts an array with p values to a significance array where
0 is False (not significant), 1 is True (significant),
and -1 is for diagonal elements.
Parameters
----------
p_values : array_like or ndarray
An array, any object exposing the array interface, containing
p values.
alpha : float, optional
Significance level. Default is 0.05.
Returns
-------
Numpy array where 0 is False (not significant), 1 is True (significant),
and -1 is for diagonal elements.
Examples
--------
>>> p_values = np.array([[ 0. , 0.00119517, 0.00278329],
[ 0.00119517, 0. , 0.18672227],
[ 0.00278329, 0.18672227, 0. ]])
>>> ph.sign_array(p_values)
array([[-1, 1, 1],
[ 1, -1, 0],
[ 1, 0, -1]])
"""
p_values = np.array(p_values)
p_values[p_values > alpha] = 0
p_values[(p_values < alpha) & (p_values > 0)] = 1
np.fill_diagonal(p_values, -1)
return p_values
def sign_plot(x, g=None, flat=False, labels=True, cmap=None,
cbar_ax_bbox=None, ax=None, **kwargs):
"""
Significance plot, a heatmap of p values (based on Seaborn).
Parameters
----------
x : array_like, ndarray or DataFrame
If flat is False (default), x must be an array, any object exposing
the array interface, containing p values. If flat is True, x must be
a sign_array (returned by `scikit_posthocs.sign_array` function)
g : array_like or ndarray, optional
An array, any object exposing the array interface, containing
group names.
flat : bool, optional
If `flat` is True, plots a significance array as a heatmap using
seaborn. If `flat` is False (default), plots an array of p values.
Non-flat mode is useful if you need to differentiate significance
levels visually. It is the preferred mode.
labels : bool, optional
Plot axes labels (default) or not.
cmap : list, optional
1) If flat is False (default):
List consisting of five elements, that will be exported to
ListedColormap method of matplotlib. First is for diagonal
elements, second is for non-significant elements, third is for
p < 0.001, fourth is for p < 0.01, fifth is for p < 0.05.
2) If flat is True:
List consisting of three elements, that will be exported to
ListedColormap method of matplotlib. First is for diagonal
elements, second is for non-significant elements, third is for
significant ones.
3) If not defined, default colormaps will be used.
cbar_ax_bbox : list, optional
Colorbar axes position rect [left, bottom, width, height] where
all quantities are in fractions of figure width and height.
Refer to `matplotlib.figure.Figure.add_axes` for more information.
Default is [0.95, 0.35, 0.04, 0.3].
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active Axes.
kwargs : other keyword arguments
Keyword arguments to be passed to seaborn heatmap method. These
keyword args cannot be used: cbar, vmin, vmax, center.
Returns
-------
Axes object with the heatmap (and ColorBase object of cbar if `flat` is set to
False).
Examples
--------
>>> x = np.array([[-1, 1, 1],
[ 1, -1, 0],
[ 1, 0, -1]])
>>> ph.sign_plot(x, flat = True)
"""
try:
del kwargs['cbar'], kwargs['vmin'], kwargs['vmax'], kwargs['center']
except:
pass
if isinstance(x, DataFrame):
df = x.copy()
else:
x = np.array(x)
g = g or np.arange(x.shape[0])
df = DataFrame(x, index=g, columns=g)
dtype = df.values.dtype
if not np.issubdtype(dtype, np.integer) and flat:
raise ValueError("X should be a sign_array or DataFrame of integer values")
elif not np.issubdtype(dtype, np.floating) and not flat:
raise ValueError("X should be an array or DataFrame of float p values")
if not cmap and flat:
# format: diagonal, non-significant, significant
cmap = ['1', '#fbd7d4', '#1a9641']
elif not cmap and not flat:
# format: diagonal, non-significant, p<0.001, p<0.01, p<0.05
cmap = ['1', '#fbd7d4', '#005a32', '#238b45', '#a1d99b']
if flat:
g = heatmap(df, vmin=-1, vmax=1, cmap=ListedColormap(cmap), cbar=False, ax=ax,
**kwargs)
if not labels:
g.set_xlabel('')
g.set_ylabel('')
return g
else:
df[(x <= 0.001) & (x >= 0)] = 1
df[(x <= 0.01) & (x > 0.001)] = 2
df[(x <= 0.05) & (x > 0.01)] = 3
df[(x > 0.05)] = 0
np.fill_diagonal(df.values, -1)
if len(cmap) != 5:
raise ValueError("Cmap list must contain 5 items")
g = heatmap(df, vmin=-1, vmax=3, cmap=ListedColormap(cmap), center=1, cbar=False,
ax=ax, **kwargs)
if not labels:
g.set_xlabel('')
g.set_ylabel('')
cbar_ax = g.figure.add_axes(cbar_ax_bbox or [0.95, 0.35, 0.04, 0.3])
cbar = ColorbarBase(cbar_ax, cmap=ListedColormap(cmap[2:] + [cmap[1]]),
boundaries=[0, 1, 2, 3, 4])
cbar.set_ticks(np.linspace(0.5, 3.5, 4))
cbar.set_ticklabels(['p < 0.001', 'p < 0.01', 'p < 0.05', 'NS'])
cbar.outline.set_linewidth(1)
cbar.outline.set_edgecolor('0.5')
cbar.ax.tick_params(size=0)
return g, cbar
|
maximtrp/scikit-posthocs | scikit_posthocs/_plotting.py | sign_plot | python | def sign_plot(x, g=None, flat=False, labels=True, cmap=None,
cbar_ax_bbox=None, ax=None, **kwargs):
try:
del kwargs['cbar'], kwargs['vmin'], kwargs['vmax'], kwargs['center']
except:
pass
if isinstance(x, DataFrame):
df = x.copy()
else:
x = np.array(x)
g = g or np.arange(x.shape[0])
df = DataFrame(x, index=g, columns=g)
dtype = df.values.dtype
if not np.issubdtype(dtype, np.integer) and flat:
raise ValueError("X should be a sign_array or DataFrame of integer values")
elif not np.issubdtype(dtype, np.floating) and not flat:
raise ValueError("X should be an array or DataFrame of float p values")
if not cmap and flat:
# format: diagonal, non-significant, significant
cmap = ['1', '#fbd7d4', '#1a9641']
elif not cmap and not flat:
# format: diagonal, non-significant, p<0.001, p<0.01, p<0.05
cmap = ['1', '#fbd7d4', '#005a32', '#238b45', '#a1d99b']
if flat:
g = heatmap(df, vmin=-1, vmax=1, cmap=ListedColormap(cmap), cbar=False, ax=ax,
**kwargs)
if not labels:
g.set_xlabel('')
g.set_ylabel('')
return g
else:
df[(x <= 0.001) & (x >= 0)] = 1
df[(x <= 0.01) & (x > 0.001)] = 2
df[(x <= 0.05) & (x > 0.01)] = 3
df[(x > 0.05)] = 0
np.fill_diagonal(df.values, -1)
if len(cmap) != 5:
raise ValueError("Cmap list must contain 5 items")
g = heatmap(df, vmin=-1, vmax=3, cmap=ListedColormap(cmap), center=1, cbar=False,
ax=ax, **kwargs)
if not labels:
g.set_xlabel('')
g.set_ylabel('')
cbar_ax = g.figure.add_axes(cbar_ax_bbox or [0.95, 0.35, 0.04, 0.3])
cbar = ColorbarBase(cbar_ax, cmap=ListedColormap(cmap[2:] + [cmap[1]]),
boundaries=[0, 1, 2, 3, 4])
cbar.set_ticks(np.linspace(0.5, 3.5, 4))
cbar.set_ticklabels(['p < 0.001', 'p < 0.01', 'p < 0.05', 'NS'])
cbar.outline.set_linewidth(1)
cbar.outline.set_edgecolor('0.5')
cbar.ax.tick_params(size=0)
return g, cbar | Significance plot, a heatmap of p values (based on Seaborn).
Parameters
----------
x : array_like, ndarray or DataFrame
If flat is False (default), x must be an array, any object exposing
the array interface, containing p values. If flat is True, x must be
a sign_array (returned by `scikit_posthocs.sign_array` function)
g : array_like or ndarray, optional
An array, any object exposing the array interface, containing
group names.
flat : bool, optional
If `flat` is True, plots a significance array as a heatmap using
seaborn. If `flat` is False (default), plots an array of p values.
Non-flat mode is useful if you need to differentiate significance
levels visually. It is the preferred mode.
labels : bool, optional
Plot axes labels (default) or not.
cmap : list, optional
1) If flat is False (default):
List consisting of five elements, that will be exported to
ListedColormap method of matplotlib. First is for diagonal
elements, second is for non-significant elements, third is for
p < 0.001, fourth is for p < 0.01, fifth is for p < 0.05.
2) If flat is True:
List consisting of three elements, that will be exported to
ListedColormap method of matplotlib. First is for diagonal
elements, second is for non-significant elements, third is for
significant ones.
3) If not defined, default colormaps will be used.
cbar_ax_bbox : list, optional
Colorbar axes position rect [left, bottom, width, height] where
all quantities are in fractions of figure width and height.
Refer to `matplotlib.figure.Figure.add_axes` for more information.
Default is [0.95, 0.35, 0.04, 0.3].
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active Axes.
kwargs : other keyword arguments
Keyword arguments to be passed to seaborn heatmap method. These
keyword args cannot be used: cbar, vmin, vmax, center.
Returns
-------
Axes object with the heatmap (and ColorBase object of cbar if `flat` is set to
False).
Examples
--------
>>> x = np.array([[-1, 1, 1],
[ 1, -1, 0],
[ 1, 0, -1]])
>>> ph.sign_plot(x, flat = True) | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_plotting.py#L118-L244 | null | import numpy as np
from matplotlib.colors import ListedColormap
from matplotlib.colorbar import ColorbarBase
from seaborn import heatmap
from pandas import DataFrame
def sign_array(p_values, alpha=0.05):
"""
Significance array
Converts an array with p values to a significance array where
0 is False (not significant), 1 is True (significant),
and -1 is for diagonal elements.
Parameters
----------
p_values : array_like or ndarray
An array, any object exposing the array interface, containing
p values.
alpha : float, optional
Significance level. Default is 0.05.
Returns
-------
Numpy array where 0 is False (not significant), 1 is True (significant),
and -1 is for diagonal elements.
Examples
--------
>>> p_values = np.array([[ 0. , 0.00119517, 0.00278329],
[ 0.00119517, 0. , 0.18672227],
[ 0.00278329, 0.18672227, 0. ]])
>>> ph.sign_array(p_values)
array([[-1, 1, 1],
[ 1, -1, 0],
[ 1, 0, -1]])
"""
p_values = np.array(p_values)
p_values[p_values > alpha] = 0
p_values[(p_values < alpha) & (p_values > 0)] = 1
np.fill_diagonal(p_values, -1)
return p_values
def sign_table(p_values, lower=True, upper=True):
"""
Significance table
Returns table that can be used in a publication. P values are replaced
with asterisks: \* - p < 0.05, \*\* - p < 0.01, \*\*\* - p < 0.001.
Parameters
----------
p_values : array_like, or ndarray, or pandas DataFrame
An array, any object exposing the array interface, containing
p values.
lower : bool, optional
Defines whether to return the lower triangle.
upper : bool, optional
Defines whether to return the upper triangle.
Returns
-------
Numpy array or pandas DataFrame with asterisks masked p values.
Examples
--------
>>> p_values = np.array([[-1. , 0.00119517, 0.00278329],
[ 0.00119517, -1. , 0.18672227],
[ 0.00278329, 0.18672227, -1. ]])
>>> ph.sign_table(p_values)
array([['-', '**', '**'],
['**', '-', 'NS'],
['**', 'NS', '-']], dtype=object)
"""
if not any([lower, upper]):
raise ValueError("Either lower or upper triangle must be returned")
if not isinstance(p_values, DataFrame):
p_values = np.array(p_values, dtype=np.float)
ns = p_values > 0.05
three = (p_values < 0.001) & (p_values >= 0)
two = (p_values < 0.01) & (p_values >= 0.001)
one = (p_values < 0.05) & (p_values >= 0.01)
p_values = p_values.astype(object)
p_values[ns] = 'NS'
p_values[three] = '***'
p_values[two] = '**'
p_values[one] = '*'
if not isinstance(p_values, DataFrame):
np.fill_diagonal(p_values, '-')
if not lower:
p_values[np.tril_indices(p_values.shape[0], -1)] = ''
elif not upper:
p_values[np.triu_indices(p_values.shape[0], 1)] = ''
else:
np.fill_diagonal(p_values.values, '-')
if not lower:
p_values.values[np.tril_indices(p_values.shape[0], -1)] = ''
elif not upper:
p_values.values[np.triu_indices(p_values.shape[0], 1)] = ''
return p_values
|
mattja/sdeint | sdeint/_broadcast.py | broadcast_to | python | def broadcast_to(array, shape, subok=False):
return _broadcast_to(array, shape, subok=subok, readonly=True) | Broadcast an array to a new shape.
Parameters
----------
array : array_like
The array to broadcast.
shape : tuple
The shape of the desired array.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
ValueError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
Notes
-----
.. versionadded:: 1.10.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> np.broadcast_to(x, (3, 3))
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]) | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/_broadcast.py#L70-L108 | [
"def _broadcast_to(array, shape, subok, readonly):\n shape = tuple(shape) if np.iterable(shape) else (shape,)\n array = np.array(array, copy=False, subok=subok)\n if not shape and array.shape:\n raise ValueError('cannot broadcast a non-scalar to a scalar array')\n if any(size < 0 for size in shap... | """
Backport these functions from numpy v1.10 numpy/lib/stride_tricks.py,
to support people using an older version of numpy.
"""
# Copyright (c) 2005-2015, NumPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
def _maybe_view_as_subclass(original_array, new_array):
if type(original_array) is not type(new_array):
# if input was an ndarray subclass and subclasses were OK,
# then view the result as that subclass.
new_array = new_array.view(type=type(original_array))
# Since we have done something akin to a view from original_array, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_array.__array_finalize__:
new_array.__array_finalize__(original_array)
return new_array
def _broadcast_to(array, shape, subok, readonly):
shape = tuple(shape) if np.iterable(shape) else (shape,)
array = np.array(array, copy=False, subok=subok)
if not shape and array.shape:
raise ValueError('cannot broadcast a non-scalar to a scalar array')
if any(size < 0 for size in shape):
raise ValueError('all elements of broadcast shape must be non-'
'negative')
broadcast = np.nditer(
(array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],
op_flags=['readonly'], itershape=shape, order='C').itviews[0]
result = _maybe_view_as_subclass(array, broadcast)
if not readonly and array.flags.writeable:
result.flags.writeable = True
return result
|
mattja/sdeint | sdeint/wiener.py | deltaW | python | def deltaW(N, m, h):
return np.random.normal(0.0, np.sqrt(h), (N, m)) | Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h) | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L42-L50 | null | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | _Aterm | python | def _Aterm(N, h, m, k, dW):
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k | kth term in the sum of Wiktorsson2001 equation (2.2) | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L65-L72 | [
"def _t(a):\n \"\"\"transpose the last two axes of a three axis array\"\"\"\n return a.transpose((0, 2, 1))\n",
"def _dot(a, b):\n \"\"\" for rank 3 arrays a and b, return \\sum_k a_ij^k . b_ik^l (no sum on i)\n i.e. This is just normal matrix multiplication at each point on first axis\n \"\"\"\n ... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | Ikpw | python | def Ikpw(dW, h, n=5):
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I) | matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals. | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L75-L103 | [
"def _t(a):\n \"\"\"transpose the last two axes of a three axis array\"\"\"\n return a.transpose((0, 2, 1))\n",
"def _dot(a, b):\n \"\"\" for rank 3 arrays a and b, return \\sum_k a_ij^k . b_ik^l (no sum on i)\n i.e. This is just normal matrix multiplication at each point on first axis\n \"\"\"\n ... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | Jkpw | python | def Jkpw(dW, h, n=5):
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J) | matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals. | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L106-L125 | [
"def Ikpw(dW, h, n=5):\n \"\"\"matrix I approximating repeated Ito integrals for each of N time\n intervals, based on the method of Kloeden, Platen and Wright (1992).\n\n Args:\n dW (array of shape (N, m)): giving m independent Weiner increments for\n each time step N. (You can make this array ... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
    """Asymptotic covariance matrix Sigma_infinity, Wiktorsson2001 eqn (4.5).

    Km0 and Pm0 are the precomputed _K(m) and _P(m) matrices; dW is
    expected in column-vector form of shape (N, m, 1).
    """
    M = m*(m - 1)//2
    eyeM = broadcast_to(np.eye(M), (N, M, M))
    diff = np.eye(m**2) - Pm0
    left = broadcast_to((2.0/h)*np.dot(Km0, diff), (N, M, m**2))
    middle = _kp2(broadcast_to(np.eye(m), (N, m, m)), _dot(dW, _t(dW)))
    right = broadcast_to(np.dot(diff, Km0.T), (N, m**2, M))
    return 2*eyeM + _dot(_dot(left, middle), right)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
    """Approximate repeated Ito integrals I_{ij} for each of N time
    intervals, using the method of Wiktorsson (2001): a truncated series
    for the Levy areas plus a multivariate-normal approximation of the
    tail sum.

    Args:
      dW (array of shape (N, m)): giving m independent Wiener increments for
        each time step N. (You can make this array using sdeint.deltaW())
      h (float): the time step size
      n (int, optional): how many terms to take in the series expansion

    Returns:
      (Atilde, I) where
        Atilde: array of shape (N, m(m-1)//2, 1) giving the area integrals
          that were used.
        I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
          integral values for each of the N time intervals.

    Raises:
      ValueError: if dW does not have shape (N, m) or (N, m, 1).
    """
    N = dW.shape[0]
    m = dW.shape[1]
    if dW.ndim < 3:
        dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
    if dW.shape[2] != 1 or dW.ndim > 3:
        raise(ValueError)
    if m == 1:
        # single process: no Levy areas; I_{11} = (dW^2 - h)/2 exactly
        return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
    Pm0 = _P(m)
    Km0 = _K(m)
    M = m*(m-1)//2
    # partial sum of the first n series terms for the areas
    Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
    for k in range(2, n+1):
        Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
    Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
    # approximate the tail sum by a multivariate normal (Wiktorsson 4.5)
    S = _sigmainf(N, h, m, dW, Km0, Pm0)
    normdW2 = np.sum(np.abs(dW)**2, axis=1)
    radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
    IM = broadcast_to(np.eye(M), (N, M, M))
    Im = broadcast_to(np.eye(m), (N, m, m))
    Ims0 = np.eye(m**2)
    # approximate matrix square root of the covariance (per interval)
    sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
    G = np.random.normal(0.0, 1.0, (N, M, 1))
    # _a(n) is the variance factor remaining after truncating at n terms
    tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
    Atilde = Atilde_n + tailsum # our final approximation of the areas
    # reassemble vec(I) from the increments and areas, then unstack
    factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
    vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
    I = _unvec(vecI)
    dW = dW.reshape((N, -1)) # change back to shape (N, m)
    return (Atilde, I)
def Jwik(dW, h, n=5):
    """Approximate repeated Stratonovich integrals J_{ij} for each of N
    time intervals, using the method of Wiktorsson (2001).

    Args:
      dW (array of shape (N, m)): giving m independent Wiener increments for
        each time step N. (You can make this array using sdeint.deltaW())
      h (float): the time step size
      n (int, optional): how many terms to take in the series expansion

    Returns:
      (Atilde, J) where
        Atilde: array of shape (N, m(m-1)//2, 1) giving the area integrals
          that were used.
        J: array of shape (N, m, m) giving an m x m matrix of repeated
          Stratonovich integral values for each of the N time intervals.
    """
    Atilde, ito = Iwik(dW, h, n)
    # Ito -> Stratonovich conversion: add h/2 on the diagonal
    m = dW.shape[1]
    return (Atilde, ito + 0.5*h*np.eye(m)[np.newaxis, :, :])
|
mattja/sdeint | sdeint/wiener.py | _vec | python | def _vec(A):
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F') | Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1 | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L130-L141 | null | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
    """Sample Wiener increments for m independent Wiener processes.

    For each of N time intervals of length h, draws the increment
    W_j((n+1)*h) - W_j(n*h) for every process j = 0 .. m-1.

    Returns:
      dW (array of shape (N, m)): independent N(0, h) samples; the [n, j]
      element is the increment of process j over interval n.
    """
    std_dev = np.sqrt(h)
    return np.random.normal(loc=0.0, scale=std_dev, size=(N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
    """kth term in the series of Wiktorsson2001 equation (2.2).

    Draws two fresh standard-normal (N, m, 1) arrays per call; the result
    is an antisymmetric (N, m, m) contribution to the Levy area sum.
    """
    Xk = np.random.normal(0.0, 1.0, (N, m, 1))
    Yk = np.random.normal(0.0, 1.0, (N, m, 1))
    shifted = Yk + np.sqrt(2.0/h)*dW
    outer = _dot(Xk, _t(shifted))
    return (outer - _dot(shifted, _t(Xk)))/k
def Ikpw(dW, h, n=5):
    """Approximate repeated Ito integrals I_{ij} for each of N time
    intervals, using the truncated Brownian-bridge expansion of Kloeden,
    Platen and Wright (1992).

    Args:
      dW (array of shape (N, m)): m independent Wiener increments for each
        of the N time steps. (You can make this array using sdeint.deltaW())
      h (float): the time step size
      n (int, optional): how many terms to take in the series expansion

    Returns:
      (A, I) where
        A: array of shape (N, m, m) giving the Levy areas that were used.
        I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
          integral values for each of the N time intervals.

    Raises:
      ValueError: if dW does not have shape (N, m) or (N, m, 1).
    """
    N = dW.shape[0]
    m = dW.shape[1]
    if dW.ndim < 3:
        dW = dW.reshape((N, -1, 1))  # work with column vectors (N, m, 1)
    if dW.shape[2] != 1 or dW.ndim > 3:
        raise ValueError('dW must have shape (N, m) or (N, m, 1)')
    # Partial sum of the Levy-area series (Wiktorsson2001 eqn 2.2)
    A = _Aterm(N, h, m, 1, dW)
    for k in range(2, n+1):
        A += _Aterm(N, h, m, k, dW)
    A = (h/(2.0*np.pi))*A
    # I_{ij} = (dW_i dW_j - h*delta_{ij})/2 + A_{ij}
    I = 0.5*(_dot(dW, _t(dW)) - h*np.eye(m)) + A
    return (A, I)
def Jkpw(dW, h, n=5):
    """Approximate repeated Stratonovich integrals J_{ij} for each of N
    time intervals, via the Kloeden, Platen and Wright (1992) method.

    Args:
      dW (array of shape (N, m)): m independent Wiener increments for each
        of the N time steps. (You can make this array using sdeint.deltaW())
      h (float): the time step size
      n (int, optional): how many terms to take in the series expansion

    Returns:
      (A, J) where
        A: array of shape (N, m, m) giving the Levy areas that were used.
        J: array of shape (N, m, m) giving an m x m matrix of repeated
          Stratonovich integral values for each of the N time intervals.
    """
    A, I = Ikpw(dW, h, n)
    # Ito -> Stratonovich conversion: add h/2 on the diagonal
    m = dW.shape[1]
    correction = 0.5*h*np.eye(m)[np.newaxis, :, :]
    return (A, I + correction)
# The code below this point implements the method of Wiktorsson2001.
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
    """kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
    M = m*(m - 1)//2
    # Fresh pair of standard-normal draws for this series term.
    # (Draw order Xk then Yk is kept, so results match under a fixed seed.)
    Xk = np.random.normal(0.0, 1.0, (N, m, 1))
    Yk = np.random.normal(0.0, 1.0, (N, m, 1))
    coeff = np.dot(Km0, Pm0 - np.eye(m**2))
    coeff = broadcast_to(coeff, (N, M, m**2))
    shifted = Yk + np.sqrt(2.0/h)*dW
    return _dot(coeff, _kp(shifted, Xk))/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
    r"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)

    Args:
      N (int): number of time intervals
      h (float): the time step size
      m (int): number of independent Wiener processes
      dW (array of shape (N, m, 1)): Wiener increments
      Km0: matrix K_m (from _K(m))
      Pm0: permutation matrix P_m (from _P(m))

    Returns:
      array of shape (N, M, M) where M = m(m-1)/2
    """
    # Raw docstring above: \Sigma and \infty are not valid string escapes.
    M = m*(m-1)//2
    Im = broadcast_to(np.eye(m), (N, m, m))
    IM = broadcast_to(np.eye(M), (N, M, M))
    Ims0 = np.eye(m**2)
    factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
    factor2 = _kp2(Im, _dot(dW, _t(dW)))
    factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
    return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
    """matrix I approximating repeated Ito integrals for each of N time
    intervals, using the method of Wiktorsson (2001).

    Args:
      dW (array of shape (N, m)): giving m independent Weiner increments for
        each time step N. (You can make this array using sdeint.deltaW())
      h (float): the time step size
      n (int, optional): how many terms to take in the series expansion

    Returns:
      (Atilde, I) where
        Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
        I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
          integral values for each of the N time intervals.

    Raises:
      ValueError: if dW does not have shape (N, m) or (N, m, 1).
    """
    N = dW.shape[0]
    m = dW.shape[1]
    if dW.ndim < 3:
        dW = dW.reshape((N, -1, 1))  # change to array of shape (N, m, 1)
    if dW.shape[2] != 1 or dW.ndim > 3:
        raise ValueError('dW must be an array of shape (N, m) or (N, m, 1)')
    if m == 1:
        # Scalar Wiener process: no Levy areas, and I = (dW^2 - h)/2 exactly.
        return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
    Pm0 = _P(m)
    Km0 = _K(m)
    M = m*(m-1)//2
    # Partial sum of the first n terms of the series for the areas:
    Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
    for k in range(2, n+1):
        Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
    Atilde_n = (h/(2.0*np.pi))*Atilde_n  # approximation after n terms
    # Approximate the tail-sum distribution by a multivariate normal with
    # the asymptotic covariance of Wiktorsson2001 eqn (4.5):
    S = _sigmainf(N, h, m, dW, Km0, Pm0)
    normdW2 = np.sum(np.abs(dW)**2, axis=1)
    radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
    IM = broadcast_to(np.eye(M), (N, M, M))
    Im = broadcast_to(np.eye(m), (N, m, m))
    Ims0 = np.eye(m**2)
    sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
    G = np.random.normal(0.0, 1.0, (N, M, 1))
    tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
    Atilde = Atilde_n + tailsum  # our final approximation of the areas
    # Recover I from the vectorized identity vec(I) = 0.5(dW (x) dW - vec(hI))
    # + ((I - P)K^T) Atilde:
    factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
    vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
    I = _unvec(vecI)
    # NOTE: the original rebound the local dW to shape (N, m) here before
    # returning; that had no effect outside this function, so it is removed.
    return (Atilde, I)
def Jwik(dW, h, n=5):
    """matrix J approximating repeated Stratonovich integrals for each of N
    time intervals, using the method of Wiktorsson (2001).

    Args:
      dW (array of shape (N, m)): giving m independent Weiner increments for
        each time step N. (You can make this array using sdeint.deltaW())
      h (float): the time step size
      n (int, optional): how many terms to take in the series expansion

    Returns:
      (Atilde, J) where
        Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
        J: array of shape (N, m, m) giving an m x m matrix of repeated
          Stratonovich integral values for each of the N time intervals.
    """
    num_wieners = dW.shape[1]
    Atilde, ito = Iwik(dW, h, n)
    # Stratonovich J adds the deterministic diagonal correction h/2,
    # broadcast over all N time intervals.
    correction = 0.5 * h * np.eye(num_wieners)[np.newaxis, :, :]
    return (Atilde, ito + correction)
|
mattja/sdeint | sdeint/wiener.py | _unvec | python | def _unvec(vecA, m=None):
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F') | inverse of _vec() operator | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L144-L149 | null | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
    """Generate sequence of Wiener increments for m independent Wiener
    processes W_j(t) j=0..m-1 for each of N time intervals of length h.

    Returns:
      dW (array of shape (N, m)): The [n, j] element has the value
      W_j((n+1)*h) - W_j(n*h)
    """
    # Each increment over an interval of length h is N(0, h),
    # i.e. standard deviation sqrt(h).
    std_dev = np.sqrt(h)
    return np.random.normal(0.0, std_dev, (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
    """kth term in the sum of Wiktorsson2001 equation (2.2)"""
    scale = np.sqrt(2.0/h)
    # Draw order Xk then Yk is kept, so results match under a fixed seed.
    Xk = np.random.normal(0.0, 1.0, (N, m, 1))
    Yk = np.random.normal(0.0, 1.0, (N, m, 1))
    shifted = Yk + scale*dW
    # Antisymmetric combination X s^T - s X^T, damped by 1/k.
    return (_dot(Xk, _t(shifted)) - _dot(shifted, _t(Xk)))/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | _kp | python | def _kp(a, b):
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1) | Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1 | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L152-L161 | null | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | _kp2 | python | def _kp2(A, B):
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1) | Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0] | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L164-L173 | null | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | _P | python | def _P(m):
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P | Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2 | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L176-L184 | null | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | _K | python | def _K(m):
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K | matrix K_m from Wiktorsson2001 | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L187-L197 | null | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | _AtildeTerm | python | def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k | kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn) | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L200-L208 | [
"def _dot(a, b):\n \"\"\" for rank 3 arrays a and b, return \\sum_k a_ij^k . b_ik^l (no sum on i)\n i.e. This is just normal matrix multiplication at each point on first axis\n \"\"\"\n return np.einsum('ijk,ikl->ijl', a, b)\n",
"def _kp(a, b):\n \"\"\"Special case Kronecker tensor product of a[i] ... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | _sigmainf | python | def _sigmainf(N, h, m, dW, Km0, Pm0):
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3) | Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5) | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L211-L220 | [
"def _t(a):\n \"\"\"transpose the last two axes of a three axis array\"\"\"\n return a.transpose((0, 2, 1))\n",
"def _dot(a, b):\n \"\"\" for rank 3 arrays a and b, return \\sum_k a_ij^k . b_ik^l (no sum on i)\n i.e. This is just normal matrix multiplication at each point on first axis\n \"\"\"\n ... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | Iwik | python | def Iwik(dW, h, n=5):
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I) | matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals. | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L228-L273 | [
"def _dot(a, b):\n \"\"\" for rank 3 arrays a and b, return \\sum_k a_ij^k . b_ik^l (no sum on i)\n i.e. This is just normal matrix multiplication at each point on first axis\n \"\"\"\n return np.einsum('ijk,ikl->ijl', a, b)\n",
"def _vec(A):\n \"\"\"\n Linear operator _vec() from Wiktorsson2001... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
|
mattja/sdeint | sdeint/wiener.py | Jwik | python | def Jwik(dW, h, n=5):
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J) | matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals. | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L276-L295 | [
"def Iwik(dW, h, n=5):\n \"\"\"matrix I approximating repeated Ito integrals for each of N time\n intervals, using the method of Wiktorsson (2001).\n\n Args:\n dW (array of shape (N, m)): giving m independent Weiner increments for\n each time step N. (You can make this array using sdeint.deltaW... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
dW (array of shape (N, m)): giving m independent Weiner increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
|
mattja/sdeint | sdeint/integrate.py | _check_args | python | def _check_args(f, G, y0, tspan, dW=None, IJ=None):
if not np.isclose(min(np.diff(tspan)), max(np.diff(tspan))):
raise SDEValueError('Currently time steps must be equally spaced.')
# Be flexible to allow scalar equations. convert them to a 1D vector system
if isinstance(y0, numbers.Number):
if isinstance(y0, numbers.Integral):
numtype = np.float64
else:
numtype = type(y0)
y0_orig = y0
y0 = np.array([y0], dtype=numtype)
def make_vector_fn(fn):
def newfn(y, t):
return np.array([fn(y[0], t)], dtype=numtype)
newfn.__name__ = fn.__name__
return newfn
def make_matrix_fn(fn):
def newfn(y, t):
return np.array([[fn(y[0], t)]], dtype=numtype)
newfn.__name__ = fn.__name__
return newfn
if isinstance(f(y0_orig, tspan[0]), numbers.Number):
f = make_vector_fn(f)
if isinstance(G(y0_orig, tspan[0]), numbers.Number):
G = make_matrix_fn(G)
# determine dimension d of the system
d = len(y0)
if len(f(y0, tspan[0])) != d:
raise SDEValueError('y0 and f have incompatible shapes.')
message = """y0 has length %d. So G must either be a single function
returning a matrix of shape (%d, m), or else a list of m separate
functions each returning a column of G, with shape (%d,)""" % (
d, d, d)
if callable(G):
# then G must be a function returning a d x m matrix
Gtest = G(y0, tspan[0])
if Gtest.ndim != 2 or Gtest.shape[0] != d:
raise SDEValueError(message)
# determine number of independent Wiener processes m
m = Gtest.shape[1]
else:
# G should be a list of m functions g_i giving columns of G
G = tuple(G)
m = len(G)
Gtest = np.zeros((d, m))
for k in range(0, m):
if not callable(G[k]):
raise SDEValueError(message)
Gtestk = G[k](y0, tspan[0])
if np.shape(Gtestk) != (d,):
raise SDEValueError(message)
Gtest[:,k] = Gtestk
message = """From function G, it seems m==%d. If present, the optional
parameter dW must be an array of shape (len(tspan)-1, m) giving
m independent Wiener increments for each time interval.""" % m
if dW is not None:
if not hasattr(dW, 'shape') or dW.shape != (len(tspan) - 1, m):
raise SDEValueError(message)
message = """From function G, it seems m==%d. If present, the optional
parameter I or J must be an array of shape (len(tspan)-1, m, m)
giving an m x m matrix of repeated integral values for each
time interval.""" % m
if IJ is not None:
if not hasattr(IJ, 'shape') or IJ.shape != (len(tspan) - 1, m, m):
raise SDEValueError(message)
return (d, m, f, G, y0, tspan, dW, IJ) | Do some validation common to all algorithms. Find dimension d and number
of Wiener processes m. | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/integrate.py#L47-L115 | [
"f = lambda y, t: np.array([1.0, 2.0, 3.0, 4.0])\n",
"G = lambda y, t: np.ones((3, 3))\n",
"f = lambda y, t: -1.0 * y\n",
"G = lambda y, t: 0.2\n",
"f = lambda y, t: -1.0 * y\n",
"G = lambda y, t: 0.2\n",
"def f(y, t):\n return np.array([ -1.0*y[0],\n y[2],\n ... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""Numerical integration algorithms for Ito and Stratonovich stochastic
ordinary differential equations.
Usage:
itoint(f, G, y0, tspan) for Ito equation dy = f dt + G dW
stratint(f, G, y0, tspan) for Stratonovich equation dy = f dt + G \circ dW
y0 is the initial value
tspan is an array of time values (currently these must be equally spaced)
function f is the deterministic part of the system (scalar or dx1 vector)
function G is the stochastic part of the system (scalar or d x m matrix)
sdeint will choose an algorithm for you. Or you can choose one explicitly:
itoEuler: the Euler-Maruyama algorithm for Ito equations.
stratHeun: the Stratonovich Heun algorithm for Stratonovich equations.
itoSRI2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRI2 for Ito equations.
stratSRS2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRS2 for Stratonovich equations.
stratKP2iS: the Kloeden and Platen two-step implicit order 1.0 strong algorithm
for Stratonovich equations.
"""
from __future__ import absolute_import
from .wiener import deltaW, Ikpw, Iwik, Jkpw, Jwik
import numpy as np
import numbers
class Error(Exception):
pass
class SDEValueError(Error):
"""Thrown if integration arguments fail some basic sanity checks"""
pass
def itoint(f, G, y0, tspan):
    """Numerically integrate the Ito equation dy = f(y,t)dt + G(y,t)dW.

    Here y is the d-dimensional state vector, f gives the deterministic
    drift and G gives the d x m matrix of noise coefficients, with
    dW(t) = (dW_1, ..., dW_m) a vector of independent Wiener increments.

    Args:
        f: callable(y, t) returning a numpy array of shape (d,);
            deterministic part of the system.
        G: callable(y, t) returning a numpy array of shape (d, m);
            noise coefficients of the system.
        y0: array of shape (d,) giving the initial state vector.
        tspan (array): equally spaced time points to solve for y,
            e.g. np.arange(0, 10, 0.005); tspan[0] is the initial time.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError
    """
    # Validate arguments up front. In future versions the most suitable
    # Ito algorithm could be selected here from properties of the system
    # and its noise; for now we always use the SRI2 scheme.
    d, m, f, G, y0, tspan, _, _ = _check_args(f, G, y0, tspan, None, None)
    return itoSRI2(f, G, y0, tspan)
def stratint(f, G, y0, tspan):
    """Numerically integrate the Stratonovich equation
    dy = f(y,t)dt + G(y,t) \circ dW.

    Here y is the d-dimensional state vector, f gives the deterministic
    drift and G gives the d x m matrix of noise coefficients, with
    dW(t) = (dW_1, ..., dW_m) a vector of independent Wiener increments.

    Args:
        f: callable(y, t) returning a numpy array of shape (d,);
            deterministic part of the system.
        G: callable(y, t) returning a numpy array of shape (d, m);
            noise coefficients of the system.
        y0: array of shape (d,) giving the initial state vector.
        tspan (array): equally spaced time points to solve for y,
            e.g. np.arange(0, 10, 0.005); tspan[0] is the initial time.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError
    """
    # Validate arguments up front. In future versions the most suitable
    # Stratonovich algorithm could be selected here from properties of the
    # system and its noise; for now we always use the SRS2 scheme.
    d, m, f, G, y0, tspan, _, _ = _check_args(f, G, y0, tspan, None, None)
    return stratSRS2(f, G, y0, tspan)
def itoEuler(f, G, y0, tspan, dW=None):
    """Use the Euler-Maruyama algorithm to integrate the Ito equation
    dy = f(y,t)dt + G(y,t) dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y, t) returning (d,) array
         Vector-valued function to define the deterministic part of the system
      G: callable(y, t) returning (d,m) array
         Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      dW: optional array of shape (len(tspan)-1, m). This is for advanced use,
        if you want to use a specific realization of the m independent Wiener
        processes. If not provided Wiener increments will be generated randomly

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      G. Maruyama (1955) Continuous Markov processes and stochastic equations
      Kloeden and Platen (1999) Numerical Solution of Stochastic Differential
        Equations
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    # Constant step size; equal spacing is enforced by _check_args.
    h = (tspan[N-1] - tspan[0])/(N - 1)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)
    y[0] = y0;
    for n in range(0, N-1):
        tn = tspan[n]
        yn = y[n]
        dWn = dW[n,:]
        # Explicit Euler-Maruyama update (strong order 0.5):
        y[n+1] = yn + f(yn, tn)*h + G(yn, tn).dot(dWn)
    return y
def stratHeun(f, G, y0, tspan, dW=None):
    """Use the Stratonovich Heun algorithm to integrate Stratonovich equation
    dy = f(y,t)dt + G(y,t) \circ dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y, t) returning (d,) array
         Vector-valued function to define the deterministic part of the system
      G: callable(y, t) returning (d,m) array
         Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      dW: optional array of shape (len(tspan)-1, m). This is for advanced use,
        if you want to use a specific realization of the m independent Wiener
        processes. If not provided Wiener increments will be generated randomly

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      W. Rumelin (1982) Numerical Treatment of Stochastic Differential
        Equations
      R. Mannella (2002) Integration of Stochastic Differential Equations
        on a Computer
      K. Burrage, P. M. Burrage and T. Tian (2004) Numerical methods for strong
        solutions of stochastic differential equations: an overview
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)
    y[0] = y0;
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        yn = y[n]
        dWn = dW[n,:]
        fn = f(yn, tn)
        Gn = G(yn, tn)
        # Euler-Maruyama predictor step:
        ybar = yn + fn*h + Gn.dot(dWn)
        # Re-evaluate drift and diffusion at the predicted point, then use
        # the trapezoidal (Heun) average as the corrector:
        fnbar = f(ybar, tnp1)
        Gnbar = G(ybar, tnp1)
        y[n+1] = yn + 0.5*(fn + fnbar)*h + 0.5*(Gn + Gnbar).dot(dWn)
    return y
def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):
    """Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
    SRI2 to integrate an Ito equation dy = f(y,t)dt + G(y,t)dW(t)

    where y is d-dimensional vector variable, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) is a vector of m independent Wiener increments.

    This algorithm is suitable for Ito systems with an arbitrary noise
    coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
    or commutative). The algorithm has order 2.0 convergence for the
    deterministic part alone and order 1.0 strong convergence for the complete
    stochastic system.

    Args:
      f: A function f(y, t) returning an array of shape (d,)
         Vector-valued function to define the deterministic part of the system
      G: The d x m coefficient function G can be given in two different ways:
         You can provide a single function G(y, t) that returns an array of
         shape (d, m). In this case the entire matrix G() will be evaluated
         2m+1 times at each time step so complexity grows quadratically with m.
         Alternatively you can provide a list of m functions g(y, t) each
         defining one column of G (each returning an array of shape (d,).
         In this case each g will be evaluated 3 times at each time step so
         complexity grows linearly with m. If your system has large m and
         G involves complicated functions, consider using this way.
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      Imethod (callable, optional): which function to use to simulate repeated
        Ito integrals. Here you can choose either sdeint.Ikpw (the default) or
        sdeint.Iwik (which is more accurate but uses a lot of memory in the
        current implementation).
      dW: optional array of shape (len(tspan)-1, m).
      I: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and I are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    # SRI2 and SRS2 share one implementation; the Ito variant is selected
    # by passing an Ito repeated-integral generator (Ikpw or Iwik).
    return _Roessler2010_SRK2(f, G, y0, tspan, Imethod, dW, I)
def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):
    """Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
    SRS2 to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)

    where y is d-dimensional vector variable, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) is a vector of m independent Wiener increments.

    This algorithm is suitable for Stratonovich systems with an arbitrary noise
    coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
    or commutative). The algorithm has order 2.0 convergence for the
    deterministic part alone and order 1.0 strong convergence for the complete
    stochastic system.

    Args:
      f: A function f(y, t) returning an array of shape (d,)
         Vector-valued function to define the deterministic part of the system
      G: The d x m coefficient function G can be given in two different ways:
         You can provide a single function G(y, t) that returns an array of
         shape (d, m). In this case the entire matrix G() will be evaluated
         2m+1 times at each time step so complexity grows quadratically with m.
         Alternatively you can provide a list of m functions g(y, t) each
         defining one column of G (each returning an array of shape (d,).
         In this case each g will be evaluated 3 times at each time step so
         complexity grows linearly with m. If your system has large m and
         G involves complicated functions, consider using this way.
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      Jmethod (callable, optional): which function to use to simulate repeated
        Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
        default) or sdeint.Jwik (which is more accurate but uses a lot of
        memory in the current implementation).
      dW: optional array of shape (len(tspan)-1, m).
      J: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and J are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    # SRI2 and SRS2 share one implementation; the Stratonovich variant is
    # selected by passing a Stratonovich repeated-integral generator.
    return _Roessler2010_SRK2(f, G, y0, tspan, Jmethod, dW, J)
def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):
    """Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
    algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations).

    Algorithms SRI2 and SRS2 are almost identical and have the same extended
    Butcher tableaus. The difference is that Ito repeated integrals I_ij are
    replaced by Stratonovich repeated integrals J_ij when integrating a
    Stratonovich equation (Theorem 6.2 in Roessler2010).

    Args:
      f: A function f(y, t) returning an array of shape (d,)
      G: Either a function G(y, t) that returns an array of shape (d, m),
         or a list of m functions g(y, t) each returning an array shape (d,).
      y0: array of shape (d,) giving the initial state
      tspan (array): Sequence of equally spaced time points
      IJmethod (callable): which function to use to generate repeated
        integrals. N.B. for an Ito equation, must use an Ito version here
        (either Ikpw or Iwik). For a Stratonovich equation, must use a
        Stratonovich version here (Jkpw or Jwik).
      dW: optional array of shape (len(tspan)-1, m).
      IJ: optional array of shape (len(tspan)-1, m, m).
        Optional arguments dW and IJ are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, IJ) = _check_args(f, G, y0, tspan, dW, IJ)
    have_separate_g = (not callable(G)) # if G is given as m separate functions
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h) # shape (N-1, m)
    if IJ is None:
        # pre-generate repeated stochastic integrals for each time step.
        # Must give I_ij for the Ito case or J_ij for the Stratonovich case:
        __, I = IJmethod(dW, h) # shape (N-1, m, m)
    else:
        I = IJ
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    y[0] = y0;
    Gn = np.zeros((d, m), dtype=y.dtype)
    for n in range(0, N-1):
        tn = tspan[n]
        tn1 = tspan[n+1]
        h = tn1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n] # shape (d,)
        Ik = dW[n,:] # shape (m,)
        Iij = I[n,:,:] # shape (m, m)
        fnh = f(Yn, tn)*h # shape (d,)
        if have_separate_g:
            # build the diffusion matrix one column at a time
            for k in range(0, m):
                Gn[:,k] = G[k](Yn, tn)
        else:
            Gn = G(Yn, tn)
        # supporting stage values H2, H3 of the two-stage SRK scheme
        sum1 = np.dot(Gn, Iij)/sqrth # shape (d, m)
        H20 = Yn + fnh # shape (d,)
        H20b = np.reshape(H20, (d, 1))
        H2 = H20b + sum1 # shape (d, m)
        H30 = Yn
        H3 = H20b - sum1
        fn1h = f(H20, tn1)*h
        # combine the drift stages and the leading diffusion term ...
        Yn1 = Yn + 0.5*(fnh + fn1h) + np.dot(Gn, Ik)
        # ... then add the finite-difference approximation of the
        # diffusion derivative terms, column by column:
        if have_separate_g:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G[k](H2[:,k], tn1) - G[k](H3[:,k], tn1))
        else:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G(H2[:,k], tn1)[:,k] - G(H3[:,k], tn1)[:,k])
        y[n+1] = Yn1
    return y
def stratKP2iS(f, G, y0, tspan, Jmethod=Jkpw, gam=None, al1=None, al2=None,
               rtol=1e-4, dW=None, J=None):
    """Use the Kloeden and Platen two-step implicit order 1.0 strong algorithm
    to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)

    This semi-implicit algorithm may be useful for stiff systems. The noise
    does not need to be scalar, diagonal, or commutative.

    This algorithm is defined in Kloeden and Platen (1999) section 12.4,
    equations (4.5) and (4.7). Here implementing that scheme with default
    parameters \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5 for k=1..d using
    MINPACK HYBRD algorithm to solve the implicit vector equation at each step.

    Args:
      f: A function f(y, t) returning an array of shape (d,) to define the
        deterministic part of the system
      G: A function G(y, t) returning an array of shape (d, m) to define the
        noise coefficients of the system
      y0: array of shape (d,) giving the initial state
      tspan (array): Sequence of equally spaced time points
      Jmethod (callable, optional): which function to use to simulate repeated
        Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
        default) or sdeint.Jwik (which is more accurate but uses a lot of
        memory in the current implementation).
      gam, al1, al2 (optional arrays of shape (d,)): These can configure free
        parameters \gamma_k, \alpha_{1,k}, \alpha_{2,k} of the algorithm.
        You can omit these, then the default values 0.5 will be used.
      rtol (float, optional): Relative error tolerance. The default is 1e-4.
        This is the relative tolerance used when solving the implicit equation
        for Y_{n+1} at each step. It does not mean that the overall sample path
        approximation has this relative precision.
      dW: optional array of shape (len(tspan)-1, m).
      J: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and J are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))

    Raises:
      SDEValueError, RuntimeError

    See also:
      P. Kloeden and E. Platen (1999) Numerical Solution of Stochastic
        Differential Equations, revised and updated 3rd printing.
    """
    # scipy is an optional dependency; import lazily so the rest of the
    # module works without it.
    try:
        from scipy.optimize import fsolve
    except ImportError:
        raise Error('stratKP2iS() requires package ``scipy`` to be installed.')
    (d, m, f, G, y0, tspan, dW, J) = _check_args(f, G, y0, tspan, dW, J)
    if not callable(G):
        raise SDEValueError('G should be a function returning a d x m matrix.')
    if np.iscomplexobj(y0):
        raise SDEValueError("stratKP2iS() can't yet handle complex variables.")
    if gam is None:
        gam = np.ones((d,))*0.5 # Default level of implicitness \gamma_k = 0.5
    if al1 is None:
        al1 = np.ones((d,))*0.5 # Default \alpha_{1,k} = 0.5
    if al2 is None:
        al2 = np.ones((d,))*0.5 # Default \alpha_{2,k} = 0.5
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h) # shape (N-1, m)
    if J is None:
        # pre-generate repeated Stratonovich integrals for each time step
        __, J = Jmethod(dW, h) # shape (N-1, m, m)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    def _imp(Ynp1, Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1):
        """At each step we will solve _imp(Ynp1, ...) == 0 for Ynp1.
        The meaning of these arguments is: Y_{n+1}, Y_n, Y_{n-1}, V_n, V_{n-1},
        t_{n+1}, t_n, t_{n-1}, f(Y_n, t_n), f(Y_{n-1}, t_{n-1})."""
        return ((1 - gam)*Yn + gam*Ynm1 + (al2*f(Ynp1, tnp1) +
                (gam*al1 + (1 - al2))*fn + gam*(1 - al1)*fnm1)*h + Vn +
                gam*Vnm1 - Ynp1)
    fn = None
    Vn = None
    y[0] = y0;
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        h = tnp1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n] # shape (d,)
        Jk = dW[n,:] # shape (m,)
        Jij = J[n,:,:] # shape (m, m)
        # shift previous-step values before computing the current ones
        fnm1 = fn
        fn = f(Yn, tn)
        Gn = G(Yn, tn)
        # supporting values and finite-difference diffusion-derivative term:
        Ybar = (Yn + fn*h).reshape((d, 1)) + Gn*sqrth # shape (d, m)
        sum1 = np.zeros((d,))
        for j1 in range(0, m):
            sum1 += np.dot(G(Ybar[:,j1], tn) - Gn, Jij[j1,:])
        Vnm1 = Vn
        Vn = np.dot(Gn, Jk) + sum1/sqrth
        if n == 0:
            # First step uses Kloeden&Platen explicit order 1.0 strong scheme:
            y[n+1] = Yn + fn*h + Vn
            continue
        tnm1 = tspan[n-1]
        Ynm1 = y[n-1] # shape (d,)
        # now solve _imp(Ynp1, ...) == 0 for Ynp1, near to Yn
        args = (Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1)
        (Ynp1, __, status, msg) = fsolve(_imp, Yn, args=args, xtol=rtol,
                                         full_output=True)
        if status == 1:
            y[n+1] = Ynp1
        else:
            # NOTE(review): this rebinds `m` (the Wiener dimension) to the
            # message string; harmless only because we raise immediately.
            m = """At time t_n = %g Failed to solve for Y_{n+1} with args %s.
                   Reason: %s""" % (tn, args, msg)
            raise RuntimeError(m)
    return y
|
mattja/sdeint | sdeint/integrate.py | itoint | python | def itoint(f, G, y0, tspan):
# In future versions we can automatically choose here the most suitable
# Ito algorithm based on properties of the system and noise.
(d, m, f, G, y0, tspan, __, __) = _check_args(f, G, y0, tspan, None, None)
chosenAlgorithm = itoSRI2
return chosenAlgorithm(f, G, y0, tspan) | Numerically integrate the Ito equation dy = f(y,t)dt + G(y,t)dW
where y is the d-dimensional state vector, f is a vector-valued function,
G is an d x m matrix-valued function giving the noise coefficients and
dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments
Args:
f: callable(y,t) returning a numpy array of shape (d,)
Vector-valued function to define the deterministic part of the system
G: callable(y,t) returning a numpy array of shape (d,m)
Matrix-valued function to define the noise coefficients of the system
y0: array of shape (d,) giving the initial state vector y(t==0)
tspan (array): The sequence of time points for which to solve for y.
These must be equally spaced, e.g. np.arange(0,10,0.005)
tspan[0] is the intial time corresponding to the initial state y0.
Returns:
y: array, with shape (len(tspan), len(y0))
With the initial value y0 in the first row
Raises:
SDEValueError | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/integrate.py#L118-L146 | [
"def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):\n \"\"\"Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm\n SRI2 to integrate an Ito equation dy = f(y,t)dt + G(y,t)dW(t)\n\n where y is d-dimensional vector variable, f is a vector-valued function,\n G is a d x m matrix-... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""Numerical integration algorithms for Ito and Stratonovich stochastic
ordinary differential equations.
Usage:
itoint(f, G, y0, tspan) for Ito equation dy = f dt + G dW
stratint(f, G, y0, tspan) for Stratonovich equation dy = f dt + G \circ dW
y0 is the initial value
tspan is an array of time values (currently these must be equally spaced)
function f is the deterministic part of the system (scalar or dx1 vector)
function G is the stochastic part of the system (scalar or d x m matrix)
sdeint will choose an algorithm for you. Or you can choose one explicitly:
itoEuler: the Euler-Maruyama algorithm for Ito equations.
stratHeun: the Stratonovich Heun algorithm for Stratonovich equations.
itoSRI2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRI2 for Ito equations.
stratSRS2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRS2 for Stratonovich equations.
stratKP2iS: the Kloeden and Platen two-step implicit order 1.0 strong algorithm
for Stratonovich equations.
"""
from __future__ import absolute_import
from .wiener import deltaW, Ikpw, Iwik, Jkpw, Jwik
import numpy as np
import numbers
class Error(Exception):
    """Base class for all exceptions raised by this module."""


class SDEValueError(Error):
    """Raised when integration arguments fail basic sanity checks."""
def _check_args(f, G, y0, tspan, dW=None, IJ=None):
    """Do some validation common to all algorithms. Find dimension d and number
    of Wiener processes m.

    Returns the tuple (d, m, f, G, y0, tspan, dW, IJ), where a scalar system
    (numeric y0 with scalar-valued f and G) has been converted to an
    equivalent 1-dimensional vector system.

    Raises:
      SDEValueError: if time steps are unequal or argument shapes are
        mutually inconsistent.
    """
    if not np.isclose(min(np.diff(tspan)), max(np.diff(tspan))):
        raise SDEValueError('Currently time steps must be equally spaced.')
    # Be flexible to allow scalar equations. convert them to a 1D vector system
    if isinstance(y0, numbers.Number):
        if isinstance(y0, numbers.Integral):
            # promote integer initial values so the state evolves as floats
            numtype = np.float64
        else:
            numtype = type(y0)
        y0_orig = y0
        y0 = np.array([y0], dtype=numtype)
        def make_vector_fn(fn):
            # wrap a scalar drift function to map (1,) arrays to (1,) arrays
            def newfn(y, t):
                return np.array([fn(y[0], t)], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        def make_matrix_fn(fn):
            # wrap a scalar noise function to map (1,) arrays to (1,1) arrays
            def newfn(y, t):
                return np.array([[fn(y[0], t)]], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        if isinstance(f(y0_orig, tspan[0]), numbers.Number):
            f = make_vector_fn(f)
        if isinstance(G(y0_orig, tspan[0]), numbers.Number):
            G = make_matrix_fn(G)
    # determine dimension d of the system
    d = len(y0)
    if len(f(y0, tspan[0])) != d:
        raise SDEValueError('y0 and f have incompatible shapes.')
    message = """y0 has length %d. So G must either be a single function
              returning a matrix of shape (%d, m), or else a list of m separate
              functions each returning a column of G, with shape (%d,)""" % (
                  d, d, d)
    if callable(G):
        # then G must be a function returning a d x m matrix
        Gtest = G(y0, tspan[0])
        if Gtest.ndim != 2 or Gtest.shape[0] != d:
            raise SDEValueError(message)
        # determine number of independent Wiener processes m
        m = Gtest.shape[1]
    else:
        # G should be a list of m functions g_i giving columns of G
        G = tuple(G)
        m = len(G)
        Gtest = np.zeros((d, m))
        for k in range(0, m):
            if not callable(G[k]):
                raise SDEValueError(message)
            Gtestk = G[k](y0, tspan[0])
            if np.shape(Gtestk) != (d,):
                raise SDEValueError(message)
            Gtest[:,k] = Gtestk
    message = """From function G, it seems m==%d. If present, the optional
              parameter dW must be an array of shape (len(tspan)-1, m) giving
              m independent Wiener increments for each time interval.""" % m
    if dW is not None:
        # pre-supplied Wiener increments must match the inferred m
        if not hasattr(dW, 'shape') or dW.shape != (len(tspan) - 1, m):
            raise SDEValueError(message)
    message = """From function G, it seems m==%d. If present, the optional
              parameter I or J must be an array of shape (len(tspan)-1, m, m)
              giving an m x m matrix of repeated integral values for each
              time interval.""" % m
    if IJ is not None:
        # pre-supplied repeated integrals must also match m
        if not hasattr(IJ, 'shape') or IJ.shape != (len(tspan) - 1, m, m):
            raise SDEValueError(message)
    return (d, m, f, G, y0, tspan, dW, IJ)
def stratint(f, G, y0, tspan):
    """Numerically integrate the Stratonovich equation
    dy = f(y,t)dt + G(y,t) \circ dW.

    Here y is the d-dimensional state vector, f gives the deterministic
    drift and G gives the d x m matrix of noise coefficients, with
    dW(t) = (dW_1, ..., dW_m) a vector of independent Wiener increments.

    Args:
        f: callable(y, t) returning a numpy array of shape (d,);
            deterministic part of the system.
        G: callable(y, t) returning a numpy array of shape (d, m);
            noise coefficients of the system.
        y0: array of shape (d,) giving the initial state vector.
        tspan (array): equally spaced time points to solve for y,
            e.g. np.arange(0, 10, 0.005); tspan[0] is the initial time.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError
    """
    # Validate arguments up front. In future versions the most suitable
    # Stratonovich algorithm could be selected here from properties of the
    # system and its noise; for now we always use the SRS2 scheme.
    d, m, f, G, y0, tspan, _, _ = _check_args(f, G, y0, tspan, None, None)
    return stratSRS2(f, G, y0, tspan)
def itoEuler(f, G, y0, tspan, dW=None):
    """Use the Euler-Maruyama algorithm to integrate the Ito equation
    dy = f(y,t)dt + G(y,t) dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y, t) returning (d,) array
         Vector-valued function to define the deterministic part of the system
      G: callable(y, t) returning (d,m) array
         Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      dW: optional array of shape (len(tspan)-1, m). This is for advanced use,
        if you want to use a specific realization of the m independent Wiener
        processes. If not provided Wiener increments will be generated randomly

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      G. Maruyama (1955) Continuous Markov processes and stochastic equations
      Kloeden and Platen (1999) Numerical Solution of Stochastic Differential
        Equations
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    # Constant step size; equal spacing is enforced by _check_args.
    h = (tspan[N-1] - tspan[0])/(N - 1)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)
    y[0] = y0;
    for n in range(0, N-1):
        tn = tspan[n]
        yn = y[n]
        dWn = dW[n,:]
        # Explicit Euler-Maruyama update (strong order 0.5):
        y[n+1] = yn + f(yn, tn)*h + G(yn, tn).dot(dWn)
    return y
def stratHeun(f, G, y0, tspan, dW=None):
    """Use the Stratonovich Heun algorithm to integrate Stratonovich equation
    dy = f(y,t)dt + G(y,t) \circ dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y, t) returning (d,) array
         Vector-valued function to define the deterministic part of the system
      G: callable(y, t) returning (d,m) array
         Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      dW: optional array of shape (len(tspan)-1, m). This is for advanced use,
        if you want to use a specific realization of the m independent Wiener
        processes. If not provided Wiener increments will be generated randomly

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      W. Rumelin (1982) Numerical Treatment of Stochastic Differential
        Equations
      R. Mannella (2002) Integration of Stochastic Differential Equations
        on a Computer
      K. Burrage, P. M. Burrage and T. Tian (2004) Numerical methods for strong
        solutions of stochastic differential equations: an overview
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)
    y[0] = y0;
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        yn = y[n]
        dWn = dW[n,:]
        fn = f(yn, tn)
        Gn = G(yn, tn)
        # Euler-Maruyama predictor step:
        ybar = yn + fn*h + Gn.dot(dWn)
        # Re-evaluate drift and diffusion at the predicted point, then use
        # the trapezoidal (Heun) average as the corrector:
        fnbar = f(ybar, tnp1)
        Gnbar = G(ybar, tnp1)
        y[n+1] = yn + 0.5*(fn + fnbar)*h + 0.5*(Gn + Gnbar).dot(dWn)
    return y
def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):
    """Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
    SRI2 to integrate an Ito equation dy = f(y,t)dt + G(y,t)dW(t)

    where y is d-dimensional vector variable, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) is a vector of m independent Wiener increments.

    This algorithm is suitable for Ito systems with an arbitrary noise
    coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
    or commutative). The algorithm has order 2.0 convergence for the
    deterministic part alone and order 1.0 strong convergence for the complete
    stochastic system.

    Args:
      f: A function f(y, t) returning an array of shape (d,)
         Vector-valued function to define the deterministic part of the system
      G: The d x m coefficient function G can be given in two different ways:
         You can provide a single function G(y, t) that returns an array of
         shape (d, m). In this case the entire matrix G() will be evaluated
         2m+1 times at each time step so complexity grows quadratically with m.
         Alternatively you can provide a list of m functions g(y, t) each
         defining one column of G (each returning an array of shape (d,).
         In this case each g will be evaluated 3 times at each time step so
         complexity grows linearly with m. If your system has large m and
         G involves complicated functions, consider using this way.
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      Imethod (callable, optional): which function to use to simulate repeated
        Ito integrals. Here you can choose either sdeint.Ikpw (the default) or
        sdeint.Iwik (which is more accurate but uses a lot of memory in the
        current implementation).
      dW: optional array of shape (len(tspan)-1, m).
      I: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and I are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    # SRI2 and SRS2 share one implementation; the Ito variant is selected
    # by passing an Ito repeated-integral generator (Ikpw or Iwik).
    return _Roessler2010_SRK2(f, G, y0, tspan, Imethod, dW, I)
def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):
    r"""Integrate the Stratonovich SDE dy = f(y,t)dt + G(y,t)\circ dW(t) with
    the Roessler (2010) order 1.0 strong Stochastic Runge-Kutta scheme SRS2.

    Suitable for Stratonovich systems with an arbitrary d x m noise
    coefficient matrix G (the noise need not be scalar, diagonal or
    commutative). Convergence is order 2.0 strong for the deterministic part
    alone and order 1.0 strong for the complete stochastic system.

    Args:
        f: callable(y, t) returning an array of shape (d,); the drift term.
        G: the d x m noise coefficients, given either as a single
            callable(y, t) returning an array of shape (d, m) (the whole
            matrix is then evaluated 2m+1 times per step, so cost grows
            quadratically with m), or as a list of m callables each returning
            one column of shape (d,) (each evaluated 3 times per step, so
            cost grows only linearly with m).
        y0: array of shape (d,), the initial state at time tspan[0].
        tspan (array): equally spaced time points to solve for, e.g.
            np.arange(0, 10, 0.005).
        Jmethod (callable, optional): generator of repeated Stratonovich
            integrals; choose sdeint.Jkpw (default) or sdeint.Jwik (more
            accurate but memory hungry).
        dW: optional pre-computed Wiener increments,
            shape (len(tspan)-1, m).
        J: optional pre-computed repeated Stratonovich integrals,
            shape (len(tspan)-1, m, m).

    Returns:
        y: array of shape (len(tspan), d), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation
        of Solutions of Stochastic Differential Equations
    """
    # SRS2 shares its implementation with the Ito scheme SRI2; only the
    # repeated-integral generator differs (Theorem 6.2 in Roessler 2010).
    return _Roessler2010_SRK2(f, G, y0, tspan, Jmethod, dW, J)
def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):
    """Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
    algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations).

    Algorithms SRI2 and SRS2 are almost identical and have the same extended
    Butcher tableaus. The difference is that Ito repeated integrals I_ij are
    replaced by Stratonovich repeated integrals J_ij when integrating a
    Stratonovich equation (Theorem 6.2 in Roessler2010).

    Args:
        f: A function f(y, t) returning an array of shape (d,)
        G: Either a function G(y, t) that returns an array of shape (d, m),
            or a list of m functions g(y, t) each returning an array
            of shape (d,).
        y0: array of shape (d,) giving the initial state
        tspan (array): Sequence of equally spaced time points
        IJmethod (callable): which function to use to generate repeated
            integrals. N.B. for an Ito equation, must use an Ito version here
            (either Ikpw or Iwik). For a Stratonovich equation, must use a
            Stratonovich version here (Jkpw or Jwik).
        dW: optional array of shape (len(tspan)-1, m) of Wiener increments.
        IJ: optional array of shape (len(tspan)-1, m, m) of repeated
            integrals. If dW and IJ are not provided, suitable values will be
            generated randomly.

    Returns:
        y: array, with shape (len(tspan), len(y0))

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, IJ) = _check_args(f, G, y0, tspan, dW, IJ)
    have_separate_g = (not callable(G)) # if G is given as m separate functions
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h) # shape (N-1, m)
    if IJ is None:
        # pre-generate repeated stochastic integrals for each time step.
        # Must give I_ij for the Ito case or J_ij for the Stratonovich case:
        __, I = IJmethod(dW, h) # shape (N-1, m, m)
    else:
        I = IJ
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    y[0] = y0;
    # reusable buffer for the noise coefficient matrix at the current state
    Gn = np.zeros((d, m), dtype=y.dtype)
    for n in range(0, N-1):
        tn = tspan[n]
        tn1 = tspan[n+1]
        h = tn1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n] # shape (d,)
        Ik = dW[n,:] # Wiener increments for this step, shape (m,)
        Iij = I[n,:,:] # repeated integrals for this step, shape (m, m)
        fnh = f(Yn, tn)*h # drift increment f(Y_n, t_n)*h, shape (d,)
        if have_separate_g:
            # evaluate each column function once at (Yn, tn)
            for k in range(0, m):
                Gn[:,k] = G[k](Yn, tn)
        else:
            Gn = G(Yn, tn)
        sum1 = np.dot(Gn, Iij)/sqrth # shape (d, m)
        # supporting stage values of the SRK scheme (Roessler 2010 notation):
        H20 = Yn + fnh # shape (d,)
        H20b = np.reshape(H20, (d, 1))
        H2 = H20b + sum1 # shape (d, m)
        H30 = Yn
        H3 = H20b - sum1
        fn1h = f(H20, tn1)*h
        # trapezoidal drift average plus the leading noise term
        Yn1 = Yn + 0.5*(fnh + fn1h) + np.dot(Gn, Ik)
        if have_separate_g:
            # one column-function evaluation per stage value (3 per step total)
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G[k](H2[:,k], tn1) - G[k](H3[:,k], tn1))
        else:
            # full-matrix G: only column k of each evaluation is used
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G(H2[:,k], tn1)[:,k] - G(H3[:,k], tn1)[:,k])
        y[n+1] = Yn1
    return y
def stratKP2iS(f, G, y0, tspan, Jmethod=Jkpw, gam=None, al1=None, al2=None,
               rtol=1e-4, dW=None, J=None):
    r"""Use the Kloeden and Platen two-step implicit order 1.0 strong algorithm
    to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)

    This semi-implicit algorithm may be useful for stiff systems. The noise
    does not need to be scalar, diagonal, or commutative.

    This algorithm is defined in Kloeden and Platen (1999) section 12.4,
    equations (4.5) and (4.7). Here implementing that scheme with default
    parameters \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5 for k=1..d using
    MINPACK HYBRD algorithm to solve the implicit vector equation at each step.

    Args:
        f: A function f(y, t) returning an array of shape (d,) to define the
            deterministic part of the system
        G: A function G(y, t) returning an array of shape (d, m) to define the
            noise coefficients of the system
        y0: array of shape (d,) giving the initial state
        tspan (array): Sequence of equally spaced time points
        Jmethod (callable, optional): which function to use to simulate
            repeated Stratonovich integrals: sdeint.Jkpw (default) or
            sdeint.Jwik (more accurate but memory hungry).
        gam, al1, al2 (optional arrays of shape (d,)): free parameters
            \gamma_k, \alpha_{1,k}, \alpha_{2,k} of the algorithm.
            If omitted, the default values 0.5 are used.
        rtol (float, optional): Relative tolerance used when solving the
            implicit equation for Y_{n+1} at each step (default 1e-4). It is
            not an error bound on the overall sample path approximation.
        dW: optional array of shape (len(tspan)-1, m) of Wiener increments.
        J: optional array of shape (len(tspan)-1, m, m) of repeated
            Stratonovich integrals. If dW and J are not provided, suitable
            values will be generated randomly.

    Returns:
        y: array, with shape (len(tspan), len(y0))

    Raises:
        SDEValueError, RuntimeError

    See also:
        P. Kloeden and E. Platen (1999) Numerical Solution of Stochastic
        Differential Equations, revised and updated 3rd printing.
    """
    try:
        from scipy.optimize import fsolve
    except ImportError:
        raise Error('stratKP2iS() requires package ``scipy`` to be installed.')
    (d, m, f, G, y0, tspan, dW, J) = _check_args(f, G, y0, tspan, dW, J)
    if not callable(G):
        raise SDEValueError('G should be a function returning a d x m matrix.')
    if np.iscomplexobj(y0):
        raise SDEValueError("stratKP2iS() can't yet handle complex variables.")
    # Default level of implicitness \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5
    if gam is None:
        gam = np.ones((d,))*0.5
    if al1 is None:
        al1 = np.ones((d,))*0.5
    if al2 is None:
        al2 = np.ones((d,))*0.5
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)  # shape (N-1, m)
    if J is None:
        # pre-generate repeated Stratonovich integrals for each time step
        __, J = Jmethod(dW, h)  # shape (N-1, m, m)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))

    def _imp(Ynp1, Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1):
        """At each step we will solve _imp(Ynp1, ...) == 0 for Ynp1.
        The meaning of these arguments is: Y_{n+1}, Y_n, Y_{n-1}, V_n, V_{n-1},
        t_{n+1}, t_n, t_{n-1}, f(Y_n, t_n), f(Y_{n-1}, t_{n-1})."""
        return ((1 - gam)*Yn + gam*Ynm1 + (al2*f(Ynp1, tnp1) +
                (gam*al1 + (1 - al2))*fn + gam*(1 - al1)*fnm1)*h + Vn +
                gam*Vnm1 - Ynp1)

    fn = None  # holds f(Y_{n-1}, t_{n-1}) carried over from the previous step
    Vn = None  # holds the noise term V_{n-1} carried over from the previous step
    y[0] = y0
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        h = tnp1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n]  # shape (d,)
        Jk = dW[n,:]  # shape (m,)
        Jij = J[n,:,:]  # shape (m, m)
        fnm1 = fn
        fn = f(Yn, tn)
        Gn = G(Yn, tn)
        # supporting values used for the derivative-free noise approximation:
        Ybar = (Yn + fn*h).reshape((d, 1)) + Gn*sqrth  # shape (d, m)
        sum1 = np.zeros((d,))
        for j1 in range(0, m):
            sum1 += np.dot(G(Ybar[:,j1], tn) - Gn, Jij[j1,:])
        Vnm1 = Vn
        Vn = np.dot(Gn, Jk) + sum1/sqrth
        if n == 0:
            # First step uses Kloeden&Platen explicit order 1.0 strong scheme:
            y[n+1] = Yn + fn*h + Vn
            continue
        tnm1 = tspan[n-1]
        Ynm1 = y[n-1]  # shape (d,)
        # now solve _imp(Ynp1, ...) == 0 for Ynp1, near to Yn
        args = (Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1)
        (Ynp1, __, status, msg) = fsolve(_imp, Yn, args=args, xtol=rtol,
                                         full_output=True)
        if status == 1:
            y[n+1] = Ynp1
        else:
            # BUGFIX: the message was previously assigned to ``m``, shadowing
            # the number of Wiener processes; use a distinct name.
            errmsg = """At time t_n = %g Failed to solve for Y_{n+1} with args %s.
            Reason: %s""" % (tn, args, msg)
            raise RuntimeError(errmsg)
    return y
|
mattja/sdeint | sdeint/integrate.py | stratint | python | def stratint(f, G, y0, tspan):
# In future versions we can automatically choose here the most suitable
# Stratonovich algorithm based on properties of the system and noise.
(d, m, f, G, y0, tspan, __, __) = _check_args(f, G, y0, tspan, None, None)
chosenAlgorithm = stratSRS2
return chosenAlgorithm(f, G, y0, tspan) | Numerically integrate Stratonovich equation dy = f(y,t)dt + G(y,t).dW
where y is the d-dimensional state vector, f is a vector-valued function,
G is an d x m matrix-valued function giving the noise coefficients and
dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments
Args:
f: callable(y,t) returning a numpy array of shape (d,)
Vector-valued function to define the deterministic part of the system
G: callable(y,t) returning a numpy array of shape (d,m)
Matrix-valued function to define the noise coefficients of the system
y0: array of shape (d,) giving the initial state vector y(t==0)
tspan (array): The sequence of time points for which to solve for y.
These must be equally spaced, e.g. np.arange(0,10,0.005)
tspan[0] is the intial time corresponding to the initial state y0.
Returns:
y: array, with shape (len(tspan), len(y0))
With the initial value y0 in the first row
Raises:
SDEValueError | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/integrate.py#L149-L177 | [
"def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):\n \"\"\"Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm\n SRS2 to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\\circ dW(t)\n\n where y is d-dimensional vector variable, f is a vector-valued function,\n G i... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""Numerical integration algorithms for Ito and Stratonovich stochastic
ordinary differential equations.
Usage:
itoint(f, G, y0, tspan) for Ito equation dy = f dt + G dW
stratint(f, G, y0, tspan) for Stratonovich equation dy = f dt + G \circ dW
y0 is the initial value
tspan is an array of time values (currently these must be equally spaced)
function f is the deterministic part of the system (scalar or dx1 vector)
function G is the stochastic part of the system (scalar or d x m matrix)
sdeint will choose an algorithm for you. Or you can choose one explicitly:
itoEuler: the Euler-Maruyama algorithm for Ito equations.
stratHeun: the Stratonovich Heun algorithm for Stratonovich equations.
itoSRI2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRI2 for Ito equations.
stratSRS2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRS2 for Stratonovich equations.
stratKP2iS: the Kloeden and Platen two-step implicit order 1.0 strong algorithm
for Stratonovich equations.
"""
from __future__ import absolute_import
from .wiener import deltaW, Ikpw, Iwik, Jkpw, Jwik
import numpy as np
import numbers
class Error(Exception):
    """Base class for all exceptions raised by this module."""
class SDEValueError(Error):
    """Raised when integration arguments fail some basic sanity checks."""
def _check_args(f, G, y0, tspan, dW=None, IJ=None):
    """Do some validation common to all algorithms. Find dimension d and number
    of Wiener processes m.

    Scalar systems (y0 a plain number, f and G returning numbers) are
    transparently converted to equivalent 1-dimensional vector systems.

    Returns:
        (d, m, f, G, y0, tspan, dW, IJ): the validated (and possibly
        vectorized) arguments, plus the state dimension d and the number of
        independent Wiener processes m.

    Raises:
        SDEValueError: if any argument fails validation.
    """
    # currently all algorithms assume a constant step size
    if not np.isclose(min(np.diff(tspan)), max(np.diff(tspan))):
        raise SDEValueError('Currently time steps must be equally spaced.')
    # Be flexible to allow scalar equations. convert them to a 1D vector system
    if isinstance(y0, numbers.Number):
        if isinstance(y0, numbers.Integral):
            # promote integer initial values so the state can evolve as floats
            numtype = np.float64
        else:
            numtype = type(y0)
        y0_orig = y0
        y0 = np.array([y0], dtype=numtype)
        def make_vector_fn(fn):
            # wrap a scalar drift function so it maps (1,)-arrays to (1,)-arrays
            def newfn(y, t):
                return np.array([fn(y[0], t)], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        def make_matrix_fn(fn):
            # wrap a scalar noise function so it returns a (1, 1) matrix
            def newfn(y, t):
                return np.array([[fn(y[0], t)]], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        if isinstance(f(y0_orig, tspan[0]), numbers.Number):
            f = make_vector_fn(f)
        if isinstance(G(y0_orig, tspan[0]), numbers.Number):
            G = make_matrix_fn(G)
    # determine dimension d of the system
    d = len(y0)
    if len(f(y0, tspan[0])) != d:
        raise SDEValueError('y0 and f have incompatible shapes.')
    message = """y0 has length %d. So G must either be a single function
              returning a matrix of shape (%d, m), or else a list of m separate
              functions each returning a column of G, with shape (%d,)""" % (
                  d, d, d)
    if callable(G):
        # then G must be a function returning a d x m matrix
        Gtest = G(y0, tspan[0])
        if Gtest.ndim != 2 or Gtest.shape[0] != d:
            raise SDEValueError(message)
        # determine number of independent Wiener processes m
        m = Gtest.shape[1]
    else:
        # G should be a list of m functions g_i giving columns of G
        G = tuple(G)
        m = len(G)
        Gtest = np.zeros((d, m))
        for k in range(0, m):
            if not callable(G[k]):
                raise SDEValueError(message)
            Gtestk = G[k](y0, tspan[0])
            if np.shape(Gtestk) != (d,):
                raise SDEValueError(message)
            Gtest[:,k] = Gtestk
    message = """From function G, it seems m==%d. If present, the optional
              parameter dW must be an array of shape (len(tspan)-1, m) giving
              m independent Wiener increments for each time interval.""" % m
    if dW is not None:
        # user-supplied Wiener increments must match the inferred dimensions
        if not hasattr(dW, 'shape') or dW.shape != (len(tspan) - 1, m):
            raise SDEValueError(message)
    message = """From function G, it seems m==%d. If present, the optional
              parameter I or J must be an array of shape (len(tspan)-1, m, m)
              giving an m x m matrix of repeated integral values for each
              time interval.""" % m
    if IJ is not None:
        # user-supplied repeated integrals must match the inferred dimensions
        if not hasattr(IJ, 'shape') or IJ.shape != (len(tspan) - 1, m, m):
            raise SDEValueError(message)
    return (d, m, f, G, y0, tspan, dW, IJ)
def itoint(f, G, y0, tspan):
    """Numerically integrate the Ito equation dy = f(y,t)dt + G(y,t)dW

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener
    increments.

    Args:
        f: callable(y, t) returning an array of shape (d,); the drift term.
        G: callable(y, t) returning an array of shape (d, m); the noise
            coefficients of the system.
        y0: array of shape (d,), the initial state at time tspan[0].
        tspan (array): equally spaced time points to solve for, e.g.
            np.arange(0, 10, 0.005).

    Returns:
        y: array of shape (len(tspan), d), with y0 in the first row.

    Raises:
        SDEValueError
    """
    # Validate arguments up front so the caller gets a clear error here.
    # Future versions may pick the algorithm automatically based on
    # properties of the system and noise; for now SRI2 is always used.
    (d, m, f, G, y0, tspan, __, __) = _check_args(f, G, y0, tspan, None, None)
    return itoSRI2(f, G, y0, tspan)
def itoEuler(f, G, y0, tspan, dW=None):
    """Use the Euler-Maruyama algorithm to integrate the Ito equation
    dy = f(y,t)dt + G(y,t) dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener
    increments.

    Args:
        f: callable(y, t) returning an array of shape (d,); the drift term.
        G: callable(y, t) returning an array of shape (d, m); the noise
            coefficients of the system.
        y0: array of shape (d,), the initial state at time tspan[0].
        tspan (array): equally spaced time points to solve for.
        dW: optional array of shape (len(tspan)-1, m). For advanced use, to
            supply a specific realization of the m independent Wiener
            processes. If omitted, increments are generated randomly.

    Returns:
        y: array of shape (len(tspan), d), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        G. Maruyama (1955) Continuous Markov processes and stochastic
        equations
        Kloeden and Platen (1999) Numerical Solution of Differential
        Equations
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    num_steps = len(tspan)
    dt = (tspan[num_steps - 1] - tspan[0]) / (num_steps - 1)  # constant step
    if dW is None:
        # pre-generate Wiener increments for the m independent processes
        dW = deltaW(num_steps - 1, m, dt)
    y = np.zeros((num_steps, d), dtype=type(y0[0]))
    y[0] = y0
    for n in range(num_steps - 1):
        t_n = tspan[n]
        y_n = y[n]
        # explicit Euler-Maruyama update: drift step plus noise step
        y[n + 1] = y_n + f(y_n, t_n) * dt + np.dot(G(y_n, t_n), dW[n, :])
    return y
def stratHeun(f, G, y0, tspan, dW=None):
    r"""Use the Stratonovich Heun algorithm to integrate the Stratonovich
    equation dy = f(y,t)dt + G(y,t) \circ dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener
    increments.

    Args:
        f: callable(y, t) returning an array of shape (d,); the drift term.
        G: callable(y, t) returning an array of shape (d, m); the noise
            coefficients of the system.
        y0: array of shape (d,), the initial state at time tspan[0].
        tspan (array): equally spaced time points to solve for.
        dW: optional array of shape (len(tspan)-1, m). For advanced use, to
            supply a specific realization of the m independent Wiener
            processes. If omitted, increments are generated randomly.

    Returns:
        y: array of shape (len(tspan), d), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        W. Rumelin (1982) Numerical Treatment of Stochastic Differential
        Equations
        R. Mannella (2002) Integration of Stochastic Differential Equations
        on a Computer
        K. Burrage, P. M. Burrage and T. Tian (2004) Numerical methods for
        strong solutions of stochastic differential equations: an overview
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    num_steps = len(tspan)
    dt = (tspan[num_steps - 1] - tspan[0]) / (num_steps - 1)  # constant step
    if dW is None:
        # pre-generate Wiener increments for the m independent processes
        dW = deltaW(num_steps - 1, m, dt)
    y = np.zeros((num_steps, d), dtype=type(y0[0]))
    y[0] = y0
    for n in range(num_steps - 1):
        tn, tnp1 = tspan[n], tspan[n + 1]
        yn = y[n]
        dWn = dW[n, :]
        drift = f(yn, tn)
        diffusion = G(yn, tn)
        # Euler-Maruyama predictor, then evaluate f and G at the predicted
        # point and average (Heun corrector) for the Stratonovich solution
        predictor = yn + drift * dt + np.dot(diffusion, dWn)
        drift_bar = f(predictor, tnp1)
        diffusion_bar = G(predictor, tnp1)
        y[n + 1] = (yn + 0.5 * (drift + drift_bar) * dt
                    + 0.5 * np.dot(diffusion + diffusion_bar, dWn))
    return y
def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):
    """Integrate the Ito SDE dy = f(y,t)dt + G(y,t)dW(t) with the
    Roessler (2010) order 1.0 strong Stochastic Runge-Kutta scheme SRI2.

    Suitable for Ito systems with an arbitrary d x m noise coefficient matrix
    G (the noise need not be scalar, diagonal or commutative). Convergence is
    order 2.0 strong for the deterministic part alone and order 1.0 strong
    for the complete stochastic system.

    Args:
        f: callable(y, t) returning an array of shape (d,); the drift term.
        G: the d x m noise coefficients, given either as a single
            callable(y, t) returning an array of shape (d, m) (the whole
            matrix is then evaluated 2m+1 times per step, so cost grows
            quadratically with m), or as a list of m callables each returning
            one column of shape (d,) (each evaluated 3 times per step, so
            cost grows only linearly with m).
        y0: array of shape (d,), the initial state at time tspan[0].
        tspan (array): equally spaced time points to solve for, e.g.
            np.arange(0, 10, 0.005).
        Imethod (callable, optional): generator of repeated Ito integrals;
            choose sdeint.Ikpw (default) or sdeint.Iwik (more accurate but
            memory hungry).
        dW: optional pre-computed Wiener increments,
            shape (len(tspan)-1, m).
        I: optional pre-computed repeated Ito integrals,
            shape (len(tspan)-1, m, m).

    Returns:
        y: array of shape (len(tspan), d), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation
        of Solutions of Stochastic Differential Equations
    """
    # SRI2 shares its implementation with the Stratonovich scheme SRS2; only
    # the repeated-integral generator differs (Theorem 6.2 in Roessler 2010).
    return _Roessler2010_SRK2(f, G, y0, tspan, Imethod, dW, I)
def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):
    r"""Integrate the Stratonovich SDE dy = f(y,t)dt + G(y,t)\circ dW(t) with
    the Roessler (2010) order 1.0 strong Stochastic Runge-Kutta scheme SRS2.

    Suitable for Stratonovich systems with an arbitrary d x m noise
    coefficient matrix G (the noise need not be scalar, diagonal or
    commutative). Convergence is order 2.0 strong for the deterministic part
    alone and order 1.0 strong for the complete stochastic system.

    Args:
        f: callable(y, t) returning an array of shape (d,); the drift term.
        G: the d x m noise coefficients, given either as a single
            callable(y, t) returning an array of shape (d, m) (the whole
            matrix is then evaluated 2m+1 times per step, so cost grows
            quadratically with m), or as a list of m callables each returning
            one column of shape (d,) (each evaluated 3 times per step, so
            cost grows only linearly with m).
        y0: array of shape (d,), the initial state at time tspan[0].
        tspan (array): equally spaced time points to solve for, e.g.
            np.arange(0, 10, 0.005).
        Jmethod (callable, optional): generator of repeated Stratonovich
            integrals; choose sdeint.Jkpw (default) or sdeint.Jwik (more
            accurate but memory hungry).
        dW: optional pre-computed Wiener increments,
            shape (len(tspan)-1, m).
        J: optional pre-computed repeated Stratonovich integrals,
            shape (len(tspan)-1, m, m).

    Returns:
        y: array of shape (len(tspan), d), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation
        of Solutions of Stochastic Differential Equations
    """
    # SRS2 shares its implementation with the Ito scheme SRI2; only the
    # repeated-integral generator differs (Theorem 6.2 in Roessler 2010).
    return _Roessler2010_SRK2(f, G, y0, tspan, Jmethod, dW, J)
def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):
    """Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
    algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations).

    Algorithms SRI2 and SRS2 are almost identical and have the same extended
    Butcher tableaus. The difference is that Ito repeated integrals I_ij are
    replaced by Stratonovich repeated integrals J_ij when integrating a
    Stratonovich equation (Theorem 6.2 in Roessler2010).

    Args:
        f: A function f(y, t) returning an array of shape (d,)
        G: Either a function G(y, t) that returns an array of shape (d, m),
            or a list of m functions g(y, t) each returning an array
            of shape (d,).
        y0: array of shape (d,) giving the initial state
        tspan (array): Sequence of equally spaced time points
        IJmethod (callable): which function to use to generate repeated
            integrals. N.B. for an Ito equation, must use an Ito version here
            (either Ikpw or Iwik). For a Stratonovich equation, must use a
            Stratonovich version here (Jkpw or Jwik).
        dW: optional array of shape (len(tspan)-1, m) of Wiener increments.
        IJ: optional array of shape (len(tspan)-1, m, m) of repeated
            integrals. If dW and IJ are not provided, suitable values will be
            generated randomly.

    Returns:
        y: array, with shape (len(tspan), len(y0))

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, IJ) = _check_args(f, G, y0, tspan, dW, IJ)
    have_separate_g = (not callable(G)) # if G is given as m separate functions
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h) # shape (N-1, m)
    if IJ is None:
        # pre-generate repeated stochastic integrals for each time step.
        # Must give I_ij for the Ito case or J_ij for the Stratonovich case:
        __, I = IJmethod(dW, h) # shape (N-1, m, m)
    else:
        I = IJ
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    y[0] = y0;
    # reusable buffer for the noise coefficient matrix at the current state
    Gn = np.zeros((d, m), dtype=y.dtype)
    for n in range(0, N-1):
        tn = tspan[n]
        tn1 = tspan[n+1]
        h = tn1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n] # shape (d,)
        Ik = dW[n,:] # Wiener increments for this step, shape (m,)
        Iij = I[n,:,:] # repeated integrals for this step, shape (m, m)
        fnh = f(Yn, tn)*h # drift increment f(Y_n, t_n)*h, shape (d,)
        if have_separate_g:
            # evaluate each column function once at (Yn, tn)
            for k in range(0, m):
                Gn[:,k] = G[k](Yn, tn)
        else:
            Gn = G(Yn, tn)
        sum1 = np.dot(Gn, Iij)/sqrth # shape (d, m)
        # supporting stage values of the SRK scheme (Roessler 2010 notation):
        H20 = Yn + fnh # shape (d,)
        H20b = np.reshape(H20, (d, 1))
        H2 = H20b + sum1 # shape (d, m)
        H30 = Yn
        H3 = H20b - sum1
        fn1h = f(H20, tn1)*h
        # trapezoidal drift average plus the leading noise term
        Yn1 = Yn + 0.5*(fnh + fn1h) + np.dot(Gn, Ik)
        if have_separate_g:
            # one column-function evaluation per stage value (3 per step total)
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G[k](H2[:,k], tn1) - G[k](H3[:,k], tn1))
        else:
            # full-matrix G: only column k of each evaluation is used
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G(H2[:,k], tn1)[:,k] - G(H3[:,k], tn1)[:,k])
        y[n+1] = Yn1
    return y
def stratKP2iS(f, G, y0, tspan, Jmethod=Jkpw, gam=None, al1=None, al2=None,
rtol=1e-4, dW=None, J=None):
"""Use the Kloeden and Platen two-step implicit order 1.0 strong algorithm
to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)
This semi-implicit algorithm may be useful for stiff systems. The noise
does not need to be scalar, diagonal, or commutative.
This algorithm is defined in Kloeden and Platen (1999) section 12.4,
equations (4.5) and (4.7). Here implementing that scheme with default
parameters \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5 for k=1..d using
MINPACK HYBRD algorithm to solve the implicit vector equation at each step.
Args:
f: A function f(y, t) returning an array of shape (d,) to define the
deterministic part of the system
G: A function G(y, t) returning an array of shape (d, m) to define the
noise coefficients of the system
y0: array of shape (d,) giving the initial state
tspan (array): Sequence of equally spaced time points
Jmethod (callable, optional): which function to use to simulate repeated
Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
default) or sdeint.Jwik (which is more accurate but uses a lot of
memory in the current implementation).
gam, al1, al2 (optional arrays of shape (d,)): These can configure free
parameters \gamma_k, \alpha_{1,k}, \alpha_{2,k} of the algorithm.
You can omit these, then the default values 0.5 will be used.
rtol (float, optional): Relative error tolerance. The default is 1e-4.
This is the relative tolerance used when solving the implicit equation
for Y_{n+1} at each step. It does not mean that the overall sample path
approximation has this relative precision.
dW: optional array of shape (len(tspan)-1, d).
J: optional array of shape (len(tspan)-1, m, m).
These optional arguments dW and J are for advanced use, if you want to
use a specific realization of the d independent Wiener processes and
their multiple integrals at each time step. If not provided, suitable
values will be generated randomly.
Returns:
y: array, with shape (len(tspan), len(y0))
Raises:
SDEValueError, RuntimeError
See also:
P. Kloeden and E. Platen (1999) Numerical Solution of Stochastic
Differential Equations, revised and updated 3rd printing.
"""
try:
from scipy.optimize import fsolve
except ImportError:
raise Error('stratKP2iS() requires package ``scipy`` to be installed.')
(d, m, f, G, y0, tspan, dW, J) = _check_args(f, G, y0, tspan, dW, J)
if not callable(G):
raise SDEValueError('G should be a function returning a d x m matrix.')
if np.iscomplexobj(y0):
raise SDEValueError("stratKP2iS() can't yet handle complex variables.")
if gam is None:
gam = np.ones((d,))*0.5 # Default level of implicitness \gamma_k = 0.5
if al1 is None:
al1 = np.ones((d,))*0.5 # Default \alpha_{1,k} = 0.5
if al2 is None:
al2 = np.ones((d,))*0.5 # Default \alpha_{2,k} = 0.5
N = len(tspan)
h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
if dW is None:
# pre-generate Wiener increments (for m independent Wiener processes):
dW = deltaW(N - 1, m, h) # shape (N, m)
if J is None:
# pre-generate repeated Stratonovich integrals for each time step
__, J = Jmethod(dW, h) # shape (N, m, m)
# allocate space for result
y = np.zeros((N, d), dtype=type(y0[0]))
def _imp(Ynp1, Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1):
"""At each step we will solve _imp(Ynp1, ...) == 0 for Ynp1.
The meaning of these arguments is: Y_{n+1}, Y_n, Y_{n-1}, V_n, V_{n-1},
t_{n+1}, t_n, t_{n-1}, f(Y_n, t_n), f(Y_{n-1}, t_{n-1})."""
return ((1 - gam)*Yn + gam*Ynm1 + (al2*f(Ynp1, tnp1) +
(gam*al1 + (1 - al2))*fn + gam*(1 - al1)*fnm1)*h + Vn +
gam*Vnm1 - Ynp1)
fn = None
Vn = None
y[0] = y0;
for n in range(0, N-1):
tn = tspan[n]
tnp1 = tspan[n+1]
h = tnp1 - tn
sqrth = np.sqrt(h)
Yn = y[n] # shape (d,)
Jk = dW[n,:] # shape (m,)
Jij = J[n,:,:] # shape (m, m)
fnm1 = fn
fn = f(Yn, tn)
Gn = G(Yn, tn)
Ybar = (Yn + fn*h).reshape((d, 1)) + Gn*sqrth # shape (d, m)
sum1 = np.zeros((d,))
for j1 in range(0, m):
sum1 += np.dot(G(Ybar[:,j1], tn) - Gn, Jij[j1,:])
Vnm1 = Vn
Vn = np.dot(Gn, Jk) + sum1/sqrth
if n == 0:
# First step uses Kloeden&Platen explicit order 1.0 strong scheme:
y[n+1] = Yn + fn*h + Vn
continue
tnm1 = tspan[n-1]
Ynm1 = y[n-1] # shape (d,)
# now solve _imp(Ynp1, ...) == 0 for Ynp1, near to Yn
args = (Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1)
(Ynp1, __, status, msg) = fsolve(_imp, Yn, args=args, xtol=rtol,
full_output=True)
if status == 1:
y[n+1] = Ynp1
else:
m = """At time t_n = %g Failed to solve for Y_{n+1} with args %s.
Reason: %s""" % (tn, args, msg)
raise RuntimeError(m)
return y
|
mattja/sdeint | sdeint/integrate.py | itoEuler | python | def itoEuler(f, G, y0, tspan, dW=None):
(d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
N = len(tspan)
h = (tspan[N-1] - tspan[0])/(N - 1)
# allocate space for result
y = np.zeros((N, d), dtype=type(y0[0]))
if dW is None:
# pre-generate Wiener increments (for m independent Wiener processes):
dW = deltaW(N - 1, m, h)
y[0] = y0;
for n in range(0, N-1):
tn = tspan[n]
yn = y[n]
dWn = dW[n,:]
y[n+1] = yn + f(yn, tn)*h + G(yn, tn).dot(dWn)
return y | Use the Euler-Maruyama algorithm to integrate the Ito equation
dy = f(y,t)dt + G(y,t) dW(t)
where y is the d-dimensional state vector, f is a vector-valued function,
G is an d x m matrix-valued function giving the noise coefficients and
dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments
Args:
f: callable(y, t) returning (d,) array
Vector-valued function to define the deterministic part of the system
G: callable(y, t) returning (d,m) array
Matrix-valued function to define the noise coefficients of the system
y0: array of shape (d,) giving the initial state vector y(t==0)
tspan (array): The sequence of time points for which to solve for y.
These must be equally spaced, e.g. np.arange(0,10,0.005)
tspan[0] is the initial time corresponding to the initial state y0.
dW: optional array of shape (len(tspan)-1, d). This is for advanced use,
if you want to use a specific realization of the d independent Wiener
processes. If not provided Wiener increments will be generated randomly
Returns:
y: array, with shape (len(tspan), len(y0))
With the initial value y0 in the first row
Raises:
SDEValueError
See also:
G. Maruyama (1955) Continuous Markov processes and stochastic equations
Kloeden and Platen (1999) Numerical Solution of Stochastic Differential Equations
"def deltaW(N, m, h):\n \"\"\"Generate sequence of Wiener increments for m independent Wiener\n processes W_j(t) j=0..m-1 for each of N time intervals of length h. \n\n Returns:\n dW (array of shape (N, m)): The [n, j] element has the value\n W_j((n+1)*h) - W_j(n*h) \n \"\"\"\n return np... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""Numerical integration algorithms for Ito and Stratonovich stochastic
ordinary differential equations.
Usage:
itoint(f, G, y0, tspan) for Ito equation dy = f dt + G dW
stratint(f, G, y0, tspan) for Stratonovich equation dy = f dt + G \circ dW
y0 is the initial value
tspan is an array of time values (currently these must be equally spaced)
function f is the deterministic part of the system (scalar or dx1 vector)
function G is the stochastic part of the system (scalar or d x m matrix)
sdeint will choose an algorithm for you. Or you can choose one explicitly:
itoEuler: the Euler-Maruyama algorithm for Ito equations.
stratHeun: the Stratonovich Heun algorithm for Stratonovich equations.
itoSRI2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRI2 for Ito equations.
stratSRS2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRS2 for Stratonovich equations.
stratKP2iS: the Kloeden and Platen two-step implicit order 1.0 strong algorithm
for Stratonovich equations.
"""
from __future__ import absolute_import
from .wiener import deltaW, Ikpw, Iwik, Jkpw, Jwik
import numpy as np
import numbers
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
class SDEValueError(Error):
    """Thrown if integration arguments fail some basic sanity checks."""
    pass
def _check_args(f, G, y0, tspan, dW=None, IJ=None):
    """Do some validation common to all algorithms. Find dimension d and number
    of Wiener processes m.

    Scalar problems (a numeric y0 with f and G returning plain numbers) are
    promoted here to an equivalent 1-dimensional vector system, so the
    integrators themselves only ever deal with arrays.

    Returns:
      (d, m, f, G, y0, tspan, dW, IJ): the (possibly wrapped/converted)
      inputs, plus the system dimension d and the number of independent
      Wiener processes m inferred from G.

    Raises:
      SDEValueError: if the time points are not equally spaced, or the
        shapes of y0, f, G, dW and IJ are mutually inconsistent.
    """
    if not np.isclose(min(np.diff(tspan)), max(np.diff(tspan))):
        raise SDEValueError('Currently time steps must be equally spaced.')
    # Be flexible to allow scalar equations. convert them to a 1D vector system
    if isinstance(y0, numbers.Number):
        if isinstance(y0, numbers.Integral):
            # promote integer initial values so arithmetic is done in float
            numtype = np.float64
        else:
            numtype = type(y0)
        y0_orig = y0
        y0 = np.array([y0], dtype=numtype)
        def make_vector_fn(fn):
            # wrap a scalar drift function so it returns a shape (1,) array
            def newfn(y, t):
                return np.array([fn(y[0], t)], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        def make_matrix_fn(fn):
            # wrap a scalar noise function so it returns a shape (1, 1) array
            def newfn(y, t):
                return np.array([[fn(y[0], t)]], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        if isinstance(f(y0_orig, tspan[0]), numbers.Number):
            f = make_vector_fn(f)
        if isinstance(G(y0_orig, tspan[0]), numbers.Number):
            G = make_matrix_fn(G)
    # determine dimension d of the system
    d = len(y0)
    if len(f(y0, tspan[0])) != d:
        raise SDEValueError('y0 and f have incompatible shapes.')
    message = """y0 has length %d. So G must either be a single function
    returning a matrix of shape (%d, m), or else a list of m separate
    functions each returning a column of G, with shape (%d,)""" % (d, d, d)
    if callable(G):
        # then G must be a function returning a d x m matrix
        Gtest = G(y0, tspan[0])
        if Gtest.ndim != 2 or Gtest.shape[0] != d:
            raise SDEValueError(message)
        # determine number of independent Wiener processes m
        m = Gtest.shape[1]
    else:
        # G should be a list of m functions g_i giving columns of G
        G = tuple(G)
        m = len(G)
        Gtest = np.zeros((d, m))
        for k in range(0, m):
            if not callable(G[k]):
                raise SDEValueError(message)
            Gtestk = G[k](y0, tspan[0])
            if np.shape(Gtestk) != (d,):
                raise SDEValueError(message)
            Gtest[:,k] = Gtestk
    message = """From function G, it seems m==%d. If present, the optional
    parameter dW must be an array of shape (len(tspan)-1, m) giving
    m independent Wiener increments for each time interval.""" % m
    if dW is not None:
        # caller-supplied Wiener increments must match shape (N-1, m)
        if not hasattr(dW, 'shape') or dW.shape != (len(tspan) - 1, m):
            raise SDEValueError(message)
    message = """From function G, it seems m==%d. If present, the optional
    parameter I or J must be an array of shape (len(tspan)-1, m, m)
    giving an m x m matrix of repeated integral values for each
    time interval.""" % m
    if IJ is not None:
        # caller-supplied repeated integrals must match shape (N-1, m, m)
        if not hasattr(IJ, 'shape') or IJ.shape != (len(tspan) - 1, m, m):
            raise SDEValueError(message)
    return (d, m, f, G, y0, tspan, dW, IJ)
def itoint(f, G, y0, tspan):
    """Numerically integrate the Ito equation dy = f(y,t)dt + G(y,t)dW.

    Here y is the d-dimensional state vector, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener
    increments.

    Args:
      f: callable(y, t) returning a numpy array of shape (d,) — the
        deterministic part of the system.
      G: callable(y, t) returning a numpy array of shape (d, m) — the noise
        coefficients of the system.
      y0: array of shape (d,) giving the initial state vector y(t==0).
      tspan (array): equally spaced time points for which to solve for y,
        e.g. np.arange(0, 10, 0.005); tspan[0] is the initial time
        corresponding to the initial state y0.

    Returns:
      y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
      SDEValueError
    """
    # Validate arguments up front so malformed input fails here rather than
    # deep inside the integration loop.
    d, m, f, G, y0, tspan, __, __ = _check_args(f, G, y0, tspan, None, None)
    # Algorithm selection is currently fixed; a future version may choose the
    # most suitable Ito method based on properties of the system and noise.
    return itoSRI2(f, G, y0, tspan)
def stratint(f, G, y0, tspan):
    """Numerically integrate the Stratonovich equation
    dy = f(y,t)dt + G(y,t).dW.

    Here y is the d-dimensional state vector, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener
    increments.

    Args:
      f: callable(y, t) returning a numpy array of shape (d,) — the
        deterministic part of the system.
      G: callable(y, t) returning a numpy array of shape (d, m) — the noise
        coefficients of the system.
      y0: array of shape (d,) giving the initial state vector y(t==0).
      tspan (array): equally spaced time points for which to solve for y,
        e.g. np.arange(0, 10, 0.005); tspan[0] is the initial time
        corresponding to the initial state y0.

    Returns:
      y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
      SDEValueError
    """
    # Validate arguments up front so malformed input fails here rather than
    # deep inside the integration loop.
    d, m, f, G, y0, tspan, __, __ = _check_args(f, G, y0, tspan, None, None)
    # Algorithm selection is currently fixed; a future version may choose the
    # most suitable Stratonovich method based on the system and noise.
    return stratSRS2(f, G, y0, tspan)
def stratHeun(f, G, y0, tspan, dW=None):
    """Use the Stratonovich Heun algorithm to integrate the Stratonovich
    equation dy = f(y,t)dt + G(y,t) \circ dW(t).

    Here y is the d-dimensional state vector, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener
    increments.

    Args:
      f: callable(y, t) returning a (d,) array — the deterministic part.
      G: callable(y, t) returning a (d, m) array — the noise coefficients.
      y0: array of shape (d,) giving the initial state vector y(t==0).
      tspan (array): equally spaced time points, e.g. np.arange(0, 10, 0.005);
        tspan[0] is the initial time corresponding to y0.
      dW: optional array of shape (len(tspan)-1, m). Advanced use: a specific
        realization of the m independent Wiener increments; generated
        randomly if not provided.

    Returns:
      y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
      SDEValueError

    See also:
      W. Rumelin (1982) Numerical Treatment of Stochastic Differential
        Equations
      R. Mannella (2002) Integration of Stochastic Differential Equations
        on a Computer
      K. Burrage, P. M. Burrage and T. Tian (2004) Numerical methods for
        strong solutions of stochastic differential equations: an overview
    """
    d, m, f, G, y0, tspan, dW, __ = _check_args(f, G, y0, tspan, dW, None)
    num_steps = len(tspan) - 1
    h = (tspan[-1] - tspan[0])/num_steps  # uniform step (validated above)
    if dW is None:
        # generate increments of the m independent Wiener processes
        dW = deltaW(num_steps, m, h)
    # allocate space for result
    y = np.zeros((num_steps + 1, d), dtype=type(y0[0]))
    y[0] = y0
    for n in range(num_steps):
        t_now = tspan[n]
        t_next = tspan[n+1]
        y_now = y[n]
        dW_now = dW[n, :]
        drift = f(y_now, t_now)
        diffusion = G(y_now, t_now)
        # Euler predictor step:
        y_pred = y_now + drift*h + diffusion.dot(dW_now)
        # Heun (trapezoidal) corrector, averaging the coefficients at the
        # current point and at the predicted point:
        drift_pred = f(y_pred, t_next)
        diffusion_pred = G(y_pred, t_next)
        y[n+1] = (y_now + 0.5*(drift + drift_pred)*h
                  + 0.5*(diffusion + diffusion_pred).dot(dW_now))
    return y
def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):
    """Integrate an Ito equation dy = f(y,t)dt + G(y,t)dW(t) with the
    Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm SRI2.

    Suitable for Ito systems with an arbitrary noise coefficient matrix G
    (the noise need not be scalar, diagonal, or commutative). Convergence is
    order 2.0 for the deterministic part alone and order 1.0 strong for the
    complete stochastic system.

    Args:
      f: a function f(y, t) returning an array of shape (d,) — the
        deterministic part of the system.
      G: the d x m coefficient function, given either as a single function
        G(y, t) returning an array of shape (d, m) (the whole matrix is then
        evaluated 2m+1 times per step, so cost grows quadratically with m),
        or as a list of m functions g(y, t) each returning one column of G
        with shape (d,) (each evaluated 3 times per step, so cost grows
        linearly with m — consider this form for large m or complicated G).
      y0: array of shape (d,) giving the initial state vector y(t==0).
      tspan (array): equally spaced time points; tspan[0] is the initial
        time corresponding to the initial state y0.
      Imethod (callable, optional): function used to simulate repeated Ito
        integrals — sdeint.Ikpw (the default) or sdeint.Iwik (more accurate
        but uses a lot of memory in the current implementation).
      dW: optional array of shape (len(tspan)-1, m).
      I: optional array of shape (len(tspan)-1, m, m).
        Advanced use: supply a specific realization of the Wiener processes
        and their multiple integrals at each time step; otherwise suitable
        values are generated randomly.

    Returns:
      y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
      Solutions of Stochastic Differential Equations
    """
    # SRI2 and SRS2 share one implementation; passing an Ito repeated-integral
    # generator here selects the Ito variant (Theorem 6.2 in Roessler2010).
    return _Roessler2010_SRK2(f, G, y0, tspan, IJmethod=Imethod, dW=dW, IJ=I)
def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):
    """Integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)
    with the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
    SRS2.

    Suitable for Stratonovich systems with an arbitrary noise coefficient
    matrix G (the noise need not be scalar, diagonal, or commutative).
    Convergence is order 2.0 for the deterministic part alone and order 1.0
    strong for the complete stochastic system.

    Args:
      f: a function f(y, t) returning an array of shape (d,) — the
        deterministic part of the system.
      G: the d x m coefficient function, given either as a single function
        G(y, t) returning an array of shape (d, m) (the whole matrix is then
        evaluated 2m+1 times per step, so cost grows quadratically with m),
        or as a list of m functions g(y, t) each returning one column of G
        with shape (d,) (each evaluated 3 times per step, so cost grows
        linearly with m — consider this form for large m or complicated G).
      y0: array of shape (d,) giving the initial state vector y(t==0).
      tspan (array): equally spaced time points; tspan[0] is the initial
        time corresponding to the initial state y0.
      Jmethod (callable, optional): function used to simulate repeated
        Stratonovich integrals — sdeint.Jkpw (the default) or sdeint.Jwik
        (more accurate but uses a lot of memory in the current
        implementation).
      dW: optional array of shape (len(tspan)-1, m).
      J: optional array of shape (len(tspan)-1, m, m).
        Advanced use: supply a specific realization of the Wiener processes
        and their multiple integrals at each time step; otherwise suitable
        values are generated randomly.

    Returns:
      y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
      Solutions of Stochastic Differential Equations
    """
    # SRI2 and SRS2 share one implementation; passing a Stratonovich
    # repeated-integral generator here selects the Stratonovich variant
    # (Theorem 6.2 in Roessler2010).
    return _Roessler2010_SRK2(f, G, y0, tspan, IJmethod=Jmethod, dW=dW, IJ=J)
def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):
    """Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
    algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations).

    Algorithms SRI2 and SRS2 are almost identical and have the same extended
    Butcher tableaus. The difference is that Ito repeated integrals I_ij are
    replaced by Stratonovich repeated integrals J_ij when integrating a
    Stratonovich equation (Theorem 6.2 in Roessler2010).

    Args:
      f: A function f(y, t) returning an array of shape (d,)
      G: Either a function G(y, t) that returns an array of shape (d, m),
        or a list of m functions g(y, t) each returning an array shape (d,).
      y0: array of shape (d,) giving the initial state
      tspan (array): Sequence of equally spaced time points
      IJmethod (callable): which function to use to generate repeated
        integrals. N.B. for an Ito equation, must use an Ito version here
        (either Ikpw or Iwik). For a Stratonovich equation, must use a
        Stratonovich version here (Jkpw or Jwik).
      dW: optional array of shape (len(tspan)-1, d).
      IJ: optional array of shape (len(tspan)-1, m, m).
        Optional arguments dW and IJ are for advanced use, if you want to
        use a specific realization of the d independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
      Solutions of Stochastic Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, IJ) = _check_args(f, G, y0, tspan, dW, IJ)
    have_separate_g = (not callable(G))  # if G is given as m separate functions
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)  # shape (N-1, m)
    if IJ is None:
        # pre-generate repeated stochastic integrals for each time step.
        # Must give I_ij for the Ito case or J_ij for the Stratonovich case:
        __, I = IJmethod(dW, h)  # shape (N-1, m, m)
    else:
        I = IJ
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    y[0] = y0;
    # scratch matrix reused each step when G is given as m separate columns
    Gn = np.zeros((d, m), dtype=y.dtype)
    for n in range(0, N-1):
        tn = tspan[n]
        tn1 = tspan[n+1]
        h = tn1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n]  # shape (d,)
        Ik = dW[n,:]  # increments of each Wiener process, shape (m,)
        Iij = I[n,:,:]  # repeated integrals for this step, shape (m, m)
        fnh = f(Yn, tn)*h  # drift contribution, shape (d,)
        if have_separate_g:
            for k in range(0, m):
                Gn[:,k] = G[k](Yn, tn)
        else:
            Gn = G(Yn, tn)
        sum1 = np.dot(Gn, Iij)/sqrth  # shape (d, m)
        # Stage values of the extended Butcher tableau:
        H20 = Yn + fnh  # shape (d,)
        H20b = np.reshape(H20, (d, 1))
        H2 = H20b + sum1  # shape (d, m)
        H30 = Yn  # stage H30 equals Yn in this tableau (not referenced below)
        H3 = H20b - sum1
        fn1h = f(H20, tn1)*h
        Yn1 = Yn + 0.5*(fnh + fn1h) + np.dot(Gn, Ik)
        # Add the noise correction terms from the difference of G at the
        # H2 and H3 stage values:
        if have_separate_g:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G[k](H2[:,k], tn1) - G[k](H3[:,k], tn1))
        else:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G(H2[:,k], tn1)[:,k] - G(H3[:,k], tn1)[:,k])
        y[n+1] = Yn1
    return y
def stratKP2iS(f, G, y0, tspan, Jmethod=Jkpw, gam=None, al1=None, al2=None,
               rtol=1e-4, dW=None, J=None):
    """Use the Kloeden and Platen two-step implicit order 1.0 strong algorithm
    to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)

    This semi-implicit algorithm may be useful for stiff systems. The noise
    does not need to be scalar, diagonal, or commutative.

    This algorithm is defined in Kloeden and Platen (1999) section 12.4,
    equations (4.5) and (4.7). Here implementing that scheme with default
    parameters \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5 for k=1..d using
    MINPACK HYBRD algorithm to solve the implicit vector equation at each step.

    Args:
      f: A function f(y, t) returning an array of shape (d,) to define the
        deterministic part of the system
      G: A function G(y, t) returning an array of shape (d, m) to define the
        noise coefficients of the system
      y0: array of shape (d,) giving the initial state
      tspan (array): Sequence of equally spaced time points
      Jmethod (callable, optional): which function to use to simulate repeated
        Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
        default) or sdeint.Jwik (which is more accurate but uses a lot of
        memory in the current implementation).
      gam, al1, al2 (optional arrays of shape (d,)): These can configure free
        parameters \gamma_k, \alpha_{1,k}, \alpha_{2,k} of the algorithm.
        You can omit these, then the default values 0.5 will be used.
      rtol (float, optional): Relative error tolerance. The default is 1e-4.
        This is the relative tolerance used when solving the implicit equation
        for Y_{n+1} at each step. It does not mean that the overall sample path
        approximation has this relative precision.
      dW: optional array of shape (len(tspan)-1, d).
      J: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and J are for advanced use, if you want to
        use a specific realization of the d independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))

    Raises:
      SDEValueError, RuntimeError

    See also:
      P. Kloeden and E. Platen (1999) Numerical Solution of Stochastic
      Differential Equations, revised and updated 3rd printing.
    """
    try:
        from scipy.optimize import fsolve
    except ImportError:
        raise Error('stratKP2iS() requires package ``scipy`` to be installed.')
    (d, m, f, G, y0, tspan, dW, J) = _check_args(f, G, y0, tspan, dW, J)
    if not callable(G):
        raise SDEValueError('G should be a function returning a d x m matrix.')
    if np.iscomplexobj(y0):
        raise SDEValueError("stratKP2iS() can't yet handle complex variables.")
    if gam is None:
        gam = np.ones((d,))*0.5  # Default level of implicitness \gamma_k = 0.5
    if al1 is None:
        al1 = np.ones((d,))*0.5  # Default \alpha_{1,k} = 0.5
    if al2 is None:
        al2 = np.ones((d,))*0.5  # Default \alpha_{2,k} = 0.5
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)  # shape (N-1, m)
    if J is None:
        # pre-generate repeated Stratonovich integrals for each time step
        __, J = Jmethod(dW, h)  # shape (N-1, m, m)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))

    def _imp(Ynp1, Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1):
        """At each step we will solve _imp(Ynp1, ...) == 0 for Ynp1.
        The meaning of these arguments is: Y_{n+1}, Y_n, Y_{n-1}, V_n, V_{n-1},
        t_{n+1}, t_n, t_{n-1}, f(Y_n, t_n), f(Y_{n-1}, t_{n-1})."""
        return ((1 - gam)*Yn + gam*Ynm1 + (al2*f(Ynp1, tnp1) +
                (gam*al1 + (1 - al2))*fn + gam*(1 - al1)*fnm1)*h + Vn +
                gam*Vnm1 - Ynp1)

    fn = None  # will hold f(Y_n, t_n) from the previous iteration
    Vn = None  # will hold the noise term V_n from the previous iteration
    y[0] = y0
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        h = tnp1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n]  # shape (d,)
        Jk = dW[n,:]  # shape (m,)
        Jij = J[n,:,:]  # shape (m, m)
        fnm1 = fn
        fn = f(Yn, tn)
        Gn = G(Yn, tn)
        # supporting values used to approximate derivatives of G:
        Ybar = (Yn + fn*h).reshape((d, 1)) + Gn*sqrth  # shape (d, m)
        sum1 = np.zeros((d,))
        for j1 in range(0, m):
            sum1 += np.dot(G(Ybar[:,j1], tn) - Gn, Jij[j1,:])
        Vnm1 = Vn
        Vn = np.dot(Gn, Jk) + sum1/sqrth
        if n == 0:
            # First step uses Kloeden&Platen explicit order 1.0 strong scheme:
            y[n+1] = Yn + fn*h + Vn
            continue
        tnm1 = tspan[n-1]
        Ynm1 = y[n-1]  # shape (d,)
        # now solve _imp(Ynp1, ...) == 0 for Ynp1, near to Yn
        args = (Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1)
        (Ynp1, __, status, msg) = fsolve(_imp, Yn, args=args, xtol=rtol,
                                         full_output=True)
        if status == 1:
            y[n+1] = Ynp1
        else:
            # Bug fix: the original assigned this message to `m`, clobbering
            # the noise-dimension variable m. Use a dedicated name instead.
            errmsg = """At time t_n = %g Failed to solve for Y_{n+1} with args %s.
            Reason: %s""" % (tn, args, msg)
            raise RuntimeError(errmsg)
    return y
|
mattja/sdeint | sdeint/integrate.py | stratHeun | python | def stratHeun(f, G, y0, tspan, dW=None):
(d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
N = len(tspan)
h = (tspan[N-1] - tspan[0])/(N - 1)
# allocate space for result
y = np.zeros((N, d), dtype=type(y0[0]))
if dW is None:
# pre-generate Wiener increments (for m independent Wiener processes):
dW = deltaW(N - 1, m, h)
y[0] = y0;
for n in range(0, N-1):
tn = tspan[n]
tnp1 = tspan[n+1]
yn = y[n]
dWn = dW[n,:]
fn = f(yn, tn)
Gn = G(yn, tn)
ybar = yn + fn*h + Gn.dot(dWn)
fnbar = f(ybar, tnp1)
Gnbar = G(ybar, tnp1)
y[n+1] = yn + 0.5*(fn + fnbar)*h + 0.5*(Gn + Gnbar).dot(dWn)
return y | Use the Stratonovich Heun algorithm to integrate Stratonovich equation
dy = f(y,t)dt + G(y,t) \circ dW(t)
where y is the d-dimensional state vector, f is a vector-valued function,
G is an d x m matrix-valued function giving the noise coefficients and
dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments
Args:
f: callable(y, t) returning (d,) array
Vector-valued function to define the deterministic part of the system
G: callable(y, t) returning (d,m) array
Matrix-valued function to define the noise coefficients of the system
y0: array of shape (d,) giving the initial state vector y(t==0)
tspan (array): The sequence of time points for which to solve for y.
These must be equally spaced, e.g. np.arange(0,10,0.005)
tspan[0] is the intial time corresponding to the initial state y0.
dW: optional array of shape (len(tspan)-1, d). This is for advanced use,
if you want to use a specific realization of the d independent Wiener
processes. If not provided Wiener increments will be generated randomly
Returns:
y: array, with shape (len(tspan), len(y0))
With the initial value y0 in the first row
Raises:
SDEValueError
See also:
W. Rumelin (1982) Numerical Treatment of Stochastic Differential
Equations
R. Mannella (2002) Integration of Stochastic Differential Equations
on a Computer
K. Burrage, P. M. Burrage and T. Tian (2004) Numerical methods for strong
solutions of stochastic differential equations: an overview | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/integrate.py#L229-L285 | [
"def deltaW(N, m, h):\n \"\"\"Generate sequence of Wiener increments for m independent Wiener\n processes W_j(t) j=0..m-1 for each of N time intervals of length h. \n\n Returns:\n dW (array of shape (N, m)): The [n, j] element has the value\n W_j((n+1)*h) - W_j(n*h) \n \"\"\"\n return np... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""Numerical integration algorithms for Ito and Stratonovich stochastic
ordinary differential equations.
Usage:
itoint(f, G, y0, tspan) for Ito equation dy = f dt + G dW
stratint(f, G, y0, tspan) for Stratonovich equation dy = f dt + G \circ dW
y0 is the initial value
tspan is an array of time values (currently these must be equally spaced)
function f is the deterministic part of the system (scalar or dx1 vector)
function G is the stochastic part of the system (scalar or d x m matrix)
sdeint will choose an algorithm for you. Or you can choose one explicitly:
itoEuler: the Euler-Maruyama algorithm for Ito equations.
stratHeun: the Stratonovich Heun algorithm for Stratonovich equations.
itoSRI2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRI2 for Ito equations.
stratSRS2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRS2 for Stratonovich equations.
stratKP2iS: the Kloeden and Platen two-step implicit order 1.0 strong algorithm
for Stratonovich equations.
"""
from __future__ import absolute_import
from .wiener import deltaW, Ikpw, Iwik, Jkpw, Jwik
import numpy as np
import numbers
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
class SDEValueError(Error):
    """Thrown if integration arguments fail some basic sanity checks."""
    pass
def _check_args(f, G, y0, tspan, dW=None, IJ=None):
    """Do some validation common to all algorithms. Find dimension d and number
    of Wiener processes m.

    Scalar problems (a numeric y0 with f and G returning plain numbers) are
    promoted here to an equivalent 1-dimensional vector system, so the
    integrators themselves only ever deal with arrays.

    Returns:
      (d, m, f, G, y0, tspan, dW, IJ): the (possibly wrapped/converted)
      inputs, plus the system dimension d and the number of independent
      Wiener processes m inferred from G.

    Raises:
      SDEValueError: if the time points are not equally spaced, or the
        shapes of y0, f, G, dW and IJ are mutually inconsistent.
    """
    if not np.isclose(min(np.diff(tspan)), max(np.diff(tspan))):
        raise SDEValueError('Currently time steps must be equally spaced.')
    # Be flexible to allow scalar equations. convert them to a 1D vector system
    if isinstance(y0, numbers.Number):
        if isinstance(y0, numbers.Integral):
            # promote integer initial values so arithmetic is done in float
            numtype = np.float64
        else:
            numtype = type(y0)
        y0_orig = y0
        y0 = np.array([y0], dtype=numtype)
        def make_vector_fn(fn):
            # wrap a scalar drift function so it returns a shape (1,) array
            def newfn(y, t):
                return np.array([fn(y[0], t)], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        def make_matrix_fn(fn):
            # wrap a scalar noise function so it returns a shape (1, 1) array
            def newfn(y, t):
                return np.array([[fn(y[0], t)]], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        if isinstance(f(y0_orig, tspan[0]), numbers.Number):
            f = make_vector_fn(f)
        if isinstance(G(y0_orig, tspan[0]), numbers.Number):
            G = make_matrix_fn(G)
    # determine dimension d of the system
    d = len(y0)
    if len(f(y0, tspan[0])) != d:
        raise SDEValueError('y0 and f have incompatible shapes.')
    message = """y0 has length %d. So G must either be a single function
    returning a matrix of shape (%d, m), or else a list of m separate
    functions each returning a column of G, with shape (%d,)""" % (d, d, d)
    if callable(G):
        # then G must be a function returning a d x m matrix
        Gtest = G(y0, tspan[0])
        if Gtest.ndim != 2 or Gtest.shape[0] != d:
            raise SDEValueError(message)
        # determine number of independent Wiener processes m
        m = Gtest.shape[1]
    else:
        # G should be a list of m functions g_i giving columns of G
        G = tuple(G)
        m = len(G)
        Gtest = np.zeros((d, m))
        for k in range(0, m):
            if not callable(G[k]):
                raise SDEValueError(message)
            Gtestk = G[k](y0, tspan[0])
            if np.shape(Gtestk) != (d,):
                raise SDEValueError(message)
            Gtest[:,k] = Gtestk
    message = """From function G, it seems m==%d. If present, the optional
    parameter dW must be an array of shape (len(tspan)-1, m) giving
    m independent Wiener increments for each time interval.""" % m
    if dW is not None:
        # caller-supplied Wiener increments must match shape (N-1, m)
        if not hasattr(dW, 'shape') or dW.shape != (len(tspan) - 1, m):
            raise SDEValueError(message)
    message = """From function G, it seems m==%d. If present, the optional
    parameter I or J must be an array of shape (len(tspan)-1, m, m)
    giving an m x m matrix of repeated integral values for each
    time interval.""" % m
    if IJ is not None:
        # caller-supplied repeated integrals must match shape (N-1, m, m)
        if not hasattr(IJ, 'shape') or IJ.shape != (len(tspan) - 1, m, m):
            raise SDEValueError(message)
    return (d, m, f, G, y0, tspan, dW, IJ)
def itoint(f, G, y0, tspan):
    """Numerically integrate the Ito equation dy = f(y,t)dt + G(y,t)dW

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y,t) returning a numpy array of shape (d,)
        Vector-valued function to define the deterministic part of the system
      G: callable(y,t) returning a numpy array of shape (d,m)
        Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.

    Returns:
      y: array, with shape (len(tspan), len(y0))
        With the initial value y0 in the first row

    Raises:
      SDEValueError
    """
    # Validate arguments and normalize any scalar system to 1D vector form.
    # In future versions the most suitable Ito algorithm could be chosen here
    # automatically, based on properties of the system and of the noise.
    checked = _check_args(f, G, y0, tspan, None, None)
    f, G, y0, tspan = checked[2:6]
    return itoSRI2(f, G, y0, tspan)
def stratint(f, G, y0, tspan):
    """Numerically integrate Stratonovich equation dy = f(y,t)dt + G(y,t).dW

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y,t) returning a numpy array of shape (d,)
        Vector-valued function to define the deterministic part of the system
      G: callable(y,t) returning a numpy array of shape (d,m)
        Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.

    Returns:
      y: array, with shape (len(tspan), len(y0))
        With the initial value y0 in the first row

    Raises:
      SDEValueError
    """
    # Validate arguments and normalize any scalar system to 1D vector form.
    # In future versions the most suitable Stratonovich algorithm could be
    # chosen here automatically, based on the system and noise properties.
    checked = _check_args(f, G, y0, tspan, None, None)
    f, G, y0, tspan = checked[2:6]
    return stratSRS2(f, G, y0, tspan)
def itoEuler(f, G, y0, tspan, dW=None):
    """Use the Euler-Maruyama algorithm to integrate the Ito equation
    dy = f(y,t)dt + G(y,t) dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y, t) returning (d,) array
        Vector-valued function to define the deterministic part of the system
      G: callable(y, t) returning (d,m) array
        Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      dW: optional array of shape (len(tspan)-1, m). This is for advanced use,
        if you want to use a specific realization of the m independent Wiener
        processes. If not provided Wiener increments will be generated randomly

    Returns:
      y: array, with shape (len(tspan), len(y0))
        With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      G. Maruyama (1955) Continuous Markov processes and stochastic equations
      Kloeden and Platen (1999) Numerical Solution of Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # assuming equally spaced time steps
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)
    y[0] = y0
    for n in range(0, N-1):
        tn = tspan[n]
        yn = y[n]
        dWn = dW[n,:]
        # explicit Euler-Maruyama step (order 0.5 strong convergence):
        y[n+1] = yn + f(yn, tn)*h + G(yn, tn).dot(dWn)
    return y
def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):
    """Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
    SRI2 to integrate an Ito equation dy = f(y,t)dt + G(y,t)dW(t)

    where y is d-dimensional vector variable, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) is a vector of m independent Wiener increments.

    This algorithm is suitable for Ito systems with an arbitrary noise
    coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
    or commutative). The algorithm has order 2.0 convergence for the
    deterministic part alone and order 1.0 strong convergence for the complete
    stochastic system.

    Args:
      f: A function f(y, t) returning an array of shape (d,)
        Vector-valued function to define the deterministic part of the system
      G: The d x m coefficient function G can be given in two different ways:
        You can provide a single function G(y, t) that returns an array of
        shape (d, m). In this case the entire matrix G() will be evaluated
        2m+1 times at each time step so complexity grows quadratically with m.
        Alternatively you can provide a list of m functions g(y, t) each
        defining one column of G (each returning an array of shape (d,).
        In this case each g will be evaluated 3 times at each time step so
        complexity grows linearly with m. If your system has large m and
        G involves complicated functions, consider using this way.
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      Imethod (callable, optional): which function to use to simulate repeated
        Ito integrals. Here you can choose either sdeint.Ikpw (the default) or
        sdeint.Iwik (which is more accurate but uses a lot of memory in the
        current implementation).
      dW: optional array of shape (len(tspan)-1, m).
      I: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and I are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))
        With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
      Solutions of Stochastic Differential Equations
    """
    # SRI2 and SRS2 share the same extended Butcher tableau; only the repeated
    # integral method differs, so delegate to the common implementation.
    return _Roessler2010_SRK2(f, G, y0, tspan, Imethod, dW, I)
def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):
    """Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
    SRS2 to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)

    where y is d-dimensional vector variable, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) is a vector of m independent Wiener increments.

    This algorithm is suitable for Stratonovich systems with an arbitrary noise
    coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
    or commutative). The algorithm has order 2.0 convergence for the
    deterministic part alone and order 1.0 strong convergence for the complete
    stochastic system.

    Args:
      f: A function f(y, t) returning an array of shape (d,)
        Vector-valued function to define the deterministic part of the system
      G: The d x m coefficient function G can be given in two different ways:
        You can provide a single function G(y, t) that returns an array of
        shape (d, m). In this case the entire matrix G() will be evaluated
        2m+1 times at each time step so complexity grows quadratically with m.
        Alternatively you can provide a list of m functions g(y, t) each
        defining one column of G (each returning an array of shape (d,).
        In this case each g will be evaluated 3 times at each time step so
        complexity grows linearly with m. If your system has large m and
        G involves complicated functions, consider using this way.
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      Jmethod (callable, optional): which function to use to simulate repeated
        Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
        default) or sdeint.Jwik (which is more accurate but uses a lot of
        memory in the current implementation).
      dW: optional array of shape (len(tspan)-1, m).
      J: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and J are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))
        With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
      Solutions of Stochastic Differential Equations
    """
    # SRS2 and SRI2 share the same extended Butcher tableau; only the repeated
    # integral method differs, so delegate to the common implementation.
    return _Roessler2010_SRK2(f, G, y0, tspan, Jmethod, dW, J)
def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):
    """Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
    algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations).

    Algorithms SRI2 and SRS2 are almost identical and have the same extended
    Butcher tableaus. The difference is that Ito repeated integrals I_ij are
    replaced by Stratonovich repeated integrals J_ij when integrating a
    Stratonovich equation (Theorem 6.2 in Roessler2010).

    Args:
      f: A function f(y, t) returning an array of shape (d,)
      G: Either a function G(y, t) that returns an array of shape (d, m),
        or a list of m functions g(y, t) each returning an array shape (d,).
      y0: array of shape (d,) giving the initial state
      tspan (array): Sequence of equally spaced time points
      IJmethod (callable): which function to use to generate repeated
        integrals. N.B. for an Ito equation, must use an Ito version here
        (either Ikpw or Iwik). For a Stratonovich equation, must use a
        Stratonovich version here (Jkpw or Jwik).
      dW: optional array of shape (len(tspan)-1, m).
      IJ: optional array of shape (len(tspan)-1, m, m).
        Optional arguments dW and IJ are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
      Solutions of Stochastic Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, IJ) = _check_args(f, G, y0, tspan, dW, IJ)
    have_separate_g = (not callable(G)) # if G is given as m separate functions
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h) # shape (N-1, m)
    if IJ is None:
        # pre-generate repeated stochastic integrals for each time step.
        # Must give I_ij for the Ito case or J_ij for the Stratonovich case:
        __, I = IJmethod(dW, h) # shape (N-1, m, m)
    else:
        I = IJ
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    y[0] = y0
    Gn = np.zeros((d, m), dtype=y.dtype)
    for n in range(0, N-1):
        tn = tspan[n]
        tn1 = tspan[n+1]
        h = tn1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n] # shape (d,)
        Ik = dW[n,:] # shape (m,)
        Iij = I[n,:,:] # shape (m, m)
        fnh = f(Yn, tn)*h # shape (d,)
        if have_separate_g:
            for k in range(0, m):
                Gn[:,k] = G[k](Yn, tn)
        else:
            Gn = G(Yn, tn)
        sum1 = np.dot(Gn, Iij)/sqrth # shape (d, m)
        # supporting stage values of the SRK scheme:
        H20 = Yn + fnh # shape (d,)
        H20b = np.reshape(H20, (d, 1))
        H2 = H20b + sum1 # shape (d, m)
        H3 = H20b - sum1
        fn1h = f(H20, tn1)*h
        # deterministic trapezoidal part plus Euler-Maruyama noise term:
        Yn1 = Yn + 0.5*(fnh + fn1h) + np.dot(Gn, Ik)
        # stochastic correction built from the supporting stages H2, H3:
        if have_separate_g:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G[k](H2[:,k], tn1) - G[k](H3[:,k], tn1))
        else:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G(H2[:,k], tn1)[:,k] - G(H3[:,k], tn1)[:,k])
        y[n+1] = Yn1
    return y
def stratKP2iS(f, G, y0, tspan, Jmethod=Jkpw, gam=None, al1=None, al2=None,
               rtol=1e-4, dW=None, J=None):
    """Use the Kloeden and Platen two-step implicit order 1.0 strong algorithm
    to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)

    This semi-implicit algorithm may be useful for stiff systems. The noise
    does not need to be scalar, diagonal, or commutative.

    This algorithm is defined in Kloeden and Platen (1999) section 12.4,
    equations (4.5) and (4.7). Here implementing that scheme with default
    parameters \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5 for k=1..d using
    MINPACK HYBRD algorithm to solve the implicit vector equation at each step.

    Args:
      f: A function f(y, t) returning an array of shape (d,) to define the
        deterministic part of the system
      G: A function G(y, t) returning an array of shape (d, m) to define the
        noise coefficients of the system
      y0: array of shape (d,) giving the initial state
      tspan (array): Sequence of equally spaced time points
      Jmethod (callable, optional): which function to use to simulate repeated
        Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
        default) or sdeint.Jwik (which is more accurate but uses a lot of
        memory in the current implementation).
      gam, al1, al2 (optional arrays of shape (d,)): These can configure free
        parameters \gamma_k, \alpha_{1,k}, \alpha_{2,k} of the algorithm.
        You can omit these, then the default values 0.5 will be used.
      rtol (float, optional): Relative error tolerance. The default is 1e-4.
        This is the relative tolerance used when solving the implicit equation
        for Y_{n+1} at each step. It does not mean that the overall sample path
        approximation has this relative precision.
      dW: optional array of shape (len(tspan)-1, m).
      J: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and J are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))

    Raises:
      SDEValueError, RuntimeError

    See also:
      P. Kloeden and E. Platen (1999) Numerical Solution of Stochastic
      Differential Equations, revised and updated 3rd printing.
    """
    try:
        from scipy.optimize import fsolve
    except ImportError:
        raise Error('stratKP2iS() requires package ``scipy`` to be installed.')
    (d, m, f, G, y0, tspan, dW, J) = _check_args(f, G, y0, tspan, dW, J)
    if not callable(G):
        raise SDEValueError('G should be a function returning a d x m matrix.')
    if np.iscomplexobj(y0):
        raise SDEValueError("stratKP2iS() can't yet handle complex variables.")
    if gam is None:
        gam = np.ones((d,))*0.5 # Default level of implicitness \gamma_k = 0.5
    if al1 is None:
        al1 = np.ones((d,))*0.5 # Default \alpha_{1,k} = 0.5
    if al2 is None:
        al2 = np.ones((d,))*0.5 # Default \alpha_{2,k} = 0.5
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h) # shape (N-1, m)
    if J is None:
        # pre-generate repeated Stratonovich integrals for each time step
        __, J = Jmethod(dW, h) # shape (N-1, m, m)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    def _imp(Ynp1, Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1):
        """At each step we will solve _imp(Ynp1, ...) == 0 for Ynp1.
        The meaning of these arguments is: Y_{n+1}, Y_n, Y_{n-1}, V_n, V_{n-1},
        t_{n+1}, t_n, t_{n-1}, f(Y_n, t_n), f(Y_{n-1}, t_{n-1})."""
        return ((1 - gam)*Yn + gam*Ynm1 + (al2*f(Ynp1, tnp1) +
                (gam*al1 + (1 - al2))*fn + gam*(1 - al1)*fnm1)*h + Vn +
                gam*Vnm1 - Ynp1)
    fn = None
    Vn = None
    y[0] = y0
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        h = tnp1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n] # shape (d,)
        Jk = dW[n,:] # shape (m,)
        Jij = J[n,:,:] # shape (m, m)
        fnm1 = fn
        fn = f(Yn, tn)
        Gn = G(Yn, tn)
        # supporting values and stochastic increment V_n (K&P eq. (4.7)):
        Ybar = (Yn + fn*h).reshape((d, 1)) + Gn*sqrth # shape (d, m)
        sum1 = np.zeros((d,))
        for j1 in range(0, m):
            sum1 += np.dot(G(Ybar[:,j1], tn) - Gn, Jij[j1,:])
        Vnm1 = Vn
        Vn = np.dot(Gn, Jk) + sum1/sqrth
        if n == 0:
            # First step uses Kloeden&Platen explicit order 1.0 strong scheme:
            y[n+1] = Yn + fn*h + Vn
            continue
        tnm1 = tspan[n-1]
        Ynm1 = y[n-1] # shape (d,)
        # now solve _imp(Ynp1, ...) == 0 for Ynp1, near to Yn
        args = (Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1)
        (Ynp1, __, status, msg) = fsolve(_imp, Yn, args=args, xtol=rtol,
                                         full_output=True)
        if status == 1:
            y[n+1] = Ynp1
        else:
            # use a distinct name for the error text: must not clobber the
            # number of Wiener processes m that the loop above depends on.
            errmsg = """At time t_n = %g Failed to solve for Y_{n+1} with args %s.
            Reason: %s""" % (tn, args, msg)
            raise RuntimeError(errmsg)
    return y
|
mattja/sdeint | sdeint/integrate.py | itoSRI2 | python | def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):
return _Roessler2010_SRK2(f, G, y0, tspan, Imethod, dW, I) | Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
SRI2 to integrate an Ito equation dy = f(y,t)dt + G(y,t)dW(t)
where y is d-dimensional vector variable, f is a vector-valued function,
G is a d x m matrix-valued function giving the noise coefficients and
dW(t) is a vector of m independent Wiener increments.
This algorithm is suitable for Ito systems with an arbitrary noise
coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
or commutative). The algorithm has order 2.0 convergence for the
deterministic part alone and order 1.0 strong convergence for the complete
stochastic system.
Args:
f: A function f(y, t) returning an array of shape (d,)
Vector-valued function to define the deterministic part of the system
G: The d x m coefficient function G can be given in two different ways:
You can provide a single function G(y, t) that returns an array of
shape (d, m). In this case the entire matrix G() will be evaluated
2m+1 times at each time step so complexity grows quadratically with m.
Alternatively you can provide a list of m functions g(y, t) each
defining one column of G (each returning an array of shape (d,).
In this case each g will be evaluated 3 times at each time step so
complexity grows linearly with m. If your system has large m and
G involves complicated functions, consider using this way.
y0: array of shape (d,) giving the initial state vector y(t==0)
tspan (array): The sequence of time points for which to solve for y.
These must be equally spaced, e.g. np.arange(0,10,0.005)
tspan[0] is the intial time corresponding to the initial state y0.
Imethod (callable, optional): which function to use to simulate repeated
Ito integrals. Here you can choose either sdeint.Ikpw (the default) or
sdeint.Iwik (which is more accurate but uses a lot of memory in the
current implementation).
dW: optional array of shape (len(tspan)-1, d).
I: optional array of shape (len(tspan)-1, m, m).
These optional arguments dW and I are for advanced use, if you want to
use a specific realization of the d independent Wiener processes and
their multiple integrals at each time step. If not provided, suitable
values will be generated randomly.
Returns:
y: array, with shape (len(tspan), len(y0))
With the initial value y0 in the first row
Raises:
SDEValueError
See also:
A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
Solutions of Stochastic Differential Equations | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/integrate.py#L288-L347 | [
"def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):\n \"\"\"Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta\n algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations). \n\n Algorithms SRI2 and SRS2 are almost identical and have the same extended\n B... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""Numerical integration algorithms for Ito and Stratonovich stochastic
ordinary differential equations.
Usage:
itoint(f, G, y0, tspan) for Ito equation dy = f dt + G dW
stratint(f, G, y0, tspan) for Stratonovich equation dy = f dt + G \circ dW
y0 is the initial value
tspan is an array of time values (currently these must be equally spaced)
function f is the deterministic part of the system (scalar or dx1 vector)
function G is the stochastic part of the system (scalar or d x m matrix)
sdeint will choose an algorithm for you. Or you can choose one explicitly:
itoEuler: the Euler-Maruyama algorithm for Ito equations.
stratHeun: the Stratonovich Heun algorithm for Stratonovich equations.
itoSRI2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRI2 for Ito equations.
stratSRS2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRS2 for Stratonovich equations.
stratKP2iS: the Kloeden and Platen two-step implicit order 1.0 strong algorithm
for Stratonovich equations.
"""
from __future__ import absolute_import
from .wiener import deltaW, Ikpw, Iwik, Jkpw, Jwik
import numpy as np
import numbers
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
class SDEValueError(Error):
    """Thrown if integration arguments fail some basic sanity checks
    (e.g. unequally spaced time steps, or shapes of y0, f, G, dW or IJ
    that are not mutually compatible).
    """
    pass
def _check_args(f, G, y0, tspan, dW=None, IJ=None):
"""Do some validation common to all algorithms. Find dimension d and number
of Wiener processes m.
"""
if not np.isclose(min(np.diff(tspan)), max(np.diff(tspan))):
raise SDEValueError('Currently time steps must be equally spaced.')
# Be flexible to allow scalar equations. convert them to a 1D vector system
if isinstance(y0, numbers.Number):
if isinstance(y0, numbers.Integral):
numtype = np.float64
else:
numtype = type(y0)
y0_orig = y0
y0 = np.array([y0], dtype=numtype)
def make_vector_fn(fn):
def newfn(y, t):
return np.array([fn(y[0], t)], dtype=numtype)
newfn.__name__ = fn.__name__
return newfn
def make_matrix_fn(fn):
def newfn(y, t):
return np.array([[fn(y[0], t)]], dtype=numtype)
newfn.__name__ = fn.__name__
return newfn
if isinstance(f(y0_orig, tspan[0]), numbers.Number):
f = make_vector_fn(f)
if isinstance(G(y0_orig, tspan[0]), numbers.Number):
G = make_matrix_fn(G)
# determine dimension d of the system
d = len(y0)
if len(f(y0, tspan[0])) != d:
raise SDEValueError('y0 and f have incompatible shapes.')
message = """y0 has length %d. So G must either be a single function
returning a matrix of shape (%d, m), or else a list of m separate
functions each returning a column of G, with shape (%d,)""" % (
d, d, d)
if callable(G):
# then G must be a function returning a d x m matrix
Gtest = G(y0, tspan[0])
if Gtest.ndim != 2 or Gtest.shape[0] != d:
raise SDEValueError(message)
# determine number of independent Wiener processes m
m = Gtest.shape[1]
else:
# G should be a list of m functions g_i giving columns of G
G = tuple(G)
m = len(G)
Gtest = np.zeros((d, m))
for k in range(0, m):
if not callable(G[k]):
raise SDEValueError(message)
Gtestk = G[k](y0, tspan[0])
if np.shape(Gtestk) != (d,):
raise SDEValueError(message)
Gtest[:,k] = Gtestk
message = """From function G, it seems m==%d. If present, the optional
parameter dW must be an array of shape (len(tspan)-1, m) giving
m independent Wiener increments for each time interval.""" % m
if dW is not None:
if not hasattr(dW, 'shape') or dW.shape != (len(tspan) - 1, m):
raise SDEValueError(message)
message = """From function G, it seems m==%d. If present, the optional
parameter I or J must be an array of shape (len(tspan)-1, m, m)
giving an m x m matrix of repeated integral values for each
time interval.""" % m
if IJ is not None:
if not hasattr(IJ, 'shape') or IJ.shape != (len(tspan) - 1, m, m):
raise SDEValueError(message)
return (d, m, f, G, y0, tspan, dW, IJ)
def itoint(f, G, y0, tspan):
    """Numerically integrate the Ito equation dy = f(y,t)dt + G(y,t)dW

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y,t) returning a numpy array of shape (d,)
        Vector-valued function to define the deterministic part of the system
      G: callable(y,t) returning a numpy array of shape (d,m)
        Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.

    Returns:
      y: array, with shape (len(tspan), len(y0))
        With the initial value y0 in the first row

    Raises:
      SDEValueError
    """
    # Validate arguments and normalize scalar systems to 1D vector form; a
    # future version may select the most suitable Ito algorithm here based on
    # properties of the system and noise. Currently SRI2 is always used.
    (d, m, f, G, y0, tspan, __, __) = _check_args(f, G, y0, tspan, None, None)
    return itoSRI2(f, G, y0, tspan)
def stratint(f, G, y0, tspan):
    """Numerically integrate Stratonovich equation dy = f(y,t)dt + G(y,t).dW

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y,t) returning a numpy array of shape (d,)
        Vector-valued function to define the deterministic part of the system
      G: callable(y,t) returning a numpy array of shape (d,m)
        Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.

    Returns:
      y: array, with shape (len(tspan), len(y0))
        With the initial value y0 in the first row

    Raises:
      SDEValueError
    """
    # Validate arguments and normalize scalar systems to 1D vector form; a
    # future version may select the most suitable Stratonovich algorithm here
    # based on system and noise properties. Currently SRS2 is always used.
    (d, m, f, G, y0, tspan, __, __) = _check_args(f, G, y0, tspan, None, None)
    return stratSRS2(f, G, y0, tspan)
def itoEuler(f, G, y0, tspan, dW=None):
    """Use the Euler-Maruyama algorithm to integrate the Ito equation
    dy = f(y,t)dt + G(y,t) dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y, t) returning (d,) array
        Vector-valued function to define the deterministic part of the system
      G: callable(y, t) returning (d,m) array
        Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      dW: optional array of shape (len(tspan)-1, m). This is for advanced use,
        if you want to use a specific realization of the m independent Wiener
        processes. If not provided Wiener increments will be generated randomly

    Returns:
      y: array, with shape (len(tspan), len(y0))
        With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      G. Maruyama (1955) Continuous Markov processes and stochastic equations
      Kloeden and Platen (1999) Numerical Solution of Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # assuming equally spaced time steps
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)
    y[0] = y0
    for n in range(0, N-1):
        tn = tspan[n]
        yn = y[n]
        dWn = dW[n,:]
        # explicit Euler-Maruyama step (order 0.5 strong convergence):
        y[n+1] = yn + f(yn, tn)*h + G(yn, tn).dot(dWn)
    return y
def stratHeun(f, G, y0, tspan, dW=None):
    """Use the Stratonovich Heun algorithm to integrate Stratonovich equation
    dy = f(y,t)dt + G(y,t) \circ dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y, t) returning (d,) array
        Vector-valued function to define the deterministic part of the system
      G: callable(y, t) returning (d,m) array
        Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      dW: optional array of shape (len(tspan)-1, m). This is for advanced use,
        if you want to use a specific realization of the m independent Wiener
        processes. If not provided Wiener increments will be generated randomly

    Returns:
      y: array, with shape (len(tspan), len(y0))
        With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      W. Rumelin (1982) Numerical Treatment of Stochastic Differential
        Equations
      R. Mannella (2002) Integration of Stochastic Differential Equations
        on a Computer
      K. Burrage, P. M. Burrage and T. Tian (2004) Numerical methods for strong
        solutions of stochastic differential equations: an overview
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # assuming equally spaced time steps
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)
    y[0] = y0
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        yn = y[n]
        dWn = dW[n,:]
        fn = f(yn, tn)
        Gn = G(yn, tn)
        # Euler predictor step:
        ybar = yn + fn*h + Gn.dot(dWn)
        fnbar = f(ybar, tnp1)
        Gnbar = G(ybar, tnp1)
        # trapezoidal (Heun) corrector gives the Stratonovich solution:
        y[n+1] = yn + 0.5*(fn + fnbar)*h + 0.5*(Gn + Gnbar).dot(dWn)
    return y
def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):
    """Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
    SRI2 to integrate an Ito equation dy = f(y,t)dt + G(y,t)dW(t)

    where y is d-dimensional vector variable, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) is a vector of m independent Wiener increments.

    This algorithm is suitable for Ito systems with an arbitrary noise
    coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
    or commutative). The algorithm has order 2.0 convergence for the
    deterministic part alone and order 1.0 strong convergence for the complete
    stochastic system.

    Args:
      f: A function f(y, t) returning an array of shape (d,)
        Vector-valued function to define the deterministic part of the system
      G: The d x m coefficient function G can be given in two different ways:
        You can provide a single function G(y, t) that returns an array of
        shape (d, m). In this case the entire matrix G() will be evaluated
        2m+1 times at each time step so complexity grows quadratically with m.
        Alternatively you can provide a list of m functions g(y, t) each
        defining one column of G (each returning an array of shape (d,).
        In this case each g will be evaluated 3 times at each time step so
        complexity grows linearly with m. If your system has large m and
        G involves complicated functions, consider using this way.
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
      Imethod (callable, optional): which function to use to simulate repeated
        Ito integrals. Here you can choose either sdeint.Ikpw (the default) or
        sdeint.Iwik (which is more accurate but uses a lot of memory in the
        current implementation).
      dW: optional array of shape (len(tspan)-1, m).
      I: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and I are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))
        With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
      Solutions of Stochastic Differential Equations
    """
    # SRI2 and SRS2 share the same extended Butcher tableau; only the repeated
    # integral method differs, so delegate to the common implementation.
    return _Roessler2010_SRK2(f, G, y0, tspan, Imethod, dW, I)
def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):
    r"""Integrate the Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)
    using the order 1.0 strong Stochastic Runge-Kutta scheme SRS2 of
    Roessler (2010).

    The state y is a d-dimensional vector, f(y, t) gives the drift and the
    d x m matrix-valued function G(y, t) gives the noise coefficients for m
    independent Wiener increments dW(t). No structure is assumed of the
    noise: G may be non-scalar, non-diagonal and non-commutative. The scheme
    has strong order 1.0 convergence overall and order 2.0 on the
    deterministic part taken alone.

    Args:
        f: callable(y, t) returning an array of shape (d,), the drift.
        G: the noise coefficients, given in either of two forms:
          a single callable G(y, t) returning an array of shape (d, m)
          (the whole matrix is evaluated 2m+1 times per step, so cost grows
          quadratically with m), or a list of m callables g(y, t) each
          returning one column of shape (d,) (each evaluated 3 times per
          step, so cost grows linearly with m - prefer this form when m is
          large and G is expensive).
        y0: array of shape (d,), the initial state at time tspan[0].
        tspan (array): equally spaced time points to solve for, e.g.
          np.arange(0, 10, 0.005).
        Jmethod (callable, optional): generator of repeated Stratonovich
          integrals; either sdeint.Jkpw (the default) or sdeint.Jwik (more
          accurate, but memory-hungry in the current implementation).
        dW: optional array of shape (len(tspan)-1, d) giving a specific
          realization of the Wiener increments (advanced use).
        J: optional array of shape (len(tspan)-1, m, m) giving the matching
          repeated integrals (advanced use). Random values are generated
          for dW and J when they are not supplied.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation
        of Solutions of Stochastic Differential Equations
    """
    # SRS2 and the Ito scheme SRI2 share one extended Butcher tableau; they
    # differ only in using Stratonovich repeated integrals J_ij, so both are
    # handled by the common implementation.
    return _Roessler2010_SRK2(f, G, y0, tspan, Jmethod, dW, J)
def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):
    """Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
    algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations).

    Algorithms SRI2 and SRS2 are almost identical and have the same extended
    Butcher tableaus. The difference is that Ito repeated integrals I_ij are
    replaced by Stratonovich repeated integrals J_ij when integrating a
    Stratonovich equation (Theorem 6.2 in Roessler2010).

    Args:
        f: A function f(y, t) returning an array of shape (d,)
        G: Either a function G(y, t) that returns an array of shape (d, m),
           or a list of m functions g(y, t) each returning an array shape (d,).
        y0: array of shape (d,) giving the initial state
        tspan (array): Sequence of equally spaced time points
        IJmethod (callable): which function to use to generate repeated
          integrals. N.B. for an Ito equation, must use an Ito version here
          (either Ikpw or Iwik). For a Stratonovich equation, must use a
          Stratonovich version here (Jkpw or Jwik).
        dW: optional array of shape (len(tspan)-1, d).
        IJ: optional array of shape (len(tspan)-1, m, m).
           Optional arguments dW and IJ are for advanced use, if you want to
           use a specific realization of the d independent Wiener processes
           and their multiple integrals at each time step. If not provided,
           suitable values will be generated randomly.

    Returns:
        y: array, with shape (len(tspan), len(y0))

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, IJ) = _check_args(f, G, y0, tspan, dW, IJ)
    have_separate_g = (not callable(G))  # G given as m separate functions?
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)  # shape (N-1, m)
    if IJ is None:
        # pre-generate repeated stochastic integrals for each time step.
        # Must give I_ij for the Ito case or J_ij for the Stratonovich case:
        __, I = IJmethod(dW, h)  # shape (N-1, m, m)
    else:
        I = IJ
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    y[0] = y0
    Gn = np.zeros((d, m), dtype=y.dtype)
    for n in range(0, N-1):
        tn = tspan[n]
        tn1 = tspan[n+1]
        h = tn1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n]  # shape (d,)
        Ik = dW[n, :]  # shape (m,)
        Iij = I[n, :, :]  # shape (m, m)
        fnh = f(Yn, tn)*h  # shape (d,)
        if have_separate_g:
            for k in range(0, m):
                Gn[:, k] = G[k](Yn, tn)
        else:
            Gn = G(Yn, tn)
        sum1 = np.dot(Gn, Iij)/sqrth  # shape (d, m)
        # supporting stage values of the scheme:
        H20 = Yn + fnh  # shape (d,)
        H20b = np.reshape(H20, (d, 1))
        H2 = H20b + sum1  # shape (d, m)
        H3 = H20b - sum1
        fn1h = f(H20, tn1)*h
        Yn1 = Yn + 0.5*(fnh + fn1h) + np.dot(Gn, Ik)
        if have_separate_g:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G[k](H2[:, k], tn1) - G[k](H3[:, k], tn1))
        else:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G(H2[:, k], tn1)[:, k] -
                                  G(H3[:, k], tn1)[:, k])
        y[n+1] = Yn1
    return y
def stratKP2iS(f, G, y0, tspan, Jmethod=Jkpw, gam=None, al1=None, al2=None,
               rtol=1e-4, dW=None, J=None):
    r"""Use the Kloeden and Platen two-step implicit order 1.0 strong algorithm
    to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)

    This semi-implicit algorithm may be useful for stiff systems. The noise
    does not need to be scalar, diagonal, or commutative.

    This algorithm is defined in Kloeden and Platen (1999) section 12.4,
    equations (4.5) and (4.7). Here implementing that scheme with default
    parameters \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5 for k=1..d using
    MINPACK HYBRD algorithm to solve the implicit vector equation at each step.

    Args:
        f: A function f(y, t) returning an array of shape (d,) to define the
          deterministic part of the system
        G: A function G(y, t) returning an array of shape (d, m) to define the
          noise coefficients of the system
        y0: array of shape (d,) giving the initial state
        tspan (array): Sequence of equally spaced time points
        Jmethod (callable, optional): which function to use to simulate
          repeated Stratonovich integrals. Here you can choose either
          sdeint.Jkpw (the default) or sdeint.Jwik (which is more accurate
          but uses a lot of memory in the current implementation).
        gam, al1, al2 (optional arrays of shape (d,)): These can configure
          free parameters \gamma_k, \alpha_{1,k}, \alpha_{2,k} of the
          algorithm. You can omit these, then the default values 0.5 will be
          used.
        rtol (float, optional): Relative error tolerance. The default is 1e-4.
          This is the relative tolerance used when solving the implicit
          equation for Y_{n+1} at each step. It does not mean that the
          overall sample path approximation has this relative precision.
        dW: optional array of shape (len(tspan)-1, d).
        J: optional array of shape (len(tspan)-1, m, m).
          These optional arguments dW and J are for advanced use, if you want
          to use a specific realization of the d independent Wiener processes
          and their multiple integrals at each time step. If not provided,
          suitable values will be generated randomly.

    Returns:
        y: array, with shape (len(tspan), len(y0))

    Raises:
        SDEValueError, RuntimeError

    See also:
        P. Kloeden and E. Platen (1999) Numerical Solution of Stochastic
        Differential Equations, revised and updated 3rd printing.
    """
    try:
        from scipy.optimize import fsolve
    except ImportError:
        raise Error('stratKP2iS() requires package ``scipy`` to be installed.')
    (d, m, f, G, y0, tspan, dW, J) = _check_args(f, G, y0, tspan, dW, J)
    if not callable(G):
        raise SDEValueError('G should be a function returning a d x m matrix.')
    if np.iscomplexobj(y0):
        raise SDEValueError("stratKP2iS() can't yet handle complex variables.")
    if gam is None:
        gam = np.ones((d,))*0.5  # Default level of implicitness \gamma_k = 0.5
    if al1 is None:
        al1 = np.ones((d,))*0.5  # Default \alpha_{1,k} = 0.5
    if al2 is None:
        al2 = np.ones((d,))*0.5  # Default \alpha_{2,k} = 0.5
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)  # shape (N-1, m)
    if J is None:
        # pre-generate repeated Stratonovich integrals for each time step
        __, J = Jmethod(dW, h)  # shape (N-1, m, m)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))

    def _imp(Ynp1, Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1):
        """At each step we will solve _imp(Ynp1, ...) == 0 for Ynp1.
        The meaning of these arguments is: Y_{n+1}, Y_n, Y_{n-1}, V_n, V_{n-1},
        t_{n+1}, t_n, t_{n-1}, f(Y_n, t_n), f(Y_{n-1}, t_{n-1})."""
        # gam, al1, al2 and the current step size h are taken from the
        # enclosing scope.
        return ((1 - gam)*Yn + gam*Ynm1 + (al2*f(Ynp1, tnp1) +
                (gam*al1 + (1 - al2))*fn + gam*(1 - al1)*fnm1)*h + Vn +
                gam*Vnm1 - Ynp1)

    fn = None
    Vn = None
    y[0] = y0
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        h = tnp1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n]  # shape (d,)
        Jk = dW[n, :]  # shape (m,)
        Jij = J[n, :, :]  # shape (m, m)
        fnm1 = fn
        fn = f(Yn, tn)
        Gn = G(Yn, tn)
        # supporting values used to build the noise term V_n:
        Ybar = (Yn + fn*h).reshape((d, 1)) + Gn*sqrth  # shape (d, m)
        sum1 = np.zeros((d,))
        for j1 in range(0, m):
            sum1 += np.dot(G(Ybar[:, j1], tn) - Gn, Jij[j1, :])
        Vnm1 = Vn
        Vn = np.dot(Gn, Jk) + sum1/sqrth
        if n == 0:
            # First step uses Kloeden&Platen explicit order 1.0 strong scheme:
            y[n+1] = Yn + fn*h + Vn
            continue
        tnm1 = tspan[n-1]
        Ynm1 = y[n-1]  # shape (d,)
        # now solve _imp(Ynp1, ...) == 0 for Ynp1, near to Yn
        args = (Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1)
        (Ynp1, __, status, msg) = fsolve(_imp, Yn, args=args, xtol=rtol,
                                         full_output=True)
        if status == 1:
            y[n+1] = Ynp1
        else:
            # NB: bind the message to a fresh name. The original code reused
            # ``m`` here, clobbering the number of Wiener processes.
            errmsg = """At time t_n = %g Failed to solve for Y_{n+1} with args %s.
          Reason: %s""" % (tn, args, msg)
            raise RuntimeError(errmsg)
    return y
|
mattja/sdeint | sdeint/integrate.py | stratSRS2 | python | def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):
return _Roessler2010_SRK2(f, G, y0, tspan, Jmethod, dW, J) | Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
SRS2 to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)
where y is d-dimensional vector variable, f is a vector-valued function,
G is a d x m matrix-valued function giving the noise coefficients and
dW(t) is a vector of m independent Wiener increments.
This algorithm is suitable for Stratonovich systems with an arbitrary noise
coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
or commutative). The algorithm has order 2.0 convergence for the
deterministic part alone and order 1.0 strong convergence for the complete
stochastic system.
Args:
f: A function f(y, t) returning an array of shape (d,)
Vector-valued function to define the deterministic part of the system
G: The d x m coefficient function G can be given in two different ways:
You can provide a single function G(y, t) that returns an array of
shape (d, m). In this case the entire matrix G() will be evaluated
2m+1 times at each time step so complexity grows quadratically with m.
Alternatively you can provide a list of m functions g(y, t) each
defining one column of G (each returning an array of shape (d,).
In this case each g will be evaluated 3 times at each time step so
complexity grows linearly with m. If your system has large m and
G involves complicated functions, consider using this way.
y0: array of shape (d,) giving the initial state vector y(t==0)
tspan (array): The sequence of time points for which to solve for y.
These must be equally spaced, e.g. np.arange(0,10,0.005)
tspan[0] is the intial time corresponding to the initial state y0.
Jmethod (callable, optional): which function to use to simulate repeated
Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
default) or sdeint.Jwik (which is more accurate but uses a lot of
memory in the current implementation).
dW: optional array of shape (len(tspan)-1, d).
J: optional array of shape (len(tspan)-1, m, m).
These optional arguments dW and J are for advanced use, if you want to
use a specific realization of the d independent Wiener processes and
their multiple integrals at each time step. If not provided, suitable
values will be generated randomly.
Returns:
y: array, with shape (len(tspan), len(y0))
With the initial value y0 in the first row
Raises:
SDEValueError
See also:
A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
Solutions of Stochastic Differential Equations | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/integrate.py#L350-L409 | [
"def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):\n \"\"\"Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta\n algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations). \n\n Algorithms SRI2 and SRS2 are almost identical and have the same extended\n B... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""Numerical integration algorithms for Ito and Stratonovich stochastic
ordinary differential equations.
Usage:
itoint(f, G, y0, tspan) for Ito equation dy = f dt + G dW
stratint(f, G, y0, tspan) for Stratonovich equation dy = f dt + G \circ dW
y0 is the initial value
tspan is an array of time values (currently these must be equally spaced)
function f is the deterministic part of the system (scalar or dx1 vector)
function G is the stochastic part of the system (scalar or d x m matrix)
sdeint will choose an algorithm for you. Or you can choose one explicitly:
itoEuler: the Euler-Maruyama algorithm for Ito equations.
stratHeun: the Stratonovich Heun algorithm for Stratonovich equations.
itoSRI2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRI2 for Ito equations.
stratSRS2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRS2 for Stratonovich equations.
stratKP2iS: the Kloeden and Platen two-step implicit order 1.0 strong algorithm
for Stratonovich equations.
"""
from __future__ import absolute_import
from .wiener import deltaW, Ikpw, Iwik, Jkpw, Jwik
import numpy as np
import numbers
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
class SDEValueError(Error):
    """Thrown if integration arguments fail some basic sanity checks."""
    pass
def _check_args(f, G, y0, tspan, dW=None, IJ=None):
    """Do some validation common to all algorithms. Find dimension d and number
    of Wiener processes m.

    Scalar systems are promoted to 1-dimensional vector systems: y0 becomes a
    length-1 array and f and G are wrapped to accept and return arrays.

    Returns:
        tuple (d, m, f, G, y0, tspan, dW, IJ), where d is the system
        dimension and m the number of independent Wiener processes.

    Raises:
        SDEValueError: if any argument fails a sanity check.
    """
    if not np.isclose(min(np.diff(tspan)), max(np.diff(tspan))):
        raise SDEValueError('Currently time steps must be equally spaced.')
    # Be flexible to allow scalar equations. convert them to a 1D vector system
    if isinstance(y0, numbers.Number):
        if isinstance(y0, numbers.Integral):
            numtype = np.float64
        else:
            numtype = type(y0)
        y0_orig = y0
        y0 = np.array([y0], dtype=numtype)
        # wrappers promoting scalar-valued f and G to array-valued functions:
        def make_vector_fn(fn):
            def newfn(y, t):
                return np.array([fn(y[0], t)], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        def make_matrix_fn(fn):
            def newfn(y, t):
                return np.array([[fn(y[0], t)]], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        # only wrap f and G if they actually return scalars for scalar input
        if isinstance(f(y0_orig, tspan[0]), numbers.Number):
            f = make_vector_fn(f)
        if isinstance(G(y0_orig, tspan[0]), numbers.Number):
            G = make_matrix_fn(G)
    # determine dimension d of the system
    d = len(y0)
    if len(f(y0, tspan[0])) != d:
        raise SDEValueError('y0 and f have incompatible shapes.')
    message = """y0 has length %d. So G must either be a single function
              returning a matrix of shape (%d, m), or else a list of m separate
              functions each returning a column of G, with shape (%d,)""" % (
                  d, d, d)
    if callable(G):
        # then G must be a function returning a d x m matrix
        Gtest = G(y0, tspan[0])
        if Gtest.ndim != 2 or Gtest.shape[0] != d:
            raise SDEValueError(message)
        # determine number of independent Wiener processes m
        m = Gtest.shape[1]
    else:
        # G should be a list of m functions g_i giving columns of G
        G = tuple(G)
        m = len(G)
        Gtest = np.zeros((d, m))
        for k in range(0, m):
            if not callable(G[k]):
                raise SDEValueError(message)
            Gtestk = G[k](y0, tspan[0])
            if np.shape(Gtestk) != (d,):
                raise SDEValueError(message)
            Gtest[:,k] = Gtestk
    message = """From function G, it seems m==%d. If present, the optional
              parameter dW must be an array of shape (len(tspan)-1, m) giving
              m independent Wiener increments for each time interval.""" % m
    if dW is not None:
        if not hasattr(dW, 'shape') or dW.shape != (len(tspan) - 1, m):
            raise SDEValueError(message)
    message = """From function G, it seems m==%d. If present, the optional
              parameter I or J must be an array of shape (len(tspan)-1, m, m)
              giving an m x m matrix of repeated integral values for each
              time interval.""" % m
    if IJ is not None:
        if not hasattr(IJ, 'shape') or IJ.shape != (len(tspan) - 1, m, m):
            raise SDEValueError(message)
    return (d, m, f, G, y0, tspan, dW, IJ)
def itoint(f, G, y0, tspan):
    """Numerically integrate the Ito equation dy = f(y,t)dt + G(y,t)dW

    Here y is the d-dimensional state vector, f(y, t) returns the drift
    vector of shape (d,), the d x m matrix-valued function G(y, t) gives the
    noise coefficients, and dW(t) = (dW_1, ..., dW_m) collects m independent
    Wiener increments.

    Args:
        f: callable(y, t) returning a (d,) array, the deterministic part.
        G: callable(y, t) returning a (d, m) array, the noise coefficients.
        y0: array of shape (d,), the state at time tspan[0].
        tspan (array): equally spaced time points to solve for, e.g.
          np.arange(0, 10, 0.005).

    Returns:
        y: array of shape (len(tspan), len(y0)) with y0 in the first row.

    Raises:
        SDEValueError
    """
    # Validate arguments up front so the caller gets a clear SDEValueError.
    _check_args(f, G, y0, tspan, None, None)
    # In future versions the most suitable Ito algorithm could be chosen
    # automatically from properties of the system and noise; currently SRI2
    # is the general-purpose Ito scheme used.
    return itoSRI2(f, G, y0, tspan)
def stratint(f, G, y0, tspan):
    r"""Numerically integrate Stratonovich equation dy = f(y,t)dt + G(y,t).dW

    Here y is the d-dimensional state vector, f(y, t) returns the drift
    vector of shape (d,), the d x m matrix-valued function G(y, t) gives the
    noise coefficients, and dW(t) = (dW_1, ..., dW_m) collects m independent
    Wiener increments.

    Args:
        f: callable(y, t) returning a (d,) array, the deterministic part.
        G: callable(y, t) returning a (d, m) array, the noise coefficients.
        y0: array of shape (d,), the state at time tspan[0].
        tspan (array): equally spaced time points to solve for, e.g.
          np.arange(0, 10, 0.005).

    Returns:
        y: array of shape (len(tspan), len(y0)) with y0 in the first row.

    Raises:
        SDEValueError
    """
    # Validate arguments up front so the caller gets a clear SDEValueError.
    _check_args(f, G, y0, tspan, None, None)
    # In future versions the most suitable Stratonovich algorithm could be
    # chosen automatically from properties of the system and noise; currently
    # SRS2 is the general-purpose Stratonovich scheme used.
    return stratSRS2(f, G, y0, tspan)
def itoEuler(f, G, y0, tspan, dW=None):
    """Integrate the Ito equation dy = f(y,t)dt + G(y,t) dW(t) with the
    explicit Euler-Maruyama method.

    Here y is the d-dimensional state vector, f(y, t) returns the drift
    vector of shape (d,), G(y, t) returns the d x m noise coefficient matrix
    and dW(t) = (dW_1, ..., dW_m) collects m independent Wiener increments.

    Args:
        f: callable(y, t) returning a (d,) array, the deterministic part.
        G: callable(y, t) returning a (d, m) array, the noise coefficients.
        y0: array of shape (d,), the state at time tspan[0].
        tspan (array): equally spaced time points to solve for, e.g.
          np.arange(0, 10, 0.005).
        dW: optional (len(tspan)-1, d) array supplying a specific realization
          of the Wiener increments (advanced use); generated randomly if
          omitted.

    Returns:
        y: array of shape (len(tspan), len(y0)) with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        G. Maruyama (1955) Continuous Markov processes and stochastic
        equations
        Kloeden and Platen (1999) Numerical Solution of Differential
        Equations
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # uniform step size
    if dW is None:
        # no increments supplied: draw them for m independent Wiener processes
        dW = deltaW(N - 1, m, h)
    y = np.zeros((N, d), dtype=type(y0[0]))  # result, one row per time point
    y[0] = y0
    for n in range(N - 1):
        tn = tspan[n]
        Yn = y[n]
        # Euler-Maruyama update: drift step plus noise increment
        y[n+1] = Yn + f(Yn, tn)*h + G(Yn, tn).dot(dW[n, :])
    return y
def stratHeun(f, G, y0, tspan, dW=None):
    r"""Integrate the Stratonovich equation dy = f(y,t)dt + G(y,t) \circ dW(t)
    with the Stratonovich Heun (predictor-corrector) method.

    Here y is the d-dimensional state vector, f(y, t) returns the drift
    vector of shape (d,), G(y, t) returns the d x m noise coefficient matrix
    and dW(t) = (dW_1, ..., dW_m) collects m independent Wiener increments.

    Args:
        f: callable(y, t) returning a (d,) array, the deterministic part.
        G: callable(y, t) returning a (d, m) array, the noise coefficients.
        y0: array of shape (d,), the state at time tspan[0].
        tspan (array): equally spaced time points to solve for, e.g.
          np.arange(0, 10, 0.005).
        dW: optional (len(tspan)-1, d) array supplying a specific realization
          of the Wiener increments (advanced use); generated randomly if
          omitted.

    Returns:
        y: array of shape (len(tspan), len(y0)) with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        W. Rumelin (1982) Numerical Treatment of Stochastic Differential
        Equations
        R. Mannella (2002) Integration of Stochastic Differential Equations
        on a Computer
        K. Burrage, P. M. Burrage and T. Tian (2004) Numerical methods for
        strong solutions of stochastic differential equations: an overview
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # uniform step size
    if dW is None:
        # no increments supplied: draw them for m independent Wiener processes
        dW = deltaW(N - 1, m, h)
    y = np.zeros((N, d), dtype=type(y0[0]))  # result, one row per time point
    y[0] = y0
    for n in range(N - 1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        Yn = y[n]
        dWn = dW[n, :]
        fn = f(Yn, tn)
        Gn = G(Yn, tn)
        # Euler predictor step
        Ybar = Yn + fn*h + Gn.dot(dWn)
        fbar = f(Ybar, tnp1)
        Gbar = G(Ybar, tnp1)
        # Heun corrector: average the slopes at both ends of the interval
        y[n+1] = Yn + 0.5*(fn + fbar)*h + 0.5*(Gn + Gbar).dot(dWn)
    return y
def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):
    """Integrate the Ito equation dy = f(y,t)dt + G(y,t)dW(t) using the
    order 1.0 strong Stochastic Runge-Kutta scheme SRI2 of Roessler (2010).

    The state y is a d-dimensional vector, f(y, t) gives the drift and the
    d x m matrix-valued function G(y, t) gives the noise coefficients for m
    independent Wiener increments dW(t). No structure is assumed of the
    noise: G may be non-scalar, non-diagonal and non-commutative. The scheme
    has strong order 1.0 convergence overall and order 2.0 on the
    deterministic part taken alone.

    Args:
        f: callable(y, t) returning an array of shape (d,), the drift.
        G: the noise coefficients, given in either of two forms:
          a single callable G(y, t) returning an array of shape (d, m)
          (the whole matrix is evaluated 2m+1 times per step, so cost grows
          quadratically with m), or a list of m callables g(y, t) each
          returning one column of shape (d,) (each evaluated 3 times per
          step, so cost grows linearly with m - prefer this form when m is
          large and G is expensive).
        y0: array of shape (d,), the initial state at time tspan[0].
        tspan (array): equally spaced time points to solve for, e.g.
          np.arange(0, 10, 0.005).
        Imethod (callable, optional): generator of repeated Ito integrals;
          either sdeint.Ikpw (the default) or sdeint.Iwik (more accurate,
          but memory-hungry in the current implementation).
        dW: optional array of shape (len(tspan)-1, d) giving a specific
          realization of the Wiener increments (advanced use).
        I: optional array of shape (len(tspan)-1, m, m) giving the matching
          repeated integrals (advanced use). Random values are generated
          for dW and I when they are not supplied.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation
        of Solutions of Stochastic Differential Equations
    """
    # SRI2 and the Stratonovich scheme SRS2 share one extended Butcher
    # tableau; they differ only in using Ito repeated integrals I_ij, so both
    # are handled by the common implementation.
    return _Roessler2010_SRK2(f, G, y0, tspan, Imethod, dW, I)
def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):
    """Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
    algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations).

    Algorithms SRI2 and SRS2 are almost identical and have the same extended
    Butcher tableaus. The difference is that Ito repeated integrals I_ij are
    replaced by Stratonovich repeated integrals J_ij when integrating a
    Stratonovich equation (Theorem 6.2 in Roessler2010).

    Args:
        f: A function f(y, t) returning an array of shape (d,)
        G: Either a function G(y, t) that returns an array of shape (d, m),
           or a list of m functions g(y, t) each returning an array shape (d,).
        y0: array of shape (d,) giving the initial state
        tspan (array): Sequence of equally spaced time points
        IJmethod (callable): which function to use to generate repeated
          integrals. N.B. for an Ito equation, must use an Ito version here
          (either Ikpw or Iwik). For a Stratonovich equation, must use a
          Stratonovich version here (Jkpw or Jwik).
        dW: optional array of shape (len(tspan)-1, d).
        IJ: optional array of shape (len(tspan)-1, m, m).
           Optional arguments dW and IJ are for advanced use, if you want to
           use a specific realization of the d independent Wiener processes
           and their multiple integrals at each time step. If not provided,
           suitable values will be generated randomly.

    Returns:
        y: array, with shape (len(tspan), len(y0))

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, IJ) = _check_args(f, G, y0, tspan, dW, IJ)
    have_separate_g = (not callable(G))  # G given as m separate functions?
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)  # shape (N-1, m)
    if IJ is None:
        # pre-generate repeated stochastic integrals for each time step.
        # Must give I_ij for the Ito case or J_ij for the Stratonovich case:
        __, I = IJmethod(dW, h)  # shape (N-1, m, m)
    else:
        I = IJ
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    y[0] = y0
    Gn = np.zeros((d, m), dtype=y.dtype)
    for n in range(0, N-1):
        tn = tspan[n]
        tn1 = tspan[n+1]
        h = tn1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n]  # shape (d,)
        Ik = dW[n, :]  # shape (m,)
        Iij = I[n, :, :]  # shape (m, m)
        fnh = f(Yn, tn)*h  # shape (d,)
        if have_separate_g:
            for k in range(0, m):
                Gn[:, k] = G[k](Yn, tn)
        else:
            Gn = G(Yn, tn)
        sum1 = np.dot(Gn, Iij)/sqrth  # shape (d, m)
        # supporting stage values of the scheme:
        H20 = Yn + fnh  # shape (d,)
        H20b = np.reshape(H20, (d, 1))
        H2 = H20b + sum1  # shape (d, m)
        H3 = H20b - sum1
        fn1h = f(H20, tn1)*h
        Yn1 = Yn + 0.5*(fnh + fn1h) + np.dot(Gn, Ik)
        if have_separate_g:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G[k](H2[:, k], tn1) - G[k](H3[:, k], tn1))
        else:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G(H2[:, k], tn1)[:, k] -
                                  G(H3[:, k], tn1)[:, k])
        y[n+1] = Yn1
    return y
def stratKP2iS(f, G, y0, tspan, Jmethod=Jkpw, gam=None, al1=None, al2=None,
rtol=1e-4, dW=None, J=None):
"""Use the Kloeden and Platen two-step implicit order 1.0 strong algorithm
to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)
This semi-implicit algorithm may be useful for stiff systems. The noise
does not need to be scalar, diagonal, or commutative.
This algorithm is defined in Kloeden and Platen (1999) section 12.4,
equations (4.5) and (4.7). Here implementing that scheme with default
parameters \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5 for k=1..d using
MINPACK HYBRD algorithm to solve the implicit vector equation at each step.
Args:
f: A function f(y, t) returning an array of shape (d,) to define the
deterministic part of the system
G: A function G(y, t) returning an array of shape (d, m) to define the
noise coefficients of the system
y0: array of shape (d,) giving the initial state
tspan (array): Sequence of equally spaced time points
Jmethod (callable, optional): which function to use to simulate repeated
Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
default) or sdeint.Jwik (which is more accurate but uses a lot of
memory in the current implementation).
gam, al1, al2 (optional arrays of shape (d,)): These can configure free
parameters \gamma_k, \alpha_{1,k}, \alpha_{2,k} of the algorithm.
You can omit these, then the default values 0.5 will be used.
rtol (float, optional): Relative error tolerance. The default is 1e-4.
This is the relative tolerance used when solving the implicit equation
for Y_{n+1} at each step. It does not mean that the overall sample path
approximation has this relative precision.
dW: optional array of shape (len(tspan)-1, d).
J: optional array of shape (len(tspan)-1, m, m).
These optional arguments dW and J are for advanced use, if you want to
use a specific realization of the d independent Wiener processes and
their multiple integrals at each time step. If not provided, suitable
values will be generated randomly.
Returns:
y: array, with shape (len(tspan), len(y0))
Raises:
SDEValueError, RuntimeError
See also:
P. Kloeden and E. Platen (1999) Numerical Solution of Stochastic
Differential Equations, revised and updated 3rd printing.
"""
try:
from scipy.optimize import fsolve
except ImportError:
raise Error('stratKP2iS() requires package ``scipy`` to be installed.')
(d, m, f, G, y0, tspan, dW, J) = _check_args(f, G, y0, tspan, dW, J)
if not callable(G):
raise SDEValueError('G should be a function returning a d x m matrix.')
if np.iscomplexobj(y0):
raise SDEValueError("stratKP2iS() can't yet handle complex variables.")
if gam is None:
gam = np.ones((d,))*0.5 # Default level of implicitness \gamma_k = 0.5
if al1 is None:
al1 = np.ones((d,))*0.5 # Default \alpha_{1,k} = 0.5
if al2 is None:
al2 = np.ones((d,))*0.5 # Default \alpha_{2,k} = 0.5
N = len(tspan)
h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
if dW is None:
# pre-generate Wiener increments (for m independent Wiener processes):
dW = deltaW(N - 1, m, h) # shape (N, m)
if J is None:
# pre-generate repeated Stratonovich integrals for each time step
__, J = Jmethod(dW, h) # shape (N, m, m)
# allocate space for result
y = np.zeros((N, d), dtype=type(y0[0]))
def _imp(Ynp1, Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1):
"""At each step we will solve _imp(Ynp1, ...) == 0 for Ynp1.
The meaning of these arguments is: Y_{n+1}, Y_n, Y_{n-1}, V_n, V_{n-1},
t_{n+1}, t_n, t_{n-1}, f(Y_n, t_n), f(Y_{n-1}, t_{n-1})."""
return ((1 - gam)*Yn + gam*Ynm1 + (al2*f(Ynp1, tnp1) +
(gam*al1 + (1 - al2))*fn + gam*(1 - al1)*fnm1)*h + Vn +
gam*Vnm1 - Ynp1)
fn = None
Vn = None
y[0] = y0;
for n in range(0, N-1):
tn = tspan[n]
tnp1 = tspan[n+1]
h = tnp1 - tn
sqrth = np.sqrt(h)
Yn = y[n] # shape (d,)
Jk = dW[n,:] # shape (m,)
Jij = J[n,:,:] # shape (m, m)
fnm1 = fn
fn = f(Yn, tn)
Gn = G(Yn, tn)
Ybar = (Yn + fn*h).reshape((d, 1)) + Gn*sqrth # shape (d, m)
sum1 = np.zeros((d,))
for j1 in range(0, m):
sum1 += np.dot(G(Ybar[:,j1], tn) - Gn, Jij[j1,:])
Vnm1 = Vn
Vn = np.dot(Gn, Jk) + sum1/sqrth
if n == 0:
# First step uses Kloeden&Platen explicit order 1.0 strong scheme:
y[n+1] = Yn + fn*h + Vn
continue
tnm1 = tspan[n-1]
Ynm1 = y[n-1] # shape (d,)
# now solve _imp(Ynp1, ...) == 0 for Ynp1, near to Yn
args = (Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1)
(Ynp1, __, status, msg) = fsolve(_imp, Yn, args=args, xtol=rtol,
full_output=True)
if status == 1:
y[n+1] = Ynp1
else:
m = """At time t_n = %g Failed to solve for Y_{n+1} with args %s.
Reason: %s""" % (tn, args, msg)
raise RuntimeError(m)
return y
|
mattja/sdeint | sdeint/integrate.py | _Roessler2010_SRK2 | python | def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):
(d, m, f, G, y0, tspan, dW, IJ) = _check_args(f, G, y0, tspan, dW, IJ)
have_separate_g = (not callable(G)) # if G is given as m separate functions
N = len(tspan)
h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
if dW is None:
# pre-generate Wiener increments (for m independent Wiener processes):
dW = deltaW(N - 1, m, h) # shape (N, m)
if IJ is None:
# pre-generate repeated stochastic integrals for each time step.
# Must give I_ij for the Ito case or J_ij for the Stratonovich case:
__, I = IJmethod(dW, h) # shape (N, m, m)
else:
I = IJ
# allocate space for result
y = np.zeros((N, d), dtype=type(y0[0]))
y[0] = y0;
Gn = np.zeros((d, m), dtype=y.dtype)
for n in range(0, N-1):
tn = tspan[n]
tn1 = tspan[n+1]
h = tn1 - tn
sqrth = np.sqrt(h)
Yn = y[n] # shape (d,)
Ik = dW[n,:] # shape (m,)
Iij = I[n,:,:] # shape (m, m)
fnh = f(Yn, tn)*h # shape (d,)
if have_separate_g:
for k in range(0, m):
Gn[:,k] = G[k](Yn, tn)
else:
Gn = G(Yn, tn)
sum1 = np.dot(Gn, Iij)/sqrth # shape (d, m)
H20 = Yn + fnh # shape (d,)
H20b = np.reshape(H20, (d, 1))
H2 = H20b + sum1 # shape (d, m)
H30 = Yn
H3 = H20b - sum1
fn1h = f(H20, tn1)*h
Yn1 = Yn + 0.5*(fnh + fn1h) + np.dot(Gn, Ik)
if have_separate_g:
for k in range(0, m):
Yn1 += 0.5*sqrth*(G[k](H2[:,k], tn1) - G[k](H3[:,k], tn1))
else:
for k in range(0, m):
Yn1 += 0.5*sqrth*(G(H2[:,k], tn1)[:,k] - G(H3[:,k], tn1)[:,k])
y[n+1] = Yn1
return y | Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations).
Algorithms SRI2 and SRS2 are almost identical and have the same extended
Butcher tableaus. The difference is that Ito repeateded integrals I_ij are
replaced by Stratonovich repeated integrals J_ij when integrating a
Stratonovich equation (Theorem 6.2 in Roessler2010).
Args:
f: A function f(y, t) returning an array of shape (d,)
G: Either a function G(y, t) that returns an array of shape (d, m),
or a list of m functions g(y, t) each returning an array shape (d,).
y0: array of shape (d,) giving the initial state
tspan (array): Sequence of equally spaced time points
IJmethod (callable): which function to use to generate repeated
integrals. N.B. for an Ito equation, must use an Ito version here
(either Ikpw or Iwik). For a Stratonovich equation, must use a
Stratonovich version here (Jkpw or Jwik).
dW: optional array of shape (len(tspan)-1, d).
IJ: optional array of shape (len(tspan)-1, m, m).
Optional arguments dW and IJ are for advanced use, if you want to
use a specific realization of the d independent Wiener processes and
their multiple integrals at each time step. If not provided, suitable
values will be generated randomly.
Returns:
y: array, with shape (len(tspan), len(y0))
Raises:
SDEValueError
See also:
A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
Solutions of Stochastic Differential Equations | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/integrate.py#L412-L494 | [
"def deltaW(N, m, h):\n \"\"\"Generate sequence of Wiener increments for m independent Wiener\n processes W_j(t) j=0..m-1 for each of N time intervals of length h. \n\n Returns:\n dW (array of shape (N, m)): The [n, j] element has the value\n W_j((n+1)*h) - W_j(n*h) \n \"\"\"\n return np... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""Numerical integration algorithms for Ito and Stratonovich stochastic
ordinary differential equations.
Usage:
itoint(f, G, y0, tspan) for Ito equation dy = f dt + G dW
stratint(f, G, y0, tspan) for Stratonovich equation dy = f dt + G \circ dW
y0 is the initial value
tspan is an array of time values (currently these must be equally spaced)
function f is the deterministic part of the system (scalar or dx1 vector)
function G is the stochastic part of the system (scalar or d x m matrix)
sdeint will choose an algorithm for you. Or you can choose one explicitly:
itoEuler: the Euler-Maruyama algorithm for Ito equations.
stratHeun: the Stratonovich Heun algorithm for Stratonovich equations.
itoSRI2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRI2 for Ito equations.
stratSRS2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRS2 for Stratonovich equations.
stratKP2iS: the Kloeden and Platen two-step implicit order 1.0 strong algorithm
for Stratonovich equations.
"""
from __future__ import absolute_import
from .wiener import deltaW, Ikpw, Iwik, Jkpw, Jwik
import numpy as np
import numbers
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
class SDEValueError(Error):
    """Thrown if integration arguments fail some basic sanity checks,
    e.g. unequally spaced tspan, or incompatible shapes of y0, f and G."""
    pass
def _check_args(f, G, y0, tspan, dW=None, IJ=None):
    """Do some validation common to all algorithms. Find dimension d and number
    of Wiener processes m.

    Scalar equations (y0 a plain number, f and G returning numbers) are
    converted to an equivalent 1-dimensional vector system so that the
    integration algorithms can assume array-valued y0, f and G throughout.

    Args:
        f: drift function f(y, t); returns an array of shape (d,)
            (or a plain number for a scalar equation).
        G: diffusion coefficients; either a function G(y, t) returning an
            array of shape (d, m), or a sequence of m functions each
            returning one column of G with shape (d,).
        y0: initial state, array of shape (d,) or a plain number.
        tspan (array): time points; must be equally spaced.
        dW: optional pre-generated Wiener increments, shape (len(tspan)-1, m).
        IJ: optional repeated integral values, shape (len(tspan)-1, m, m).

    Returns:
        (d, m, f, G, y0, tspan, dW, IJ), with f, G and y0 possibly wrapped
        into vector form.

    Raises:
        SDEValueError: if any of the sanity checks fail.
    """
    if not np.isclose(min(np.diff(tspan)), max(np.diff(tspan))):
        raise SDEValueError('Currently time steps must be equally spaced.')
    # Be flexible to allow scalar equations. convert them to a 1D vector system
    if isinstance(y0, numbers.Number):
        if isinstance(y0, numbers.Integral):
            # integrate in floating point even if the initial value is an int
            numtype = np.float64
        else:
            numtype = type(y0)
        y0_orig = y0
        y0 = np.array([y0], dtype=numtype)
        # wrap a scalar-valued f so that it returns a length-1 vector:
        def make_vector_fn(fn):
            def newfn(y, t):
                return np.array([fn(y[0], t)], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        # wrap a scalar-valued G so that it returns a 1 x 1 matrix:
        def make_matrix_fn(fn):
            def newfn(y, t):
                return np.array([[fn(y[0], t)]], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        if isinstance(f(y0_orig, tspan[0]), numbers.Number):
            f = make_vector_fn(f)
        if isinstance(G(y0_orig, tspan[0]), numbers.Number):
            G = make_matrix_fn(G)
    # determine dimension d of the system
    d = len(y0)
    if len(f(y0, tspan[0])) != d:
        raise SDEValueError('y0 and f have incompatible shapes.')
    message = """y0 has length %d. So G must either be a single function
              returning a matrix of shape (%d, m), or else a list of m separate
              functions each returning a column of G, with shape (%d,)""" % (
                  d, d, d)
    if callable(G):
        # then G must be a function returning a d x m matrix
        Gtest = G(y0, tspan[0])
        if Gtest.ndim != 2 or Gtest.shape[0] != d:
            raise SDEValueError(message)
        # determine number of independent Wiener processes m
        m = Gtest.shape[1]
    else:
        # G should be a list of m functions g_i giving columns of G
        G = tuple(G)
        m = len(G)
        Gtest = np.zeros((d, m))
        for k in range(0, m):
            if not callable(G[k]):
                raise SDEValueError(message)
            Gtestk = G[k](y0, tspan[0])
            if np.shape(Gtestk) != (d,):
                raise SDEValueError(message)
            Gtest[:,k] = Gtestk
    message = """From function G, it seems m==%d. If present, the optional
              parameter dW must be an array of shape (len(tspan)-1, m) giving
              m independent Wiener increments for each time interval.""" % m
    if dW is not None:
        # any user-supplied Wiener increments must have matching shape
        if not hasattr(dW, 'shape') or dW.shape != (len(tspan) - 1, m):
            raise SDEValueError(message)
    message = """From function G, it seems m==%d. If present, the optional
              parameter I or J must be an array of shape (len(tspan)-1, m, m)
              giving an m x m matrix of repeated integral values for each
              time interval.""" % m
    if IJ is not None:
        # any user-supplied repeated integrals must have matching shape
        if not hasattr(IJ, 'shape') or IJ.shape != (len(tspan) - 1, m, m):
            raise SDEValueError(message)
    return (d, m, f, G, y0, tspan, dW, IJ)
def itoint(f, G, y0, tspan):
    """Numerically integrate the Ito equation dy = f(y,t)dt + G(y,t)dW.

    Here y is the d-dimensional state vector, f is a vector-valued drift
    function, G is a d x m matrix-valued function giving the noise
    coefficients, and dW(t) = (dW_1, ..., dW_m) is a vector of independent
    Wiener increments.

    Args:
        f: callable(y, t) returning a numpy array of shape (d,).
        G: callable(y, t) returning a numpy array of shape (d, m).
        y0: array of shape (d,) giving the initial state vector y(t==0).
        tspan (array): equally spaced time points to solve for y, e.g.
            np.arange(0, 10, 0.005). tspan[0] is the initial time
            corresponding to the initial state y0.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError
    """
    # Validate (and, for scalar equations, vectorize) the arguments. In a
    # future version the properties found here could be used to pick the
    # most suitable Ito algorithm automatically; for now SRI2 is used.
    (d, m, f, G, y0, tspan, __, __) = _check_args(f, G, y0, tspan, None, None)
    return itoSRI2(f, G, y0, tspan)
def stratint(f, G, y0, tspan):
    r"""Numerically integrate the Stratonovich equation
    dy = f(y,t)dt + G(y,t) \circ dW.

    Here y is the d-dimensional state vector, f is a vector-valued drift
    function, G is a d x m matrix-valued function giving the noise
    coefficients, and dW(t) = (dW_1, ..., dW_m) is a vector of independent
    Wiener increments.

    Args:
        f: callable(y, t) returning a numpy array of shape (d,).
        G: callable(y, t) returning a numpy array of shape (d, m).
        y0: array of shape (d,) giving the initial state vector y(t==0).
        tspan (array): equally spaced time points to solve for y, e.g.
            np.arange(0, 10, 0.005). tspan[0] is the initial time
            corresponding to the initial state y0.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError
    """
    # Validate (and, for scalar equations, vectorize) the arguments. In a
    # future version the properties found here could be used to pick the
    # most suitable Stratonovich algorithm automatically; for now SRS2.
    (d, m, f, G, y0, tspan, __, __) = _check_args(f, G, y0, tspan, None, None)
    return stratSRS2(f, G, y0, tspan)
def itoEuler(f, G, y0, tspan, dW=None):
    """Integrate the Ito equation dy = f(y,t)dt + G(y,t) dW(t) using the
    Euler-Maruyama algorithm.

    Here y is the d-dimensional state vector, f is a vector-valued drift
    function, G is a d x m matrix-valued function giving the noise
    coefficients, and dW(t) = (dW_1, ..., dW_m) is a vector of independent
    Wiener increments.

    Args:
        f: callable(y, t) returning a (d,) array (deterministic part).
        G: callable(y, t) returning a (d, m) array (noise coefficients).
        y0: array of shape (d,) giving the initial state vector y(t==0).
        tspan (array): equally spaced time points; tspan[0] is the initial
            time corresponding to the initial state y0.
        dW: optional array of shape (len(tspan)-1, m) supplying a specific
            realization of the Wiener increments (advanced use). If
            omitted, increments are generated randomly.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        G. Maruyama (1955) Continuous Markov processes and stochastic
        equations
        Kloeden and Platen (1999) Numerical Solution of Stochastic
        Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    n_steps = len(tspan) - 1
    h = (tspan[-1] - tspan[0])/n_steps  # time step (equal spacing assumed)
    # allocate space for the result; first row holds the initial state
    y = np.zeros((n_steps + 1, d), dtype=type(y0[0]))
    if dW is None:
        # generate increments of the m independent Wiener processes
        dW = deltaW(n_steps, m, h)
    y[0] = y0
    for n, tn in enumerate(tspan[:-1]):
        yn = y[n]
        y[n+1] = yn + f(yn, tn)*h + G(yn, tn).dot(dW[n,:])
    return y
def stratHeun(f, G, y0, tspan, dW=None):
    r"""Integrate the Stratonovich equation dy = f(y,t)dt + G(y,t) \circ dW(t)
    using the Stratonovich Heun (predictor-corrector) algorithm.

    Here y is the d-dimensional state vector, f is a vector-valued drift
    function, G is a d x m matrix-valued function giving the noise
    coefficients, and dW(t) = (dW_1, ..., dW_m) is a vector of independent
    Wiener increments.

    Args:
        f: callable(y, t) returning a (d,) array (deterministic part).
        G: callable(y, t) returning a (d, m) array (noise coefficients).
        y0: array of shape (d,) giving the initial state vector y(t==0).
        tspan (array): equally spaced time points; tspan[0] is the initial
            time corresponding to the initial state y0.
        dW: optional array of shape (len(tspan)-1, m) supplying a specific
            realization of the Wiener increments (advanced use). If
            omitted, increments are generated randomly.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        W. Rumelin (1982) Numerical Treatment of Stochastic Differential
        Equations
        R. Mannella (2002) Integration of Stochastic Differential Equations
        on a Computer
        K. Burrage, P. M. Burrage and T. Tian (2004) Numerical methods for
        strong solutions of stochastic differential equations: an overview
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    n_steps = len(tspan) - 1
    h = (tspan[-1] - tspan[0])/n_steps  # time step (equal spacing assumed)
    # allocate space for the result; first row holds the initial state
    y = np.zeros((n_steps + 1, d), dtype=type(y0[0]))
    if dW is None:
        # generate increments of the m independent Wiener processes
        dW = deltaW(n_steps, m, h)
    y[0] = y0
    for n in range(n_steps):
        t0, t1 = tspan[n], tspan[n+1]
        yn = y[n]
        dWn = dW[n,:]
        drift0 = f(yn, t0)
        diff0 = G(yn, t0)
        # Euler predictor step
        y_pred = yn + drift0*h + diff0.dot(dWn)
        # trapezoidal corrector averaging the start and predicted points
        drift1 = f(y_pred, t1)
        diff1 = G(y_pred, t1)
        y[n+1] = yn + 0.5*(drift0 + drift1)*h + 0.5*(diff0 + diff1).dot(dWn)
    return y
def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):
    """Integrate an Ito equation dy = f(y,t)dt + G(y,t)dW(t) with the
    Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm SRI2.

    Suitable for Ito systems with an arbitrary noise coefficient matrix G
    (the noise need not be scalar, diagonal or commutative). Convergence
    is order 2.0 for the deterministic part alone and order 1.0 strong for
    the complete stochastic system.

    Args:
        f: function f(y, t) returning an array of shape (d,) (drift).
        G: the d x m diffusion coefficient, given in one of two ways:
            a single function G(y, t) returning an array of shape (d, m)
            (the whole matrix is then evaluated 2m+1 times per step, so
            cost grows quadratically with m), or a list of m functions
            g(y, t) each returning one column of shape (d,) (each is then
            evaluated 3 times per step, so cost grows linearly with m --
            prefer this form when m is large and G is expensive).
        y0: array of shape (d,) giving the initial state vector y(t==0).
        tspan (array): equally spaced time points; tspan[0] is the initial
            time corresponding to the initial state y0.
        Imethod (callable, optional): generator of repeated Ito integrals,
            either sdeint.Ikpw (the default) or sdeint.Iwik (more accurate
            but memory-hungry in the current implementation).
        dW: optional array of shape (len(tspan)-1, m) (advanced use).
        I: optional array of shape (len(tspan)-1, m, m) (advanced use).
            If dW and I are omitted, suitable values are generated
            randomly.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation
        of Solutions of Stochastic Differential Equations
    """
    # SRI2 and SRS2 share one implementation; passing an Ito repeated
    # integral generator here selects the Ito variant (Roessler2010,
    # Theorem 6.2).
    return _Roessler2010_SRK2(f, G, y0, tspan, Imethod, dW, I)
def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):
    r"""Integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)
    with the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
    algorithm SRS2.

    Suitable for Stratonovich systems with an arbitrary noise coefficient
    matrix G (the noise need not be scalar, diagonal or commutative).
    Convergence is order 2.0 for the deterministic part alone and order
    1.0 strong for the complete stochastic system.

    Args:
        f: function f(y, t) returning an array of shape (d,) (drift).
        G: the d x m diffusion coefficient, given in one of two ways:
            a single function G(y, t) returning an array of shape (d, m)
            (the whole matrix is then evaluated 2m+1 times per step, so
            cost grows quadratically with m), or a list of m functions
            g(y, t) each returning one column of shape (d,) (each is then
            evaluated 3 times per step, so cost grows linearly with m --
            prefer this form when m is large and G is expensive).
        y0: array of shape (d,) giving the initial state vector y(t==0).
        tspan (array): equally spaced time points; tspan[0] is the initial
            time corresponding to the initial state y0.
        Jmethod (callable, optional): generator of repeated Stratonovich
            integrals, either sdeint.Jkpw (the default) or sdeint.Jwik
            (more accurate but memory-hungry in the current
            implementation).
        dW: optional array of shape (len(tspan)-1, m) (advanced use).
        J: optional array of shape (len(tspan)-1, m, m) (advanced use).
            If dW and J are omitted, suitable values are generated
            randomly.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError

    See also:
        A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation
        of Solutions of Stochastic Differential Equations
    """
    # SRI2 and SRS2 share one implementation; passing a Stratonovich
    # repeated integral generator here selects the Stratonovich variant
    # (Roessler2010, Theorem 6.2).
    return _Roessler2010_SRK2(f, G, y0, tspan, Jmethod, dW, J)
def stratKP2iS(f, G, y0, tspan, Jmethod=Jkpw, gam=None, al1=None, al2=None,
               rtol=1e-4, dW=None, J=None):
    """Use the Kloeden and Platen two-step implicit order 1.0 strong algorithm
    to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)
    This semi-implicit algorithm may be useful for stiff systems. The noise
    does not need to be scalar, diagonal, or commutative.
    This algorithm is defined in Kloeden and Platen (1999) section 12.4,
    equations (4.5) and (4.7). Here implementing that scheme with default
    parameters \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5 for k=1..d using
    MINPACK HYBRD algorithm to solve the implicit vector equation at each step.
    Args:
        f: A function f(y, t) returning an array of shape (d,) to define the
            deterministic part of the system
        G: A function G(y, t) returning an array of shape (d, m) to define the
            noise coefficients of the system
        y0: array of shape (d,) giving the initial state
        tspan (array): Sequence of equally spaced time points
        Jmethod (callable, optional): which function to use to simulate repeated
            Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
            default) or sdeint.Jwik (which is more accurate but uses a lot of
            memory in the current implementation).
        gam, al1, al2 (optional arrays of shape (d,)): These can configure free
            parameters \gamma_k, \alpha_{1,k}, \alpha_{2,k} of the algorithm.
            You can omit these, then the default values 0.5 will be used.
        rtol (float, optional): Relative error tolerance. The default is 1e-4.
            This is the relative tolerance used when solving the implicit equation
            for Y_{n+1} at each step. It does not mean that the overall sample path
            approximation has this relative precision.
        dW: optional array of shape (len(tspan)-1, d).
        J: optional array of shape (len(tspan)-1, m, m).
            These optional arguments dW and J are for advanced use, if you want to
            use a specific realization of the d independent Wiener processes and
            their multiple integrals at each time step. If not provided, suitable
            values will be generated randomly.
    Returns:
        y: array, with shape (len(tspan), len(y0))
    Raises:
        SDEValueError, RuntimeError
    See also:
        P. Kloeden and E. Platen (1999) Numerical Solution of Stochastic
        Differential Equations, revised and updated 3rd printing.
    """
    try:
        from scipy.optimize import fsolve
    except ImportError:
        raise Error('stratKP2iS() requires package ``scipy`` to be installed.')
    (d, m, f, G, y0, tspan, dW, J) = _check_args(f, G, y0, tspan, dW, J)
    if not callable(G):
        raise SDEValueError('G should be a function returning a d x m matrix.')
    if np.iscomplexobj(y0):
        raise SDEValueError("stratKP2iS() can't yet handle complex variables.")
    if gam is None:
        gam = np.ones((d,))*0.5 # Default level of implicitness \gamma_k = 0.5
    if al1 is None:
        al1 = np.ones((d,))*0.5 # Default \alpha_{1,k} = 0.5
    if al2 is None:
        al2 = np.ones((d,))*0.5 # Default \alpha_{2,k} = 0.5
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h) # shape (N-1, m)
    if J is None:
        # pre-generate repeated Stratonovich integrals for each time step
        __, J = Jmethod(dW, h) # shape (N-1, m, m)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    def _imp(Ynp1, Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1):
        """At each step we will solve _imp(Ynp1, ...) == 0 for Ynp1.
        The meaning of these arguments is: Y_{n+1}, Y_n, Y_{n-1}, V_n, V_{n-1},
        t_{n+1}, t_n, t_{n-1}, f(Y_n, t_n), f(Y_{n-1}, t_{n-1})."""
        # residual of the two-step implicit scheme, K&P (1999) eq. (12.4.5)
        return ((1 - gam)*Yn + gam*Ynm1 + (al2*f(Ynp1, tnp1) +
                (gam*al1 + (1 - al2))*fn + gam*(1 - al1)*fnm1)*h + Vn +
                gam*Vnm1 - Ynp1)
    fn = None  # holds f(Y_n, t_n) carried over from the previous iteration
    Vn = None  # holds the stochastic part V_n from the previous iteration
    y[0] = y0;
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        h = tnp1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n] # shape (d,)
        Jk = dW[n,:] # shape (m,)
        Jij = J[n,:,:] # shape (m, m)
        fnm1 = fn
        fn = f(Yn, tn)
        Gn = G(Yn, tn)
        # supporting values: Euler predictor perturbed along each noise column
        Ybar = (Yn + fn*h).reshape((d, 1)) + Gn*sqrth # shape (d, m)
        sum1 = np.zeros((d,))
        for j1 in range(0, m):
            sum1 += np.dot(G(Ybar[:,j1], tn) - Gn, Jij[j1,:])
        Vnm1 = Vn
        # V_n: the explicit stochastic part of the scheme, K&P eq. (12.4.7)
        Vn = np.dot(Gn, Jk) + sum1/sqrth
        if n == 0:
            # First step uses Kloeden&Platen explicit order 1.0 strong scheme:
            y[n+1] = Yn + fn*h + Vn
            continue
        tnm1 = tspan[n-1]
        Ynm1 = y[n-1] # shape (d,)
        # now solve _imp(Ynp1, ...) == 0 for Ynp1, near to Yn
        args = (Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1)
        (Ynp1, __, status, msg) = fsolve(_imp, Yn, args=args, xtol=rtol,
                                         full_output=True)
        if status == 1:  # fsolve's ier == 1 means it converged
            y[n+1] = Ynp1
        else:
            # NOTE: rebinds the local name m (the noise dimension); harmless
            # only because we raise immediately afterwards.
            m = """At time t_n = %g Failed to solve for Y_{n+1} with args %s.
                  Reason: %s""" % (tn, args, msg)
            raise RuntimeError(m)
    return y
|
mattja/sdeint | sdeint/integrate.py | stratKP2iS | python | def stratKP2iS(f, G, y0, tspan, Jmethod=Jkpw, gam=None, al1=None, al2=None,
               rtol=1e-4, dW=None, J=None):
    """Kloeden & Platen two-step semi-implicit order 1.0 strong scheme for a
    Stratonovich SDE (K&P 1999, section 12.4, eqs (4.5) and (4.7)); the
    implicit vector equation at each step is solved with MINPACK HYBRD."""
    try:
        from scipy.optimize import fsolve
    except ImportError:
        raise Error('stratKP2iS() requires package ``scipy`` to be installed.')
    (d, m, f, G, y0, tspan, dW, J) = _check_args(f, G, y0, tspan, dW, J)
    if not callable(G):
        raise SDEValueError('G should be a function returning a d x m matrix.')
    if np.iscomplexobj(y0):
        raise SDEValueError("stratKP2iS() can't yet handle complex variables.")
    if gam is None:
        gam = np.ones((d,))*0.5 # Default level of implicitness \gamma_k = 0.5
    if al1 is None:
        al1 = np.ones((d,))*0.5 # Default \alpha_{1,k} = 0.5
    if al2 is None:
        al2 = np.ones((d,))*0.5 # Default \alpha_{2,k} = 0.5
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h) # shape (N-1, m)
    if J is None:
        # pre-generate repeated Stratonovich integrals for each time step
        __, J = Jmethod(dW, h) # shape (N-1, m, m)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    def _imp(Ynp1, Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1):
        """At each step we will solve _imp(Ynp1, ...) == 0 for Ynp1.
        The meaning of these arguments is: Y_{n+1}, Y_n, Y_{n-1}, V_n, V_{n-1},
        t_{n+1}, t_n, t_{n-1}, f(Y_n, t_n), f(Y_{n-1}, t_{n-1})."""
        return ((1 - gam)*Yn + gam*Ynm1 + (al2*f(Ynp1, tnp1) +
                (gam*al1 + (1 - al2))*fn + gam*(1 - al1)*fnm1)*h + Vn +
                gam*Vnm1 - Ynp1)
    fn = None  # holds f(Y_n, t_n) carried over from the previous iteration
    Vn = None  # holds the stochastic part V_n from the previous iteration
    y[0] = y0;
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        h = tnp1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n] # shape (d,)
        Jk = dW[n,:] # shape (m,)
        Jij = J[n,:,:] # shape (m, m)
        fnm1 = fn
        fn = f(Yn, tn)
        Gn = G(Yn, tn)
        # supporting values: Euler predictor perturbed along each noise column
        Ybar = (Yn + fn*h).reshape((d, 1)) + Gn*sqrth # shape (d, m)
        sum1 = np.zeros((d,))
        for j1 in range(0, m):
            sum1 += np.dot(G(Ybar[:,j1], tn) - Gn, Jij[j1,:])
        Vnm1 = Vn
        Vn = np.dot(Gn, Jk) + sum1/sqrth
        if n == 0:
            # First step uses Kloeden&Platen explicit order 1.0 strong scheme:
            y[n+1] = Yn + fn*h + Vn
            continue
        tnm1 = tspan[n-1]
        Ynm1 = y[n-1] # shape (d,)
        # now solve _imp(Ynp1, ...) == 0 for Ynp1, near to Yn
        args = (Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1)
        (Ynp1, __, status, msg) = fsolve(_imp, Yn, args=args, xtol=rtol,
                                         full_output=True)
        if status == 1:  # fsolve's ier == 1 means it converged
            y[n+1] = Ynp1
        else:
            # NOTE: rebinds the local name m (the noise dimension); harmless
            # only because we raise immediately afterwards.
            m = """At time t_n = %g Failed to solve for Y_{n+1} with args %s.
                  Reason: %s""" % (tn, args, msg)
            raise RuntimeError(m)
    return y | Use the Kloeden and Platen two-step implicit order 1.0 strong algorithm
to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)
This semi-implicit algorithm may be useful for stiff systems. The noise
does not need to be scalar, diagonal, or commutative.
This algorithm is defined in Kloeden and Platen (1999) section 12.4,
equations (4.5) and (4.7). Here implementing that scheme with default
parameters \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5 for k=1..d using
MINPACK HYBRD algorithm to solve the implicit vector equation at each step.
Args:
f: A function f(y, t) returning an array of shape (d,) to define the
deterministic part of the system
G: A function G(y, t) returning an array of shape (d, m) to define the
noise coefficients of the system
y0: array of shape (d,) giving the initial state
tspan (array): Sequence of equally spaced time points
Jmethod (callable, optional): which function to use to simulate repeated
Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
default) or sdeint.Jwik (which is more accurate but uses a lot of
memory in the current implementation).
gam, al1, al2 (optional arrays of shape (d,)): These can configure free
parameters \gamma_k, \alpha_{1,k}, \alpha_{2,k} of the algorithm.
You can omit these, then the default values 0.5 will be used.
rtol (float, optional): Relative error tolerance. The default is 1e-4.
This is the relative tolerance used when solving the implicit equation
for Y_{n+1} at each step. It does not mean that the overall sample path
approximation has this relative precision.
dW: optional array of shape (len(tspan)-1, d).
J: optional array of shape (len(tspan)-1, m, m).
These optional arguments dW and J are for advanced use, if you want to
use a specific realization of the d independent Wiener processes and
their multiple integrals at each time step. If not provided, suitable
values will be generated randomly.
Returns:
y: array, with shape (len(tspan), len(y0))
Raises:
SDEValueError, RuntimeError
See also:
P. Kloeden and E. Platen (1999) Numerical Solution of Stochastic
Differential Equations, revised and updated 3rd printing. | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/integrate.py#L497-L613 | [
"def deltaW(N, m, h):\n \"\"\"Generate sequence of Wiener increments for m independent Wiener\n processes W_j(t) j=0..m-1 for each of N time intervals of length h. \n\n Returns:\n dW (array of shape (N, m)): The [n, j] element has the value\n W_j((n+1)*h) - W_j(n*h) \n \"\"\"\n return np... | # Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
"""Numerical integration algorithms for Ito and Stratonovich stochastic
ordinary differential equations.
Usage:
itoint(f, G, y0, tspan) for Ito equation dy = f dt + G dW
stratint(f, G, y0, tspan) for Stratonovich equation dy = f dt + G \circ dW
y0 is the initial value
tspan is an array of time values (currently these must be equally spaced)
function f is the deterministic part of the system (scalar or dx1 vector)
function G is the stochastic part of the system (scalar or d x m matrix)
sdeint will choose an algorithm for you. Or you can choose one explicitly:
itoEuler: the Euler-Maruyama algorithm for Ito equations.
stratHeun: the Stratonovich Heun algorithm for Stratonovich equations.
itoSRI2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRI2 for Ito equations.
stratSRS2: the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
algorithm SRS2 for Stratonovich equations.
stratKP2iS: the Kloeden and Platen two-step implicit order 1.0 strong algorithm
for Stratonovich equations.
"""
from __future__ import absolute_import
from .wiener import deltaW, Ikpw, Iwik, Jkpw, Jwik
import numpy as np
import numbers
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
class SDEValueError(Error):
    """Raised when integration arguments fail basic sanity checks
    (incompatible shapes of f, G, y0, dW or the repeated-integral arrays,
    or unequally spaced time points)."""
    pass
def _check_args(f, G, y0, tspan, dW=None, IJ=None):
    """Do some validation common to all algorithms. Find dimension d and number
    of Wiener processes m.

    Scalar problems are transparently promoted to 1-dimensional vector form:
    a numeric y0 becomes a length-1 array and scalar-returning f and G are
    wrapped to return arrays of shape (1,) and (1, 1) respectively.

    Returns:
        (d, m, f, G, y0, tspan, dW, IJ) with f, G, y0 possibly wrapped.

    Raises:
        SDEValueError: if any shape or type check fails.
    """
    # Only fixed step sizes are supported: the smallest and largest
    # consecutive differences of tspan must agree (to float tolerance).
    if not np.isclose(min(np.diff(tspan)), max(np.diff(tspan))):
        raise SDEValueError('Currently time steps must be equally spaced.')
    # Be flexible to allow scalar equations. convert them to a 1D vector system
    if isinstance(y0, numbers.Number):
        if isinstance(y0, numbers.Integral):
            # Promote integer initial values to float so the state can evolve
            # continuously.
            numtype = np.float64
        else:
            numtype = type(y0)
        y0_orig = y0
        y0 = np.array([y0], dtype=numtype)
        def make_vector_fn(fn):
            # Wrap a scalar drift f(y, t) -> number as f(y, t) -> shape (1,).
            def newfn(y, t):
                return np.array([fn(y[0], t)], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        def make_matrix_fn(fn):
            # Wrap a scalar diffusion G(y, t) -> number as shape (1, 1).
            def newfn(y, t):
                return np.array([[fn(y[0], t)]], dtype=numtype)
            newfn.__name__ = fn.__name__
            return newfn
        # Probe with the original scalar y0 to see whether f and G are scalar.
        if isinstance(f(y0_orig, tspan[0]), numbers.Number):
            f = make_vector_fn(f)
        if isinstance(G(y0_orig, tspan[0]), numbers.Number):
            G = make_matrix_fn(G)
    # determine dimension d of the system
    d = len(y0)
    if len(f(y0, tspan[0])) != d:
        raise SDEValueError('y0 and f have incompatible shapes.')
    message = """y0 has length %d. So G must either be a single function
              returning a matrix of shape (%d, m), or else a list of m separate
              functions each returning a column of G, with shape (%d,)""" % (
                  d, d, d)
    if callable(G):
        # then G must be a function returning a d x m matrix
        Gtest = G(y0, tspan[0])
        if Gtest.ndim != 2 or Gtest.shape[0] != d:
            raise SDEValueError(message)
        # determine number of independent Wiener processes m
        m = Gtest.shape[1]
    else:
        # G should be a list of m functions g_i giving columns of G
        G = tuple(G)
        m = len(G)
        Gtest = np.zeros((d, m))
        for k in range(0, m):
            if not callable(G[k]):
                raise SDEValueError(message)
            Gtestk = G[k](y0, tspan[0])
            if np.shape(Gtestk) != (d,):
                raise SDEValueError(message)
            Gtest[:,k] = Gtestk
    message = """From function G, it seems m==%d. If present, the optional
              parameter dW must be an array of shape (len(tspan)-1, m) giving
              m independent Wiener increments for each time interval.""" % m
    if dW is not None:
        if not hasattr(dW, 'shape') or dW.shape != (len(tspan) - 1, m):
            raise SDEValueError(message)
    message = """From function G, it seems m==%d. If present, the optional
              parameter I or J must be an array of shape (len(tspan)-1, m, m)
              giving an m x m matrix of repeated integral values for each
              time interval.""" % m
    if IJ is not None:
        if not hasattr(IJ, 'shape') or IJ.shape != (len(tspan) - 1, m, m):
            raise SDEValueError(message)
    return (d, m, f, G, y0, tspan, dW, IJ)
def itoint(f, G, y0, tspan):
    """Numerically integrate the Ito equation dy = f(y,t)dt + G(y,t)dW.

    Here y is the d-dimensional state vector, f(y, t) gives the drift as an
    array of shape (d,), G(y, t) gives the d x m matrix of noise coefficients
    and dW(t) = (dW_1, ..., dW_m) is a vector of independent Wiener increments.

    Args:
        f: callable(y, t) returning a numpy array of shape (d,)
        G: callable(y, t) returning a numpy array of shape (d, m)
        y0: array of shape (d,) giving the initial state vector y(t==0)
        tspan (array): equally spaced time points for which to solve for y;
            tspan[0] is the initial time corresponding to y0.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError
    """
    # Validate and normalize the arguments up front, so callers get a clear
    # error here rather than from deep inside the chosen algorithm.
    (d, m, f, G, y0, tspan, __, __) = _check_args(f, G, y0, tspan, None, None)
    # In future versions the most suitable Ito algorithm could be selected
    # automatically from properties of the system and noise. For now SRI2
    # is always used.
    return itoSRI2(f, G, y0, tspan)
def stratint(f, G, y0, tspan):
    """Numerically integrate the Stratonovich equation dy = f(y,t)dt + G(y,t).dW.

    Here y is the d-dimensional state vector, f(y, t) gives the drift as an
    array of shape (d,), G(y, t) gives the d x m matrix of noise coefficients
    and dW(t) = (dW_1, ..., dW_m) is a vector of independent Wiener increments.

    Args:
        f: callable(y, t) returning a numpy array of shape (d,)
        G: callable(y, t) returning a numpy array of shape (d, m)
        y0: array of shape (d,) giving the initial state vector y(t==0)
        tspan (array): equally spaced time points for which to solve for y;
            tspan[0] is the initial time corresponding to y0.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError
    """
    # Validate and normalize the arguments up front, so callers get a clear
    # error here rather than from deep inside the chosen algorithm.
    (d, m, f, G, y0, tspan, __, __) = _check_args(f, G, y0, tspan, None, None)
    # In future versions the most suitable Stratonovich algorithm could be
    # selected automatically from properties of the system and noise. For
    # now SRS2 is always used.
    return stratSRS2(f, G, y0, tspan)
def itoEuler(f, G, y0, tspan, dW=None):
    """Use the Euler-Maruyama algorithm to integrate the Ito equation
    dy = f(y,t)dt + G(y,t) dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y, t) returning (d,) array
         Vector-valued function to define the deterministic part of the system
      G: callable(y, t) returning (d,m) array
         Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the intial time corresponding to the initial state y0.
      dW: optional array of shape (len(tspan)-1, m). This is for advanced use,
        if you want to use a specific realization of the m independent Wiener
        processes. If not provided Wiener increments will be generated randomly

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      G. Maruyama (1955) Continuous Markov processes and stochastic equations
      Kloeden and Platen (1999) Numerical Solution of Stochastic Differential
        Equations
    """
    # Fixed: the docstring previously said dW has shape (len(tspan)-1, d);
    # _check_args validates it against (len(tspan)-1, m).
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # equal step size (checked above)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)
    y[0] = y0
    for n in range(0, N-1):
        tn = tspan[n]
        yn = y[n]
        dWn = dW[n,:]
        # Explicit Euler-Maruyama step: deterministic drift plus the
        # diffusion term applied to this interval's Wiener increment.
        y[n+1] = yn + f(yn, tn)*h + G(yn, tn).dot(dWn)
    return y
def stratHeun(f, G, y0, tspan, dW=None):
    """Use the Stratonovich Heun algorithm to integrate Stratonovich equation
    dy = f(y,t)dt + G(y,t) \circ dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is an d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments

    Args:
      f: callable(y, t) returning (d,) array
         Vector-valued function to define the deterministic part of the system
      G: callable(y, t) returning (d,m) array
         Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the intial time corresponding to the initial state y0.
      dW: optional array of shape (len(tspan)-1, m). This is for advanced use,
        if you want to use a specific realization of the m independent Wiener
        processes. If not provided Wiener increments will be generated randomly

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      W. Rumelin (1982) Numerical Treatment of Stochastic Differential
        Equations
      R. Mannella (2002) Integration of Stochastic Differential Equations
        on a Computer
      K. Burrage, P. M. Burrage and T. Tian (2004) Numerical methods for strong
        solutions of stochastic differential equations: an overview
    """
    # Fixed: the docstring previously said dW has shape (len(tspan)-1, d);
    # _check_args validates it against (len(tspan)-1, m).
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1)  # equal step size (checked above)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h)
    y[0] = y0
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        yn = y[n]
        dWn = dW[n,:]
        fn = f(yn, tn)
        Gn = G(yn, tn)
        # Euler predictor step ...
        ybar = yn + fn*h + Gn.dot(dWn)
        fnbar = f(ybar, tnp1)
        Gnbar = G(ybar, tnp1)
        # ... then trapezoidal corrector averaging drift and diffusion at
        # both ends of the interval (this is what makes it Stratonovich).
        y[n+1] = yn + 0.5*(fn + fnbar)*h + 0.5*(Gn + Gnbar).dot(dWn)
    return y
def itoSRI2(f, G, y0, tspan, Imethod=Ikpw, dW=None, I=None):
    """Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
    SRI2 to integrate an Ito equation dy = f(y,t)dt + G(y,t)dW(t)

    where y is d-dimensional vector variable, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) is a vector of m independent Wiener increments.

    This algorithm is suitable for Ito systems with an arbitrary noise
    coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
    or commutative). The algorithm has order 2.0 convergence for the
    deterministic part alone and order 1.0 strong convergence for the complete
    stochastic system.

    Args:
      f: A function f(y, t) returning an array of shape (d,)
        Vector-valued function to define the deterministic part of the system
      G: The d x m coefficient function G can be given in two different ways:
        You can provide a single function G(y, t) that returns an array of
        shape (d, m). In this case the entire matrix G() will be evaluated
        2m+1 times at each time step so complexity grows quadratically with m.
        Alternatively you can provide a list of m functions g(y, t) each
        defining one column of G (each returning an array of shape (d,).
        In this case each g will be evaluated 3 times at each time step so
        complexity grows linearly with m. If your system has large m and
        G involves complicated functions, consider using this way.
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the intial time corresponding to the initial state y0.
      Imethod (callable, optional): which function to use to simulate repeated
        Ito integrals. Here you can choose either sdeint.Ikpw (the default) or
        sdeint.Iwik (which is more accurate but uses a lot of memory in the
        current implementation).
      dW: optional array of shape (len(tspan)-1, m).
      I: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and I are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    # Thin wrapper: SRI2 and SRS2 share one implementation; passing an Ito
    # repeated-integral method (Imethod) selects the Ito variant.
    return _Roessler2010_SRK2(f, G, y0, tspan, Imethod, dW, I)
def stratSRS2(f, G, y0, tspan, Jmethod=Jkpw, dW=None, J=None):
    """Use the Roessler2010 order 1.0 strong Stochastic Runge-Kutta algorithm
    SRS2 to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)

    where y is d-dimensional vector variable, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) is a vector of m independent Wiener increments.

    This algorithm is suitable for Stratonovich systems with an arbitrary noise
    coefficient matrix G (i.e. the noise does not need to be scalar, diagonal,
    or commutative). The algorithm has order 2.0 convergence for the
    deterministic part alone and order 1.0 strong convergence for the complete
    stochastic system.

    Args:
      f: A function f(y, t) returning an array of shape (d,)
        Vector-valued function to define the deterministic part of the system
      G: The d x m coefficient function G can be given in two different ways:
        You can provide a single function G(y, t) that returns an array of
        shape (d, m). In this case the entire matrix G() will be evaluated
        2m+1 times at each time step so complexity grows quadratically with m.
        Alternatively you can provide a list of m functions g(y, t) each
        defining one column of G (each returning an array of shape (d,).
        In this case each g will be evaluated 3 times at each time step so
        complexity grows linearly with m. If your system has large m and
        G involves complicated functions, consider using this way.
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the intial time corresponding to the initial state y0.
      Jmethod (callable, optional): which function to use to simulate repeated
        Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
        default) or sdeint.Jwik (which is more accurate but uses a lot of
        memory in the current implementation).
      dW: optional array of shape (len(tspan)-1, m).
      J: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and J are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    # Thin wrapper: SRI2 and SRS2 share one implementation; passing a
    # Stratonovich repeated-integral method (Jmethod) selects this variant.
    return _Roessler2010_SRK2(f, G, y0, tspan, Jmethod, dW, J)
def _Roessler2010_SRK2(f, G, y0, tspan, IJmethod, dW=None, IJ=None):
    """Implements the Roessler2010 order 1.0 strong Stochastic Runge-Kutta
    algorithms SRI2 (for Ito equations) and SRS2 (for Stratonovich equations).

    Algorithms SRI2 and SRS2 are almost identical and have the same extended
    Butcher tableaus. The difference is that Ito repeateded integrals I_ij are
    replaced by Stratonovich repeated integrals J_ij when integrating a
    Stratonovich equation (Theorem 6.2 in Roessler2010).

    Args:
      f: A function f(y, t) returning an array of shape (d,)
      G: Either a function G(y, t) that returns an array of shape (d, m),
         or a list of m functions g(y, t) each returning an array shape (d,).
      y0: array of shape (d,) giving the initial state
      tspan (array): Sequence of equally spaced time points
      IJmethod (callable): which function to use to generate repeated
        integrals. N.B. for an Ito equation, must use an Ito version here
        (either Ikpw or Iwik). For a Stratonovich equation, must use a
        Stratonovich version here (Jkpw or Jwik).
      dW: optional array of shape (len(tspan)-1, m).
      IJ: optional array of shape (len(tspan)-1, m, m).
        Optional arguments dW and IJ are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))

    Raises:
      SDEValueError

    See also:
      A. Roessler (2010) Runge-Kutta Methods for the Strong Approximation of
        Solutions of Stochastic Differential Equations
    """
    (d, m, f, G, y0, tspan, dW, IJ) = _check_args(f, G, y0, tspan, dW, IJ)
    have_separate_g = (not callable(G)) # if G is given as m separate functions
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h) # shape (N, m)
    if IJ is None:
        # pre-generate repeated stochastic integrals for each time step.
        # Must give I_ij for the Ito case or J_ij for the Stratonovich case:
        __, I = IJmethod(dW, h) # shape (N, m, m)
    else:
        I = IJ
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    y[0] = y0;
    # Reused buffer for the diffusion matrix when G is given column-wise.
    Gn = np.zeros((d, m), dtype=y.dtype)
    for n in range(0, N-1):
        tn = tspan[n]
        tn1 = tspan[n+1]
        h = tn1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n] # shape (d,)
        Ik = dW[n,:] # shape (m,)
        Iij = I[n,:,:] # shape (m, m)
        fnh = f(Yn, tn)*h # shape (d,)
        if have_separate_g:
            for k in range(0, m):
                Gn[:,k] = G[k](Yn, tn)
        else:
            Gn = G(Yn, tn)
        sum1 = np.dot(Gn, Iij)/sqrth # shape (d, m)
        # Supporting stage values (the H^(k) stages of the scheme):
        H20 = Yn + fnh # shape (d,)
        H20b = np.reshape(H20, (d, 1))
        H2 = H20b + sum1 # shape (d, m)
        H30 = Yn
        # NOTE(review): H30 is assigned but never used below.
        H3 = H20b - sum1
        fn1h = f(H20, tn1)*h
        # Deterministic trapezoidal part plus the leading diffusion term.
        Yn1 = Yn + 0.5*(fnh + fn1h) + np.dot(Gn, Ik)
        # Correction terms from the difference of G at the H2/H3 stages.
        if have_separate_g:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G[k](H2[:,k], tn1) - G[k](H3[:,k], tn1))
        else:
            for k in range(0, m):
                Yn1 += 0.5*sqrth*(G(H2[:,k], tn1)[:,k] - G(H3[:,k], tn1)[:,k])
        y[n+1] = Yn1
    return y
def stratKP2iS(f, G, y0, tspan, Jmethod=Jkpw, gam=None, al1=None, al2=None,
               rtol=1e-4, dW=None, J=None):
    """Use the Kloeden and Platen two-step implicit order 1.0 strong algorithm
    to integrate a Stratonovich equation dy = f(y,t)dt + G(y,t)\circ dW(t)

    This semi-implicit algorithm may be useful for stiff systems. The noise
    does not need to be scalar, diagonal, or commutative.

    This algorithm is defined in Kloeden and Platen (1999) section 12.4,
    equations (4.5) and (4.7). Here implementing that scheme with default
    parameters \gamma_k = \alpha_{1,k} = \alpha_{2,k} = 0.5 for k=1..d using
    MINPACK HYBRD algorithm to solve the implicit vector equation at each step.

    Args:
      f: A function f(y, t) returning an array of shape (d,) to define the
        deterministic part of the system
      G: A function G(y, t) returning an array of shape (d, m) to define the
        noise coefficients of the system
      y0: array of shape (d,) giving the initial state
      tspan (array): Sequence of equally spaced time points
      Jmethod (callable, optional): which function to use to simulate repeated
        Stratonovich integrals. Here you can choose either sdeint.Jkpw (the
        default) or sdeint.Jwik (which is more accurate but uses a lot of
        memory in the current implementation).
      gam, al1, al2 (optional arrays of shape (d,)): These can configure free
        parameters \gamma_k, \alpha_{1,k}, \alpha_{2,k} of the algorithm.
        You can omit these, then the default values 0.5 will be used.
      rtol (float, optional): Relative error tolerance. The default is 1e-4.
        This is the relative tolerance used when solving the implicit equation
        for Y_{n+1} at each step. It does not mean that the overall sample path
        approximation has this relative precision.
      dW: optional array of shape (len(tspan)-1, m).
      J: optional array of shape (len(tspan)-1, m, m).
        These optional arguments dW and J are for advanced use, if you want to
        use a specific realization of the m independent Wiener processes and
        their multiple integrals at each time step. If not provided, suitable
        values will be generated randomly.

    Returns:
      y: array, with shape (len(tspan), len(y0))

    Raises:
      SDEValueError, RuntimeError

    See also:
      P. Kloeden and E. Platen (1999) Numerical Solution of Stochastic
        Differential Equations, revised and updated 3rd printing.
    """
    # scipy is only needed by this one algorithm, so import it lazily.
    try:
        from scipy.optimize import fsolve
    except ImportError:
        raise Error('stratKP2iS() requires package ``scipy`` to be installed.')
    (d, m, f, G, y0, tspan, dW, J) = _check_args(f, G, y0, tspan, dW, J)
    if not callable(G):
        raise SDEValueError('G should be a function returning a d x m matrix.')
    if np.iscomplexobj(y0):
        raise SDEValueError("stratKP2iS() can't yet handle complex variables.")
    if gam is None:
        gam = np.ones((d,))*0.5 # Default level of implicitness \gamma_k = 0.5
    if al1 is None:
        al1 = np.ones((d,))*0.5 # Default \alpha_{1,k} = 0.5
    if al2 is None:
        al2 = np.ones((d,))*0.5 # Default \alpha_{2,k} = 0.5
    N = len(tspan)
    h = (tspan[N-1] - tspan[0])/(N - 1) # assuming equal time steps
    if dW is None:
        # pre-generate Wiener increments (for m independent Wiener processes):
        dW = deltaW(N - 1, m, h) # shape (N, m)
    if J is None:
        # pre-generate repeated Stratonovich integrals for each time step
        __, J = Jmethod(dW, h) # shape (N, m, m)
    # allocate space for result
    y = np.zeros((N, d), dtype=type(y0[0]))
    def _imp(Ynp1, Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1):
        """At each step we will solve _imp(Ynp1, ...) == 0 for Ynp1.
        The meaning of these arguments is: Y_{n+1}, Y_n, Y_{n-1}, V_n, V_{n-1},
        t_{n+1}, t_n, t_{n-1}, f(Y_n, t_n), f(Y_{n-1}, t_{n-1})."""
        return ((1 - gam)*Yn + gam*Ynm1 + (al2*f(Ynp1, tnp1) +
                (gam*al1 + (1 - al2))*fn + gam*(1 - al1)*fnm1)*h + Vn +
                gam*Vnm1 - Ynp1)
    fn = None
    Vn = None
    y[0] = y0
    for n in range(0, N-1):
        tn = tspan[n]
        tnp1 = tspan[n+1]
        h = tnp1 - tn
        sqrth = np.sqrt(h)
        Yn = y[n] # shape (d,)
        Jk = dW[n,:] # shape (m,)
        Jij = J[n,:,:] # shape (m, m)
        fnm1 = fn
        fn = f(Yn, tn)
        Gn = G(Yn, tn)
        # Supporting values for approximating the diffusion contribution:
        Ybar = (Yn + fn*h).reshape((d, 1)) + Gn*sqrth # shape (d, m)
        sum1 = np.zeros((d,))
        for j1 in range(0, m):
            sum1 += np.dot(G(Ybar[:,j1], tn) - Gn, Jij[j1,:])
        Vnm1 = Vn
        Vn = np.dot(Gn, Jk) + sum1/sqrth
        if n == 0:
            # First step uses Kloeden&Platen explicit order 1.0 strong scheme:
            y[n+1] = Yn + fn*h + Vn
            continue
        tnm1 = tspan[n-1]
        Ynm1 = y[n-1] # shape (d,)
        # now solve _imp(Ynp1, ...) == 0 for Ynp1, near to Yn
        args = (Yn, Ynm1, Vn, Vnm1, tnp1, tn, tnm1, fn, fnm1)
        (Ynp1, __, status, msg) = fsolve(_imp, Yn, args=args, xtol=rtol,
                                         full_output=True)
        if status == 1:
            y[n+1] = Ynp1
        else:
            # Fixed: this message variable used to be named ``m``, shadowing
            # the number of Wiener processes computed by _check_args above.
            fail_msg = """At time t_n = %g Failed to solve for Y_{n+1} with args %s.
                Reason: %s""" % (tn, args, msg)
            raise RuntimeError(fail_msg)
    return y
|
sergiocorreia/panflute | examples/panflute/gabc.py | latex2png | python | def latex2png(snippet, outfile):
pngimage = os.path.join(IMAGEDIR, outfile + '.png')
environment = os.environ
environment['openout_any'] = 'a'
environment['shell_escape_commands'] = \
"bibtex,bibtex8,kpsewhich,makeindex,mpost,repstopdf,gregorio"
proc = Popen(
["lualatex", '-output-directory=' + IMAGEDIR],
stdin=PIPE,
stdout=DEVNULL,
env=environment
)
proc.stdin.write(
(
LATEX_DOC % (snippet)
).encode("utf-8")
)
proc.communicate()
proc.stdin.close()
call(["pdfcrop", os.path.join(IMAGEDIR, "texput.pdf")], stdout=DEVNULL)
call(
[
"gs",
"-sDEVICE=pngalpha",
"-r144",
"-sOutputFile=" + pngimage,
os.path.join(IMAGEDIR, "texput-crop.pdf"),
],
stdout=DEVNULL,
) | Compiles a LaTeX snippet to png | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/examples/panflute/gabc.py#L72-L102 | null | #!/usr/bin/env python3
"""
Pandoc filter to convert code blocks with class "gabc" to LaTeX
\\gabcsnippet commands in LaTeX output, and to images in HTML output.
Assumes Ghostscript, LuaLaTeX, [Gregorio](http://gregorio-project.github.io/)
and a reasonable selection of LaTeX packages are installed.
"""
import os
from sys import getfilesystemencoding, stderr
from subprocess import Popen, call, PIPE, DEVNULL
from hashlib import sha1
from panflute import toJSONFilter, RawBlock, RawInline, Para, Image, Code, CodeBlock
IMAGEDIR = "tmp_gabc"
LATEX_DOC = """\\documentclass{article}
\\usepackage{libertine}
\\usepackage[autocompile]{gregoriotex}
\\pagestyle{empty}
\\begin{document}
%s
\\end{document}
"""
def sha(code):
    """Return the hex-encoded SHA-1 digest of *code*.

    The text is first encoded with the filesystem encoding, then hashed.
    Used to derive stable cache filenames for rendered snippets.
    """
    encoded = code.encode(getfilesystemencoding())
    return sha1(encoded).hexdigest()
def latex(code):
    """Wrap *code* in a raw inline LaTeX element (format='latex')."""
    return RawInline(code, format='latex')
def latexblock(code):
    """Wrap *code* in a raw block-level LaTeX element (format='latex')."""
    return RawBlock(code, format='latex')
def htmlblock(code):
    """Wrap *code* in a raw block-level HTML element.

    Kept for symmetry with latexblock(); not referenced elsewhere in this
    file as far as visible here.
    """
    return RawBlock(code, format='html')
def latexsnippet(code, kvs):
    """Build the LaTeX for one gabc snippet, honoring key/value attributes.

    Recognized keys in *kvs*: 'staffsize' (default 17), and the optional
    'mode' and 'annotation', each rendered as a \\greannotation line whose
    font size scales with the staff size. The assembled preamble is
    prepended to *code*.
    """
    staffsize = int(kvs['staffsize']) if 'staffsize' in kvs else 17
    annotationsize = .56 * staffsize
    # Assemble the pieces in output order: parskip, staff sizing,
    # annotation (if any), mode (if any), then the score code itself.
    parts = ["\\setlength{\\parskip}{0pt}\n"]
    parts.append(
        "\\grechangestaffsize{%s}\n" % staffsize +
        "\\def\\greinitialformat#1{{\\fontsize{%s}{%s}\\selectfont{}#1}}" %
        (2.75 * staffsize, 2.75 * staffsize)
    )
    if 'annotation' in kvs:
        parts.append(
            "\\grechangedim{annotationseparation}{%s mm}{0}\n"
            "\\greannotation{{\\fontsize{%s}{%s}\\selectfont{}%s}}\n" %
            (staffsize / 34, annotationsize, annotationsize, kvs['annotation'])
        )
    if 'mode' in kvs:
        parts.append(
            "\\greannotation{{\\fontsize{%s}{%s}\\selectfont{}%s}}\n" %
            (annotationsize, annotationsize, kvs['mode'])
        )
    parts.append(code)
    return "".join(parts)
def png(contents, latex_command):
    """Creates a png if needed.

    The output filename is the SHA-1 of contents+latex_command, so each
    unique snippet is rendered at most once (a cache on disk).
    Returns the path of the png inside IMAGEDIR.
    """
    outfile = sha(contents + latex_command)
    src = os.path.join(IMAGEDIR, outfile + '.png')
    if not os.path.isfile(src):
        try:
            os.mkdir(IMAGEDIR)
            stderr.write('Created directory ' + IMAGEDIR + '\n')
        except OSError:
            # Directory already exists (or cannot be created; in the latter
            # case latex2png below will fail with a clearer error).
            pass
        latex2png(latex_command + "{" + contents + "}", outfile)
        stderr.write('Created image ' + src + '\n')
    return src
def gabc(elem, doc):
    """Handle gabc file inclusion and gabc code block.

    Inline Code elements name a .gabc file to include; CodeBlock elements
    carry the gabc score itself. For LaTeX output the score is embedded as
    \\gregorioscore / \\gabcsnippet commands; for every other format it is
    rendered to a png image (see png()).

    Args:
        elem: the element being walked by panflute.
        doc: the Doc, consulted for its output format.

    Returns:
        A replacement element, or None to leave elem unchanged.
    """
    if isinstance(elem, Code) and "gabc" in elem.classes:
        if doc.format == "latex":
            # Attach a \label{} only when the element has an identifier.
            if elem.identifier == "":
                label = ""
            else:
                label = '\\label{' + elem.identifier + '}'
            return latex(
                "\n\\smallskip\n{%\n" +
                latexsnippet('\\gregorioscore{' + elem.text + '}', elem.attributes) +
                "%\n}" +
                label
            )
        else:
            infile = elem.text + (
                '.gabc' if '.gabc' not in elem.text else ''
            )
            # Fixed: the file handle used to be named ``doc``, shadowing the
            # Doc argument above.
            with open(infile, 'r') as gabc_file:
                code = gabc_file.read().split('%%\n')[1]
            # NOTE(review): ``code`` is extracted but elem.text (the file
            # name) is what gets passed to png() -- possibly ``code`` was
            # intended; confirm against gregorio usage before changing.
            # Fixed: pass the path as url= (as the CodeBlock branch already
            # does); a bare positional string is not a valid Image child.
            return Image(url=png(
                elem.text,
                latexsnippet('\\gregorioscore', elem.attributes)
            ))
    elif isinstance(elem, CodeBlock) and "gabc" in elem.classes:
        if doc.format == "latex":
            if elem.identifier == "":
                label = ""
            else:
                label = '\\label{' + elem.identifier + '}'
            return latexblock(
                "\n\\smallskip\n{%\n" +
                latexsnippet('\\gabcsnippet{' + elem.text + '}', elem.attributes) +
                "%\n}" +
                label
            )
        else:
            return Para(Image(url=png(elem.text, latexsnippet('\\gabcsnippet', elem.attributes))))
if __name__ == "__main__":
    # Run as a pandoc JSON filter: read the AST from stdin, apply gabc to
    # every element, and write the transformed AST back to stdout.
    toJSONFilter(gabc)
|
sergiocorreia/panflute | examples/panflute/gabc.py | gabc | python | def gabc(elem, doc):
if type(elem) == Code and "gabc" in elem.classes:
if doc.format == "latex":
if elem.identifier == "":
label = ""
else:
label = '\\label{' + elem.identifier + '}'
return latex(
"\n\\smallskip\n{%\n" +
latexsnippet('\\gregorioscore{' + elem.text + '}', elem.attributes) +
"%\n}" +
label
)
else:
infile = elem.text + (
'.gabc' if '.gabc' not in elem.text else ''
)
with open(infile, 'r') as doc:
code = doc.read().split('%%\n')[1]
return Image(png(
elem.text,
latexsnippet('\\gregorioscore', elem.attributes)
))
elif type(elem) == CodeBlock and "gabc" in elem.classes:
if doc.format == "latex":
if elem.identifier == "":
label = ""
else:
label = '\\label{' + elem.identifier + '}'
return latexblock(
"\n\\smallskip\n{%\n" +
latexsnippet('\\gabcsnippet{' + elem.text + '}', elem.attributes) +
"%\n}" +
label
)
else:
return Para(Image(url=png(elem.text, latexsnippet('\\gabcsnippet', elem.attributes)))) | Handle gabc file inclusion and gabc code block. | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/examples/panflute/gabc.py#L120-L157 | [
"def png(contents, latex_command):\n \"\"\"Creates a png if needed.\"\"\"\n outfile = sha(contents + latex_command)\n src = os.path.join(IMAGEDIR, outfile + '.png')\n if not os.path.isfile(src):\n try:\n os.mkdir(IMAGEDIR)\n stderr.write('Created directory ' + IMAGEDIR + '\\... | #!/usr/bin/env python3
"""
Pandoc filter to convert code blocks with class "gabc" to LaTeX
\\gabcsnippet commands in LaTeX output, and to images in HTML output.
Assumes Ghostscript, LuaLaTeX, [Gregorio](http://gregorio-project.github.io/)
and a reasonable selection of LaTeX packages are installed.
"""
import os
from sys import getfilesystemencoding, stderr
from subprocess import Popen, call, PIPE, DEVNULL
from hashlib import sha1
from panflute import toJSONFilter, RawBlock, RawInline, Para, Image, Code, CodeBlock
IMAGEDIR = "tmp_gabc"
LATEX_DOC = """\\documentclass{article}
\\usepackage{libertine}
\\usepackage[autocompile]{gregoriotex}
\\pagestyle{empty}
\\begin{document}
%s
\\end{document}
"""
def sha(code):
    """Returns sha1 hash of the code (hex digest; the text is encoded with
    the filesystem encoding before hashing)."""
    return sha1(code.encode(getfilesystemencoding())).hexdigest()
def latex(code):
    """Wrap *code* in a raw inline LaTeX element (format='latex')."""
    return RawInline(code, format='latex')
def latexblock(code):
    """Wrap *code* in a raw block-level LaTeX element (format='latex')."""
    return RawBlock(code, format='latex')
def htmlblock(code):
    """Wrap *code* in a raw block-level HTML element.

    Kept for symmetry with latexblock(); not referenced elsewhere in this
    file as far as visible here.
    """
    return RawBlock(code, format='html')
def latexsnippet(code, kvs):
    """Take in account key/values

    Builds the LaTeX preamble for one gabc snippet from the element's
    attributes: 'staffsize' (default 17), plus optional 'mode' and
    'annotation', each rendered as a \\greannotation line scaled to the
    staff size. The preamble is prepended to *code*.
    """
    snippet = ''
    staffsize = int(kvs['staffsize']) if 'staffsize' in kvs else 17
    # Annotation text is set at 56% of the staff size.
    annotationsize = .56 * staffsize
    if 'mode' in kvs:
        snippet = (
            "\\greannotation{{\\fontsize{%s}{%s}\\selectfont{}%s}}\n" %
            (annotationsize, annotationsize, kvs['mode'])
        ) + snippet
    if 'annotation' in kvs:
        # Prepended after the mode line, so the annotation ends up first.
        snippet = (
            "\\grechangedim{annotationseparation}{%s mm}{0}\n"
            "\\greannotation{{\\fontsize{%s}{%s}\\selectfont{}%s}}\n" %
            (staffsize / 34, annotationsize, annotationsize, kvs['annotation'])
        ) + snippet
    # Staff sizing and initial-letter font go before everything else.
    snippet = (
        "\\grechangestaffsize{%s}\n" % staffsize +
        "\\def\\greinitialformat#1{{\\fontsize{%s}{%s}\\selectfont{}#1}}" %
        (2.75 * staffsize, 2.75 * staffsize)
    ) + snippet
    snippet = "\\setlength{\\parskip}{0pt}\n" + snippet + code
    return snippet
def latex2png(snippet, outfile):
    """Compiles a LaTeX snippet to png

    Pipeline: lualatex (fed the snippet via stdin, wrapped in LATEX_DOC)
    -> pdfcrop -> ghostscript. The result lands at IMAGEDIR/<outfile>.png.
    """
    pngimage = os.path.join(IMAGEDIR, outfile + '.png')
    # NOTE(review): this binds a reference to os.environ, not a copy, so the
    # two variables set below leak into this process's own environment (and
    # into the pdfcrop/gs calls, which inherit it). Possibly intentional --
    # confirm before changing to os.environ.copy().
    environment = os.environ
    # Let TeX write outside the current directory and shell out to gregorio.
    environment['openout_any'] = 'a'
    environment['shell_escape_commands'] = \
        "bibtex,bibtex8,kpsewhich,makeindex,mpost,repstopdf,gregorio"
    proc = Popen(
        ["lualatex", '-output-directory=' + IMAGEDIR],
        stdin=PIPE,
        stdout=DEVNULL,
        env=environment
    )
    proc.stdin.write(
        (
            LATEX_DOC % (snippet)
        ).encode("utf-8")
    )
    proc.communicate()
    # NOTE(review): communicate() already closes stdin; this close is
    # redundant (harmless on Python 3).
    proc.stdin.close()
    call(["pdfcrop", os.path.join(IMAGEDIR, "texput.pdf")], stdout=DEVNULL)
    call(
        [
            "gs",
            "-sDEVICE=pngalpha",
            "-r144",
            "-sOutputFile=" + pngimage,
            os.path.join(IMAGEDIR, "texput-crop.pdf"),
        ],
        stdout=DEVNULL,
    )
def png(contents, latex_command):
    """Creates a png if needed.

    The output filename is the SHA-1 of contents+latex_command, so each
    unique snippet is rendered at most once (a cache on disk).
    Returns the path of the png inside IMAGEDIR.
    """
    outfile = sha(contents + latex_command)
    src = os.path.join(IMAGEDIR, outfile + '.png')
    if not os.path.isfile(src):
        try:
            os.mkdir(IMAGEDIR)
            stderr.write('Created directory ' + IMAGEDIR + '\n')
        except OSError:
            # Directory already exists (or cannot be created; in the latter
            # case latex2png below will fail with a clearer error).
            pass
        latex2png(latex_command + "{" + contents + "}", outfile)
        stderr.write('Created image ' + src + '\n')
    return src
if __name__ == "__main__":
    # Run as a pandoc JSON filter: read the AST from stdin, apply gabc to
    # every element, and write the transformed AST back to stdout.
    toJSONFilter(gabc)
|
sergiocorreia/panflute | panflute/io.py | load | python | def load(input_stream=None):
    if input_stream is None:
        # Default to stdin; on Python 3 wrap the raw buffer so the JSON is
        # always decoded as UTF-8 regardless of the locale.
        input_stream = io.open(sys.stdin.fileno()) if py2 else io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
    # Load JSON and validate it
    doc = json.load(input_stream, object_pairs_hook=from_json)
    # Notes:
    # - We use 'object_pairs_hook' instead of 'object_hook' to preserve the
    #   order of the metadata.
    # - The hook gets called for dicts (not lists), and the deepest dicts
    #   get called first (so you can ensure that when you receive a dict,
    #   its contents have already been fed to the hook).
    # Compatibility:
    # - Before Pandoc 1.8 (1.7 or earlier, AKA "Pandoc Legacy"),
    #   the JSON is a list:
    #   [{"unMeta":{META}},[BLOCKS]]
    # - Afterwards, it's a dict:
    #   {"pandoc-api-version" : [MAJ, MIN, REV],
    #    "meta" : META, "blocks": BLOCKS}
    # - This means that on legacy, the hook WILL NOT get called on the entire
    #   document and we need to create the Doc() element by hand
    # Corner cases:
    # - If META is missing, 'object_pairs_hook' will receive an empty list
    # Output format (pandoc passes it as the filter's first CLI argument)
    format = sys.argv[1] if len(sys.argv) > 1 else 'html'
    # API Version
    if isinstance(doc, Doc):
        # Modern Pandoc: from_json already assembled the Doc; just record
        # the requested output format on it.
        doc.format = format
        pass
    else:
        # Legacy Pandoc: json.load returned [metadata, blocks]; build the
        # Doc element by hand.
        metadata, items = doc
        assert type(items) == list
        assert len(doc) == 2, 'json.load returned list with unexpected size:'
        doc = Doc(*items, metadata=metadata, format=format)
return doc | Load JSON-encoded document and return a :class:`.Doc` element.
The JSON input will be read from :data:`sys.stdin` unless an alternative
text stream is given (a file handle).
To load from a file, you can do:
>>> import panflute as pf
>>> with open('some-document.json', encoding='utf-8') as f:
>>> doc = pf.load(f)
To load from a string, you can do:
>>> import io
>>> raw = '[{"unMeta":{}},
[{"t":"Para","c":[{"t":"Str","c":"Hello!"}]}]]'
>>> f = io.StringIO(raw)
>>> doc = pf.load(f)
:param input_stream: text stream used as input
(default is :data:`sys.stdin`)
:rtype: :class:`.Doc` | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/io.py#L26-L93 | null | # ---------------------------
# Imports
# ---------------------------
from .elements import Element, Doc, from_json, ListContainer
# These will get modified if using Pandoc legacy (<1.8)
from .elements import (Citation, Table, OrderedList, Quoted,
Math, EMPTY_ELEMENTS)
import io
import os
import sys
import json
import codecs # Used in sys.stdout writer
from collections import OrderedDict
from functools import partial
py2 = sys.version_info[0] == 2
# ---------------------------
# Functions
# ---------------------------
def dump(doc, output_stream=None):
"""
Dump a :class:`.Doc` object into a JSON-encoded text string.
The output will be sent to :data:`sys.stdout` unless an alternative
text stream is given.
To dump to :data:`sys.stdout` just do:
>>> import panflute as pf
>>> doc = pf.Doc(Para(Str('a'))) # Create sample document
>>> pf.dump(doc)
To dump to file:
>>> with open('some-document.json', 'w'. encoding='utf-8') as f:
>>> pf.dump(doc, f)
To dump to a string:
>>> import io
>>> with io.StringIO() as f:
>>> pf.dump(doc, f)
>>> contents = f.getvalue()
:param doc: document, usually created with :func:`.load`
:type doc: :class:`.Doc`
:param output_stream: text stream used as output
(default is :data:`sys.stdout`)
"""
assert type(doc) == Doc, "panflute.dump needs input of type panflute.Doc"
if output_stream is None:
sys.stdout = codecs.getwriter("utf-8")(sys.stdout) if py2 else codecs.getwriter("utf-8")(sys.stdout.detach())
output_stream = sys.stdout
# Switch to legacy JSON output; eg: {'t': 'Space', 'c': []}
if doc.api_version is None:
# Switch .to_json() to legacy
Citation.backup = Citation.to_json
Citation.to_json = Citation.to_json_legacy
# Switch ._slots_to_json() to legacy
for E in [Table, OrderedList, Quoted, Math]:
E.backup = E._slots_to_json
E._slots_to_json = E._slots_to_json_legacy
# Switch .to_json() to method of base class
for E in EMPTY_ELEMENTS:
E.backup = E.to_json
E.to_json = Element.to_json
json_serializer = lambda elem: elem.to_json()
output_stream.write(json.dumps(
obj=doc,
default=json_serializer, # Serializer
check_circular=False,
separators=(',', ':'), # Compact separators, like Pandoc
ensure_ascii=False # For Pandoc compat
))
# Undo legacy changes
if doc.api_version is None:
Citation.to_json = Citation.backup
for E in [Table, OrderedList, Quoted, Math]:
E._slots_to_json = E.backup
for E in EMPTY_ELEMENTS:
E.to_json = E.backup
def toJSONFilters(*args, **kwargs):
"""
Wrapper for :func:`.run_filters`
"""
return run_filters(*args, **kwargs)
def toJSONFilter(*args, **kwargs):
"""
Wapper for :func:`.run_filter`, which calls :func:`.run_filters`
toJSONFilter(action, prepare=None, finalize=None, input_stream=None, output_stream=None, \*\*kwargs)
Receive a Pandoc document from stdin, apply the *action* function to each element, and write it back to stdout.
See also :func:`.toJSONFilters`
"""
return run_filter(*args, **kwargs)
def run_filters(actions,
prepare=None, finalize=None,
input_stream=None, output_stream=None,
doc=None,
**kwargs):
"""
Receive a Pandoc document from the input stream (default is stdin),
walk through it applying the functions in *actions* to each element,
and write it back to the output stream (default is stdout).
Notes:
- It receives and writes the Pandoc documents as JSON--encoded strings;
this is done through the :func:`.load` and :func:`.dump` functions.
- It walks through the document once for every function in *actions*,
so the actions are applied sequentially.
- By default, it will read from stdin and write to stdout,
but these can be modified.
- It can also apply functions to the entire document at the beginning and
end; this allows for global operations on the document.
- If ``doc`` is a :class:`.Doc` instead of ``None``, ``run_filters``
will return the document instead of writing it to the output stream.
:param actions: sequence of functions; each function takes (element, doc)
as argument, so a valid header would be ``def action(elem, doc):``
:type actions: [:class:`function`]
:param prepare: function executed at the beginning;
right after the document is received and parsed
:type prepare: :class:`function`
:param finalize: function executed at the end;
right before the document is converted back to JSON and written to stdout.
:type finalize: :class:`function`
:param input_stream: text stream used as input
(default is :data:`sys.stdin`)
:param output_stream: text stream used as output
(default is :data:`sys.stdout`)
:param doc: ``None`` unless running panflute as a filter, in which case this will be a :class:`.Doc` element
:type doc: ``None`` | :class:`.Doc`
:param \*kwargs: keyword arguments will be passed through to the *action*
functions (so they can actually receive more than just two arguments
(*element* and *doc*)
"""
load_and_dump = (doc is None)
if load_and_dump:
doc = load(input_stream=input_stream)
if prepare is not None:
prepare(doc)
for action in actions:
if kwargs:
action = partial(action, **kwargs)
doc = doc.walk(action, doc)
if finalize is not None:
finalize(doc)
if load_and_dump:
dump(doc, output_stream=output_stream)
else:
return(doc)
def run_filter(action, *args, **kwargs):
"""
Wapper for :func:`.run_filters`
Receive a Pandoc document from stdin, apply the *action* function to each element, and write it back to stdout.
See :func:`.run_filters`
"""
return run_filters([action], *args, **kwargs)
def load_reader_options():
"""
Retrieve Pandoc Reader options from the environment
"""
options = os.environ['PANDOC_READER_OPTIONS']
options = json.loads(options, object_pairs_hook=OrderedDict)
return options
|
sergiocorreia/panflute | panflute/io.py | dump | python | def dump(doc, output_stream=None):
assert type(doc) == Doc, "panflute.dump needs input of type panflute.Doc"
if output_stream is None:
sys.stdout = codecs.getwriter("utf-8")(sys.stdout) if py2 else codecs.getwriter("utf-8")(sys.stdout.detach())
output_stream = sys.stdout
# Switch to legacy JSON output; eg: {'t': 'Space', 'c': []}
if doc.api_version is None:
# Switch .to_json() to legacy
Citation.backup = Citation.to_json
Citation.to_json = Citation.to_json_legacy
# Switch ._slots_to_json() to legacy
for E in [Table, OrderedList, Quoted, Math]:
E.backup = E._slots_to_json
E._slots_to_json = E._slots_to_json_legacy
# Switch .to_json() to method of base class
for E in EMPTY_ELEMENTS:
E.backup = E.to_json
E.to_json = Element.to_json
json_serializer = lambda elem: elem.to_json()
output_stream.write(json.dumps(
obj=doc,
default=json_serializer, # Serializer
check_circular=False,
separators=(',', ':'), # Compact separators, like Pandoc
ensure_ascii=False # For Pandoc compat
))
# Undo legacy changes
if doc.api_version is None:
Citation.to_json = Citation.backup
for E in [Table, OrderedList, Quoted, Math]:
E._slots_to_json = E.backup
for E in EMPTY_ELEMENTS:
E.to_json = E.backup | Dump a :class:`.Doc` object into a JSON-encoded text string.
The output will be sent to :data:`sys.stdout` unless an alternative
text stream is given.
To dump to :data:`sys.stdout` just do:
>>> import panflute as pf
>>> doc = pf.Doc(Para(Str('a'))) # Create sample document
>>> pf.dump(doc)
To dump to file:
>>> with open('some-document.json', 'w'. encoding='utf-8') as f:
>>> pf.dump(doc, f)
To dump to a string:
>>> import io
>>> with io.StringIO() as f:
>>> pf.dump(doc, f)
>>> contents = f.getvalue()
:param doc: document, usually created with :func:`.load`
:type doc: :class:`.Doc`
:param output_stream: text stream used as output
(default is :data:`sys.stdout`) | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/io.py#L96-L165 | null | # ---------------------------
# Imports
# ---------------------------
from .elements import Element, Doc, from_json, ListContainer
# These will get modified if using Pandoc legacy (<1.8)
from .elements import (Citation, Table, OrderedList, Quoted,
Math, EMPTY_ELEMENTS)
import io
import os
import sys
import json
import codecs # Used in sys.stdout writer
from collections import OrderedDict
from functools import partial
py2 = sys.version_info[0] == 2
# ---------------------------
# Functions
# ---------------------------
def load(input_stream=None):
"""
Load JSON-encoded document and return a :class:`.Doc` element.
The JSON input will be read from :data:`sys.stdin` unless an alternative
text stream is given (a file handle).
To load from a file, you can do:
>>> import panflute as pf
>>> with open('some-document.json', encoding='utf-8') as f:
>>> doc = pf.load(f)
To load from a string, you can do:
>>> import io
>>> raw = '[{"unMeta":{}},
[{"t":"Para","c":[{"t":"Str","c":"Hello!"}]}]]'
>>> f = io.StringIO(raw)
>>> doc = pf.load(f)
:param input_stream: text stream used as input
(default is :data:`sys.stdin`)
:rtype: :class:`.Doc`
"""
if input_stream is None:
input_stream = io.open(sys.stdin.fileno()) if py2 else io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
# Load JSON and validate it
doc = json.load(input_stream, object_pairs_hook=from_json)
# Notes:
# - We use 'object_pairs_hook' instead of 'object_hook' to preserve the
# order of the metadata.
# - The hook gets called for dicts (not lists), and the deepest dicts
# get called first (so you can ensure that when you receive a dict,
# its contents have already been fed to the hook).
# Compatibility:
# - Before Pandoc 1.8 (1.7 or earlier, AKA "Pandoc Legacy"),
# the JSON is a list:
# [{"unMeta":{META}},[BLOCKS]]
# - Afterwards, it's a dict:
# {"pandoc-api-version" : [MAJ, MIN, REV],
# "meta" : META, "blocks": BLOCKS}
# - This means that on legacy, the hook WILL NOT get called on the entire
# document and we need to create the Doc() element by hand
# Corner cases:
# - If META is missing, 'object_pairs_hook' will receive an empty list
# Output format
format = sys.argv[1] if len(sys.argv) > 1 else 'html'
# API Version
if isinstance(doc, Doc):
# Modern Pandoc
doc.format = format
pass
else:
# Legacy Pandoc
metadata, items = doc
assert type(items) == list
assert len(doc) == 2, 'json.load returned list with unexpected size:'
doc = Doc(*items, metadata=metadata, format=format)
return doc
def toJSONFilters(*args, **kwargs):
"""
Wrapper for :func:`.run_filters`
"""
return run_filters(*args, **kwargs)
def toJSONFilter(*args, **kwargs):
"""
Wapper for :func:`.run_filter`, which calls :func:`.run_filters`
toJSONFilter(action, prepare=None, finalize=None, input_stream=None, output_stream=None, \*\*kwargs)
Receive a Pandoc document from stdin, apply the *action* function to each element, and write it back to stdout.
See also :func:`.toJSONFilters`
"""
return run_filter(*args, **kwargs)
def run_filters(actions,
prepare=None, finalize=None,
input_stream=None, output_stream=None,
doc=None,
**kwargs):
"""
Receive a Pandoc document from the input stream (default is stdin),
walk through it applying the functions in *actions* to each element,
and write it back to the output stream (default is stdout).
Notes:
- It receives and writes the Pandoc documents as JSON--encoded strings;
this is done through the :func:`.load` and :func:`.dump` functions.
- It walks through the document once for every function in *actions*,
so the actions are applied sequentially.
- By default, it will read from stdin and write to stdout,
but these can be modified.
- It can also apply functions to the entire document at the beginning and
end; this allows for global operations on the document.
- If ``doc`` is a :class:`.Doc` instead of ``None``, ``run_filters``
will return the document instead of writing it to the output stream.
:param actions: sequence of functions; each function takes (element, doc)
as argument, so a valid header would be ``def action(elem, doc):``
:type actions: [:class:`function`]
:param prepare: function executed at the beginning;
right after the document is received and parsed
:type prepare: :class:`function`
:param finalize: function executed at the end;
right before the document is converted back to JSON and written to stdout.
:type finalize: :class:`function`
:param input_stream: text stream used as input
(default is :data:`sys.stdin`)
:param output_stream: text stream used as output
(default is :data:`sys.stdout`)
:param doc: ``None`` unless running panflute as a filter, in which case this will be a :class:`.Doc` element
:type doc: ``None`` | :class:`.Doc`
:param \*kwargs: keyword arguments will be passed through to the *action*
functions (so they can actually receive more than just two arguments
(*element* and *doc*)
"""
load_and_dump = (doc is None)
if load_and_dump:
doc = load(input_stream=input_stream)
if prepare is not None:
prepare(doc)
for action in actions:
if kwargs:
action = partial(action, **kwargs)
doc = doc.walk(action, doc)
if finalize is not None:
finalize(doc)
if load_and_dump:
dump(doc, output_stream=output_stream)
else:
return(doc)
def run_filter(action, *args, **kwargs):
"""
Wapper for :func:`.run_filters`
Receive a Pandoc document from stdin, apply the *action* function to each element, and write it back to stdout.
See :func:`.run_filters`
"""
return run_filters([action], *args, **kwargs)
def load_reader_options():
"""
Retrieve Pandoc Reader options from the environment
"""
options = os.environ['PANDOC_READER_OPTIONS']
options = json.loads(options, object_pairs_hook=OrderedDict)
return options
|
sergiocorreia/panflute | panflute/io.py | run_filters | python | def run_filters(actions,
prepare=None, finalize=None,
input_stream=None, output_stream=None,
doc=None,
**kwargs):
load_and_dump = (doc is None)
if load_and_dump:
doc = load(input_stream=input_stream)
if prepare is not None:
prepare(doc)
for action in actions:
if kwargs:
action = partial(action, **kwargs)
doc = doc.walk(action, doc)
if finalize is not None:
finalize(doc)
if load_and_dump:
dump(doc, output_stream=output_stream)
else:
return(doc) | Receive a Pandoc document from the input stream (default is stdin),
walk through it applying the functions in *actions* to each element,
and write it back to the output stream (default is stdout).
Notes:
- It receives and writes the Pandoc documents as JSON--encoded strings;
this is done through the :func:`.load` and :func:`.dump` functions.
- It walks through the document once for every function in *actions*,
so the actions are applied sequentially.
- By default, it will read from stdin and write to stdout,
but these can be modified.
- It can also apply functions to the entire document at the beginning and
end; this allows for global operations on the document.
- If ``doc`` is a :class:`.Doc` instead of ``None``, ``run_filters``
will return the document instead of writing it to the output stream.
:param actions: sequence of functions; each function takes (element, doc)
as argument, so a valid header would be ``def action(elem, doc):``
:type actions: [:class:`function`]
:param prepare: function executed at the beginning;
right after the document is received and parsed
:type prepare: :class:`function`
:param finalize: function executed at the end;
right before the document is converted back to JSON and written to stdout.
:type finalize: :class:`function`
:param input_stream: text stream used as input
(default is :data:`sys.stdin`)
:param output_stream: text stream used as output
(default is :data:`sys.stdout`)
:param doc: ``None`` unless running panflute as a filter, in which case this will be a :class:`.Doc` element
:type doc: ``None`` | :class:`.Doc`
:param \*kwargs: keyword arguments will be passed through to the *action*
functions (so they can actually receive more than just two arguments
(*element* and *doc*) | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/io.py#L187-L249 | [
"def dump(doc, output_stream=None):\n \"\"\"\n Dump a :class:`.Doc` object into a JSON-encoded text string.\n\n The output will be sent to :data:`sys.stdout` unless an alternative\n text stream is given.\n\n To dump to :data:`sys.stdout` just do:\n\n >>> import panflute as pf\n >>> doc ... | # ---------------------------
# Imports
# ---------------------------
from .elements import Element, Doc, from_json, ListContainer
# These will get modified if using Pandoc legacy (<1.8)
from .elements import (Citation, Table, OrderedList, Quoted,
Math, EMPTY_ELEMENTS)
import io
import os
import sys
import json
import codecs # Used in sys.stdout writer
from collections import OrderedDict
from functools import partial
py2 = sys.version_info[0] == 2
# ---------------------------
# Functions
# ---------------------------
def load(input_stream=None):
"""
Load JSON-encoded document and return a :class:`.Doc` element.
The JSON input will be read from :data:`sys.stdin` unless an alternative
text stream is given (a file handle).
To load from a file, you can do:
>>> import panflute as pf
>>> with open('some-document.json', encoding='utf-8') as f:
>>> doc = pf.load(f)
To load from a string, you can do:
>>> import io
>>> raw = '[{"unMeta":{}},
[{"t":"Para","c":[{"t":"Str","c":"Hello!"}]}]]'
>>> f = io.StringIO(raw)
>>> doc = pf.load(f)
:param input_stream: text stream used as input
(default is :data:`sys.stdin`)
:rtype: :class:`.Doc`
"""
if input_stream is None:
input_stream = io.open(sys.stdin.fileno()) if py2 else io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
# Load JSON and validate it
doc = json.load(input_stream, object_pairs_hook=from_json)
# Notes:
# - We use 'object_pairs_hook' instead of 'object_hook' to preserve the
# order of the metadata.
# - The hook gets called for dicts (not lists), and the deepest dicts
# get called first (so you can ensure that when you receive a dict,
# its contents have already been fed to the hook).
# Compatibility:
# - Before Pandoc 1.8 (1.7 or earlier, AKA "Pandoc Legacy"),
# the JSON is a list:
# [{"unMeta":{META}},[BLOCKS]]
# - Afterwards, it's a dict:
# {"pandoc-api-version" : [MAJ, MIN, REV],
# "meta" : META, "blocks": BLOCKS}
# - This means that on legacy, the hook WILL NOT get called on the entire
# document and we need to create the Doc() element by hand
# Corner cases:
# - If META is missing, 'object_pairs_hook' will receive an empty list
# Output format
format = sys.argv[1] if len(sys.argv) > 1 else 'html'
# API Version
if isinstance(doc, Doc):
# Modern Pandoc
doc.format = format
pass
else:
# Legacy Pandoc
metadata, items = doc
assert type(items) == list
assert len(doc) == 2, 'json.load returned list with unexpected size:'
doc = Doc(*items, metadata=metadata, format=format)
return doc
def dump(doc, output_stream=None):
"""
Dump a :class:`.Doc` object into a JSON-encoded text string.
The output will be sent to :data:`sys.stdout` unless an alternative
text stream is given.
To dump to :data:`sys.stdout` just do:
>>> import panflute as pf
>>> doc = pf.Doc(Para(Str('a'))) # Create sample document
>>> pf.dump(doc)
To dump to file:
>>> with open('some-document.json', 'w'. encoding='utf-8') as f:
>>> pf.dump(doc, f)
To dump to a string:
>>> import io
>>> with io.StringIO() as f:
>>> pf.dump(doc, f)
>>> contents = f.getvalue()
:param doc: document, usually created with :func:`.load`
:type doc: :class:`.Doc`
:param output_stream: text stream used as output
(default is :data:`sys.stdout`)
"""
assert type(doc) == Doc, "panflute.dump needs input of type panflute.Doc"
if output_stream is None:
sys.stdout = codecs.getwriter("utf-8")(sys.stdout) if py2 else codecs.getwriter("utf-8")(sys.stdout.detach())
output_stream = sys.stdout
# Switch to legacy JSON output; eg: {'t': 'Space', 'c': []}
if doc.api_version is None:
# Switch .to_json() to legacy
Citation.backup = Citation.to_json
Citation.to_json = Citation.to_json_legacy
# Switch ._slots_to_json() to legacy
for E in [Table, OrderedList, Quoted, Math]:
E.backup = E._slots_to_json
E._slots_to_json = E._slots_to_json_legacy
# Switch .to_json() to method of base class
for E in EMPTY_ELEMENTS:
E.backup = E.to_json
E.to_json = Element.to_json
json_serializer = lambda elem: elem.to_json()
output_stream.write(json.dumps(
obj=doc,
default=json_serializer, # Serializer
check_circular=False,
separators=(',', ':'), # Compact separators, like Pandoc
ensure_ascii=False # For Pandoc compat
))
# Undo legacy changes
if doc.api_version is None:
Citation.to_json = Citation.backup
for E in [Table, OrderedList, Quoted, Math]:
E._slots_to_json = E.backup
for E in EMPTY_ELEMENTS:
E.to_json = E.backup
def toJSONFilters(*args, **kwargs):
"""
Wrapper for :func:`.run_filters`
"""
return run_filters(*args, **kwargs)
def toJSONFilter(*args, **kwargs):
"""
Wapper for :func:`.run_filter`, which calls :func:`.run_filters`
toJSONFilter(action, prepare=None, finalize=None, input_stream=None, output_stream=None, \*\*kwargs)
Receive a Pandoc document from stdin, apply the *action* function to each element, and write it back to stdout.
See also :func:`.toJSONFilters`
"""
return run_filter(*args, **kwargs)
def run_filter(action, *args, **kwargs):
"""
Wapper for :func:`.run_filters`
Receive a Pandoc document from stdin, apply the *action* function to each element, and write it back to stdout.
See :func:`.run_filters`
"""
return run_filters([action], *args, **kwargs)
def load_reader_options():
"""
Retrieve Pandoc Reader options from the environment
"""
options = os.environ['PANDOC_READER_OPTIONS']
options = json.loads(options, object_pairs_hook=OrderedDict)
return options
|
sergiocorreia/panflute | panflute/io.py | load_reader_options | python | def load_reader_options():
options = os.environ['PANDOC_READER_OPTIONS']
options = json.loads(options, object_pairs_hook=OrderedDict)
return options | Retrieve Pandoc Reader options from the environment | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/io.py#L263-L269 | null | # ---------------------------
# Imports
# ---------------------------
from .elements import Element, Doc, from_json, ListContainer
# These will get modified if using Pandoc legacy (<1.8)
from .elements import (Citation, Table, OrderedList, Quoted,
Math, EMPTY_ELEMENTS)
import io
import os
import sys
import json
import codecs # Used in sys.stdout writer
from collections import OrderedDict
from functools import partial
py2 = sys.version_info[0] == 2
# ---------------------------
# Functions
# ---------------------------
def load(input_stream=None):
"""
Load JSON-encoded document and return a :class:`.Doc` element.
The JSON input will be read from :data:`sys.stdin` unless an alternative
text stream is given (a file handle).
To load from a file, you can do:
>>> import panflute as pf
>>> with open('some-document.json', encoding='utf-8') as f:
>>> doc = pf.load(f)
To load from a string, you can do:
>>> import io
>>> raw = '[{"unMeta":{}},
[{"t":"Para","c":[{"t":"Str","c":"Hello!"}]}]]'
>>> f = io.StringIO(raw)
>>> doc = pf.load(f)
:param input_stream: text stream used as input
(default is :data:`sys.stdin`)
:rtype: :class:`.Doc`
"""
if input_stream is None:
input_stream = io.open(sys.stdin.fileno()) if py2 else io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
# Load JSON and validate it
doc = json.load(input_stream, object_pairs_hook=from_json)
# Notes:
# - We use 'object_pairs_hook' instead of 'object_hook' to preserve the
# order of the metadata.
# - The hook gets called for dicts (not lists), and the deepest dicts
# get called first (so you can ensure that when you receive a dict,
# its contents have already been fed to the hook).
# Compatibility:
# - Before Pandoc 1.8 (1.7 or earlier, AKA "Pandoc Legacy"),
# the JSON is a list:
# [{"unMeta":{META}},[BLOCKS]]
# - Afterwards, it's a dict:
# {"pandoc-api-version" : [MAJ, MIN, REV],
# "meta" : META, "blocks": BLOCKS}
# - This means that on legacy, the hook WILL NOT get called on the entire
# document and we need to create the Doc() element by hand
# Corner cases:
# - If META is missing, 'object_pairs_hook' will receive an empty list
# Output format
format = sys.argv[1] if len(sys.argv) > 1 else 'html'
# API Version
if isinstance(doc, Doc):
# Modern Pandoc
doc.format = format
pass
else:
# Legacy Pandoc
metadata, items = doc
assert type(items) == list
assert len(doc) == 2, 'json.load returned list with unexpected size:'
doc = Doc(*items, metadata=metadata, format=format)
return doc
def dump(doc, output_stream=None):
"""
Dump a :class:`.Doc` object into a JSON-encoded text string.
The output will be sent to :data:`sys.stdout` unless an alternative
text stream is given.
To dump to :data:`sys.stdout` just do:
>>> import panflute as pf
>>> doc = pf.Doc(Para(Str('a'))) # Create sample document
>>> pf.dump(doc)
To dump to file:
>>> with open('some-document.json', 'w'. encoding='utf-8') as f:
>>> pf.dump(doc, f)
To dump to a string:
>>> import io
>>> with io.StringIO() as f:
>>> pf.dump(doc, f)
>>> contents = f.getvalue()
:param doc: document, usually created with :func:`.load`
:type doc: :class:`.Doc`
:param output_stream: text stream used as output
(default is :data:`sys.stdout`)
"""
assert type(doc) == Doc, "panflute.dump needs input of type panflute.Doc"
if output_stream is None:
sys.stdout = codecs.getwriter("utf-8")(sys.stdout) if py2 else codecs.getwriter("utf-8")(sys.stdout.detach())
output_stream = sys.stdout
# Switch to legacy JSON output; eg: {'t': 'Space', 'c': []}
if doc.api_version is None:
# Switch .to_json() to legacy
Citation.backup = Citation.to_json
Citation.to_json = Citation.to_json_legacy
# Switch ._slots_to_json() to legacy
for E in [Table, OrderedList, Quoted, Math]:
E.backup = E._slots_to_json
E._slots_to_json = E._slots_to_json_legacy
# Switch .to_json() to method of base class
for E in EMPTY_ELEMENTS:
E.backup = E.to_json
E.to_json = Element.to_json
json_serializer = lambda elem: elem.to_json()
output_stream.write(json.dumps(
obj=doc,
default=json_serializer, # Serializer
check_circular=False,
separators=(',', ':'), # Compact separators, like Pandoc
ensure_ascii=False # For Pandoc compat
))
# Undo legacy changes
if doc.api_version is None:
Citation.to_json = Citation.backup
for E in [Table, OrderedList, Quoted, Math]:
E._slots_to_json = E.backup
for E in EMPTY_ELEMENTS:
E.to_json = E.backup
def toJSONFilters(*args, **kwargs):
"""
Wrapper for :func:`.run_filters`
"""
return run_filters(*args, **kwargs)
def toJSONFilter(*args, **kwargs):
"""
Wapper for :func:`.run_filter`, which calls :func:`.run_filters`
toJSONFilter(action, prepare=None, finalize=None, input_stream=None, output_stream=None, \*\*kwargs)
Receive a Pandoc document from stdin, apply the *action* function to each element, and write it back to stdout.
See also :func:`.toJSONFilters`
"""
return run_filter(*args, **kwargs)
def run_filters(actions,
prepare=None, finalize=None,
input_stream=None, output_stream=None,
doc=None,
**kwargs):
"""
Receive a Pandoc document from the input stream (default is stdin),
walk through it applying the functions in *actions* to each element,
and write it back to the output stream (default is stdout).
Notes:
- It receives and writes the Pandoc documents as JSON--encoded strings;
this is done through the :func:`.load` and :func:`.dump` functions.
- It walks through the document once for every function in *actions*,
so the actions are applied sequentially.
- By default, it will read from stdin and write to stdout,
but these can be modified.
- It can also apply functions to the entire document at the beginning and
end; this allows for global operations on the document.
- If ``doc`` is a :class:`.Doc` instead of ``None``, ``run_filters``
will return the document instead of writing it to the output stream.
:param actions: sequence of functions; each function takes (element, doc)
as argument, so a valid header would be ``def action(elem, doc):``
:type actions: [:class:`function`]
:param prepare: function executed at the beginning;
right after the document is received and parsed
:type prepare: :class:`function`
:param finalize: function executed at the end;
right before the document is converted back to JSON and written to stdout.
:type finalize: :class:`function`
:param input_stream: text stream used as input
(default is :data:`sys.stdin`)
:param output_stream: text stream used as output
(default is :data:`sys.stdout`)
:param doc: ``None`` unless running panflute as a filter, in which case this will be a :class:`.Doc` element
:type doc: ``None`` | :class:`.Doc`
:param \*kwargs: keyword arguments will be passed through to the *action*
functions (so they can actually receive more than just two arguments
(*element* and *doc*)
"""
load_and_dump = (doc is None)
if load_and_dump:
doc = load(input_stream=input_stream)
if prepare is not None:
prepare(doc)
for action in actions:
if kwargs:
action = partial(action, **kwargs)
doc = doc.walk(action, doc)
if finalize is not None:
finalize(doc)
if load_and_dump:
dump(doc, output_stream=output_stream)
else:
return(doc)
def run_filter(action, *args, **kwargs):
"""
Wapper for :func:`.run_filters`
Receive a Pandoc document from stdin, apply the *action* function to each element, and write it back to stdout.
See :func:`.run_filters`
"""
return run_filters([action], *args, **kwargs)
|
sergiocorreia/panflute | examples/pandocfilters/gabc.py | gabc | python | def gabc(key, value, fmt, meta): # pylint:disable=I0011,W0613
if key == 'Code':
[[ident, classes, kvs], contents] = value # pylint:disable=I0011,W0612
kvs = {key: value for key, value in kvs}
if "gabc" in classes:
if fmt == "latex":
if ident == "":
label = ""
else:
label = '\\label{' + ident + '}'
return latex(
"\n\\smallskip\n{%\n" +
latexsnippet('\\gregorioscore{' + contents + '}', kvs) +
"%\n}" +
label
)
else:
infile = contents + (
'.gabc' if '.gabc' not in contents else ''
)
with open(infile, 'r') as doc:
code = doc.read().split('%%\n')[1]
return [Image(['', [], []], [], [
png(
contents,
latexsnippet('\\gregorioscore', kvs)
),
""
])]
elif key == 'CodeBlock':
[[ident, classes, kvs], contents] = value
kvs = {key: value for key, value in kvs}
if "gabc" in classes:
if fmt == "latex":
if ident == "":
label = ""
else:
label = '\\label{' + ident + '}'
return [latexblock(
"\n\\smallskip\n{%\n" +
latexsnippet('\\gabcsnippet{' + contents + '}', kvs) +
"%\n}" +
label
)]
else:
return Para([Image(['', [], []], [], [
png(
contents,
latexsnippet('\\gabcsnippet', kvs)
),
""
])]) | Handle gabc file inclusion and gabc code block. | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/examples/pandocfilters/gabc.py#L120-L172 | [
"def png(contents, latex_command):\n \"\"\"Creates a png if needed.\"\"\"\n outfile = sha(contents + latex_command)\n src = os.path.join(IMAGEDIR, outfile + '.png')\n if not os.path.isfile(src):\n try:\n os.mkdir(IMAGEDIR)\n stderr.write('Created directory ' + IMAGEDIR + '\\... | #!/usr/bin/env python3
"""
Pandoc filter to convert code blocks with class "gabc" to LaTeX
\\gabcsnippet commands in LaTeX output, and to images in HTML output.
Assumes Ghostscript, LuaLaTeX, [Gregorio](http://gregorio-project.github.io/)
and a reasonable selection of LaTeX packages are installed.
"""
import os
from sys import getfilesystemencoding, stderr
from subprocess import Popen, call, PIPE, DEVNULL
from hashlib import sha1
from pandocfilters import toJSONFilter, RawBlock, RawInline, Para, Image
IMAGEDIR = "tmp_gabc"
LATEX_DOC = """\\documentclass{article}
\\usepackage{libertine}
\\usepackage[autocompile]{gregoriotex}
\\pagestyle{empty}
\\begin{document}
%s
\\end{document}
"""
def sha(code):
"""Returns sha1 hash of the code"""
return sha1(code.encode(getfilesystemencoding())).hexdigest()
def latex(code):
"""LaTeX inline"""
return RawInline('latex', code)
def latexblock(code):
"""LaTeX block"""
return RawBlock('latex', code)
def htmlblock(code):
"""Html block"""
return RawBlock('html', code)
def latexsnippet(code, kvs):
"""Take in account key/values"""
snippet = ''
staffsize = int(kvs['staffsize']) if 'staffsize' in kvs else 17
annotationsize = .56 * staffsize
if 'mode' in kvs:
snippet = (
"\\greannotation{{\\fontsize{%s}{%s}\\selectfont{}%s}}\n" %
(annotationsize, annotationsize, kvs['mode'])
) + snippet
if 'annotation' in kvs:
snippet = (
"\\grechangedim{annotationseparation}{%s mm}{0}\n"
"\\greannotation{{\\fontsize{%s}{%s}\\selectfont{}%s}}\n" %
(staffsize / 34, annotationsize, annotationsize, kvs['annotation'])
) + snippet
snippet = (
"\\grechangestaffsize{%s}\n" % staffsize +
"\\def\\greinitialformat#1{{\\fontsize{%s}{%s}\\selectfont{}#1}}" %
(2.75 * staffsize, 2.75 * staffsize)
) + snippet
snippet = "\\setlength{\\parskip}{0pt}\n" + snippet + code
return snippet
def latex2png(snippet, outfile):
    """Compile a LaTeX *snippet* to ``IMAGEDIR/<outfile>.png``.

    Runs lualatex (with shell-escape restricted to the Gregorio toolchain),
    crops the resulting PDF with pdfcrop and rasterizes it with Ghostscript
    at 144 dpi.
    """
    pngimage = os.path.join(IMAGEDIR, outfile + '.png')
    # Copy the environment instead of mutating os.environ in place, so the
    # settings below do not leak into the parent process and its siblings.
    environment = os.environ.copy()
    environment['openout_any'] = 'a'
    environment['shell_escape_commands'] = \
        "bibtex,bibtex8,kpsewhich,makeindex,mpost,repstopdf,gregorio"
    proc = Popen(
        ["lualatex", '-output-directory=' + IMAGEDIR],
        stdin=PIPE,
        stdout=DEVNULL,
        env=environment
    )
    # Feed the document through communicate(), which writes, closes stdin
    # and waits; the original's stdin.close() after communicate() was a no-op.
    proc.communicate(input=(LATEX_DOC % (snippet)).encode("utf-8"))
    call(["pdfcrop", os.path.join(IMAGEDIR, "texput.pdf")], stdout=DEVNULL)
    call(
        [
            "gs",
            "-sDEVICE=pngalpha",
            "-r144",
            "-sOutputFile=" + pngimage,
            os.path.join(IMAGEDIR, "texput-crop.pdf"),
        ],
        stdout=DEVNULL,
    )
def png(contents, latex_command):
    """Return the path of a cached PNG for *contents*, rendering it if needed.

    Images are cached under IMAGEDIR, keyed by the SHA-1 of the LaTeX
    command plus contents, so each unique snippet is compiled only once.
    """
    outfile = sha(contents + latex_command)
    src = os.path.join(IMAGEDIR, outfile + '.png')
    if not os.path.isfile(src):
        if not os.path.isdir(IMAGEDIR):
            # exist_ok tolerates a concurrent filter creating the directory,
            # while real failures (e.g. permissions) still raise instead of
            # being silently swallowed by a blanket `except OSError: pass`.
            os.makedirs(IMAGEDIR, exist_ok=True)
            stderr.write('Created directory ' + IMAGEDIR + '\n')
        latex2png(latex_command + "{" + contents + "}", outfile)
        stderr.write('Created image ' + src + '\n')
    return src
if __name__ == "__main__":
toJSONFilter(gabc)
|
sergiocorreia/panflute | panflute/tools.py | yaml_filter | python | def yaml_filter(element, doc, tag=None, function=None, tags=None,
strict_yaml=False):
'''
Convenience function for parsing code blocks with YAML options
This function is useful to create a filter that applies to
code blocks that have specific classes.
It is used as an argument of ``run_filter``, with two additional options:
``tag`` and ``function``.
Using this is equivalent to having filter functions that:
1. Check if the element is a code block
2. Check if the element belongs to a specific class
3. Split the YAML options (at the beginning of the block, by looking
for ``...`` or ``---`` strings in a separate line
4. Parse the YAML
5. Use the YAML options and (optionally) the data that follows the YAML
to return a new or modified element
Instead, you just need to:
1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
with the additional arguments ``tag`` and ``function``
2. Construct a ``fenced_action`` function that takes four arguments:
(options, data, element, doc). Note that options is a dict and data
is a raw string. Notice that this is similar to the ``action``
functions of standard filters, but with *options* and *data* as the
new ones.
Note: if you want to apply multiple functions to separate classes,
you can use the ``tags`` argument, which receives a dict of
``tag: function`` pairs.
Note: use the ``strict_yaml=True`` option in order to allow for more verbose
but flexible YAML metadata: more than one YAML blocks are allowed, but
they all must start with ``---`` (even at the beginning) and end with
``---`` or ``...``. Also, YAML is not the default content
when no delimiters are set.
Example::
"""
Replace code blocks of class 'foo' with # horizontal rules
"""
import panflute as pf
def fenced_action(options, data, element, doc):
count = options.get('count', 1)
div = pf.Div(attributes={'count': str(count)})
div.content.extend([pf.HorizontalRule] * count)
return div
if __name__ == '__main__':
pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
'''
# Allow for either tag+function or a dict {tag: function}
assert (tag is None) + (tags is None) == 1 # XOR
if tags is None:
tags = {tag: function}
if type(element) == CodeBlock:
for tag in tags:
if tag in element.classes:
function = tags[tag]
if not strict_yaml:
# Split YAML and data parts (separated by ... or ---)
raw = re.split("^([.]{3,}|[-]{3,})$",
element.text, 1, re.MULTILINE)
data = raw[2] if len(raw) > 2 else ''
data = data.lstrip('\n')
raw = raw[0]
try:
options = yaml.safe_load(raw)
except yaml.scanner.ScannerError:
debug("panflute: malformed YAML block")
return
if options is None:
options = {}
else:
options = {}
data = []
raw = re.split("^([.]{3,}|[-]{3,})$",
element.text, 0, re.MULTILINE)
rawmode = True
for chunk in raw:
chunk = chunk.strip('\n')
if not chunk:
continue
if rawmode:
if chunk.startswith('---'):
rawmode = False
else:
data.append(chunk)
else:
if chunk.startswith('---') or chunk.startswith('...'):
rawmode = True
else:
try:
options.update(yaml.safe_load(chunk))
except yaml.scanner.ScannerError:
debug("panflute: malformed YAML block")
return
data = '\n'.join(data)
return function(options=options, data=data,
element=element, doc=doc) | Convenience function for parsing code blocks with YAML options
This function is useful to create a filter that applies to
code blocks that have specific classes.
It is used as an argument of ``run_filter``, with two additional options:
``tag`` and ``function``.
Using this is equivalent to having filter functions that:
1. Check if the element is a code block
2. Check if the element belongs to a specific class
3. Split the YAML options (at the beginning of the block, by looking
    for ``...`` or ``---`` strings in a separate line)
4. Parse the YAML
5. Use the YAML options and (optionally) the data that follows the YAML
to return a new or modified element
Instead, you just need to:
1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
with the additional arguments ``tag`` and ``function``
2. Construct a ``fenced_action`` function that takes four arguments:
(options, data, element, doc). Note that options is a dict and data
is a raw string. Notice that this is similar to the ``action``
functions of standard filters, but with *options* and *data* as the
new ones.
Note: if you want to apply multiple functions to separate classes,
you can use the ``tags`` argument, which receives a dict of
``tag: function`` pairs.
Note: use the ``strict_yaml=True`` option in order to allow for more verbose
but flexible YAML metadata: more than one YAML blocks are allowed, but
they all must start with ``---`` (even at the beginning) and end with
``---`` or ``...``. Also, YAML is not the default content
when no delimiters are set.
Example::
"""
Replace code blocks of class 'foo' with # horizontal rules
"""
import panflute as pf
def fenced_action(options, data, element, doc):
count = options.get('count', 1)
div = pf.Div(attributes={'count': str(count)})
div.content.extend([pf.HorizontalRule] * count)
return div
if __name__ == '__main__':
pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action) | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L44-L158 | null | # ---------------------------
# Imports
# ---------------------------
from .base import Element
from .elements import *
from .io import dump
import io
import os
import re
import sys
import json
import yaml
import shlex
# shutil.which: new in version 3.3
try:
from shutil import which
except ImportError:
from shutilwhich import which
from subprocess import Popen, PIPE
from functools import partial
py2 = sys.version_info[0] == 2
if py2: str = basestring
# ---------------------------
# Constants
# ---------------------------
HorizontalSpaces = (Space, LineBreak, SoftBreak)
VerticalSpaces = (Para, )
# ---------------------------
# Convenience functions
# ---------------------------
def debug(*args, **kwargs):
    """Same as ``print``, but writes to ``stderr`` (which Pandoc does not intercept)."""
    stream = sys.stderr
    print(file=stream, *args, **kwargs)
# ---------------------------
# Functions that extract content
# ---------------------------
def stringify(element, newlines=True):
    """
    Return the raw text version of an element (and its child elements).

    Example:

        >>> from panflute import *
        >>> e1 = Emph(Str('Hello'), Space, Str('world!'))
        >>> e2 = Strong(Str('Bye!'))
        >>> para = Para(e1, Space, e2)
        >>> stringify(para)
        'Hello world! Bye!\n\n'

    :param newlines: add a new line after a paragraph (default True)
    :type newlines: :class:`bool`
    :rtype: :class:`str`
    """
    pieces = []

    # Collect the textual contribution of each node while walking the tree.
    def collect(e, doc):
        if hasattr(e, 'text'):
            pieces.append(e.text)
        elif isinstance(e, HorizontalSpaces):
            pieces.append(' ')
        elif isinstance(e, VerticalSpaces) and newlines:
            pieces.append('\n\n')
        elif type(e) == Citation:
            pieces.append('')
        else:
            pieces.append('')

    element.walk(collect)
    return ''.join(pieces)
def _get_metadata(self, key='', default=None, builtin=True):
    """
    get_metadata([key, default, builtin])

    Look up document metadata by a dotted key path (``key1.key2``),
    returning *default* when any component of the path is missing.
    This avoids repeatedly checking whether nested dicts exist, since
    the frontmatter might not contain the keys we expect.

    With ``builtin=True`` (the default) the result is converted to plain
    Python types instead of :class:`.MetaValue` elements (e.g. a MetaBool
    becomes True/False).

    :param key: dot-separated key path; '' returns the entire metadata dict
    :type key: ``str``
    :param default: value returned when the key path is not found (default ``None``)
    :param builtin: if True, return built-in Python types (default ``True``)

    :Example:

        >>> doc.metadata['format']['show-frame'] = True
        >>> show_frame = doc.get_metadata('format.show-frame', False)
    """
    assert isinstance(key, str)
    meta = self.metadata
    if key:
        # Descend one path component at a time; bail out with the default
        # as soon as a component is missing or the node is not a mapping.
        for part in key.split('.'):
            if not (isinstance(meta, MetaMap) and part in meta.content):
                return default
            meta = meta[part]
    return meta2builtin(meta) if builtin else meta
def meta2builtin(meta):
    """Recursively convert a MetaValue tree into built-in Python types."""
    if isinstance(meta, MetaBool):
        return meta.boolean
    if isinstance(meta, MetaString):
        return meta.text
    if isinstance(meta, MetaList):
        return [meta2builtin(item) for item in meta.content.list]
    if isinstance(meta, MetaMap):
        # NOTE(review): OrderedDict is not imported in the visible file;
        # presumably it arrives via `from .elements import *` — confirm.
        return OrderedDict((k, meta2builtin(v))
                           for k, v in meta.content.dict.items())
    if isinstance(meta, (MetaInlines, MetaBlocks)):
        return stringify(meta)
    # Unknown node type: report it and pass it through unchanged.
    debug("MISSING", type(meta))
    return meta
# Bind the method
Doc.get_metadata = _get_metadata
# ---------------------------
# Functions that rely on external calls
# ---------------------------
def shell(args, wait=True, msg=None):
    """
    Run an external command.

    With ``wait=True`` (the default), block until it finishes and return
    its stdout as bytes, raising ``IOError`` with the captured stderr on a
    non-zero exit code.  *msg* is fed to the process's stdin.

    With ``wait=False``, launch the command detached and return the
    ``Popen`` handle.
    """
    # Fix Windows error if passed a string
    if isinstance(args, str):
        args = shlex.split(args, posix=(os.name != "nt"))
        if os.name == "nt":
            args = [arg.replace('/', '\\') for arg in args]
    if wait:
        proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate(input=msg)
        if proc.returncode != 0:
            raise IOError(err)
        return out
    else:
        # DETACHED_PROCESS is a Windows-only creation flag; passing a
        # non-zero creationflags on POSIX raises ValueError, so only set
        # it on Windows.  Return the handle so callers can manage it.
        kwargs = {'creationflags': 0x00000008} if os.name == 'nt' else {}
        return Popen(args, **kwargs)
#def get_exe_path():
# reg = winreg.ConnectRegistry(None,winreg.HKEY_CLASSES_ROOT)
#
# # Fetch verb linked to the dta extension
# path = '.dta'
# key = winreg.OpenKey(reg, path)
# verb = winreg.QueryValue(key, None) # Alternatives: .dta .do
#
# # Fetch command linked to that verb
# path = '{}\shell\open\command'.format(verb)
# key = winreg.OpenKey(reg, path)
# cmd = winreg.QueryValue(key, None)
# fn = cmd.strip('"').split('"')[0]
# #raise(Exception(fn))
# return fn
#
#def check_correct_executable(fn):
# return os.path.isfile(fn) and 'stata' in fn.lower()
def run_pandoc(text='', args=None):
    """
    Low-level helper: invoke the pandoc executable with *text* on stdin and
    the extra command-line *args*, returning its stdout decoded as UTF-8.

    Raises OSError when pandoc cannot be found and IOError (with pandoc's
    stderr) when it exits with a non-zero status.
    """
    args = [] if args is None else args
    pandoc_path = which('pandoc')
    if pandoc_path is None or not os.path.exists(pandoc_path):
        raise OSError("Path to pandoc executable does not exists")
    proc = Popen([pandoc_path] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate(input=text.encode('utf-8'))
    if proc.returncode != 0:
        raise IOError(err)
    return out.decode('utf-8')
def convert_text(text,
                 input_format='markdown',
                 output_format='panflute',
                 standalone=False,
                 extra_args=None):
    """
    Convert formatted text (usually markdown) by calling Pandoc internally.

    The default output format ('panflute') returns a tree of Pandoc
    elements; combined with ``standalone=True`` the tree root is a
    :class:`.Doc` element.

    :param text: text that will be converted
    :type text: :class:`str` | :class:`.Element` | :class:`list` of :class:`.Element`
    :param input_format: format of the text (default 'markdown'); any Pandoc
        input format is valid, plus 'panflute' (a tree of Pandoc elements)
    :param output_format: format of the output (default 'panflute'); only
        non-binary Pandoc formats are allowed (markdown/latex yes, docx/pdf no)
    :param standalone: whether the result is a standalone document
    :type standalone: :class:`bool`
    :param extra_args: extra arguments passed to Pandoc
    :type extra_args: :class:`list`
    :rtype: :class:`list` | :class:`.Doc` | :class:`str`
    """
    if input_format == 'panflute':
        # Pandoc insists on a matching api-version, so when we receive loose
        # elements we wrap them in a Doc built with Pandoc's own version
        # (obtained by round-tripping an empty document).
        if not isinstance(text, Doc):
            tmp_doc = convert_text('', standalone=True)
            api_version = tmp_doc.api_version
            if isinstance(text, Element):
                text = [text]
            text = Doc(*text, api_version=api_version)
        # Serialize the Doc to JSON for Pandoc's stdin
        with io.StringIO() as buf:
            dump(text, buf)
            text = buf.getvalue()

    in_fmt = 'json' if input_format == 'panflute' else input_format
    out_fmt = 'json' if output_format == 'panflute' else output_format
    if extra_args is None:
        extra_args = []
    if standalone:
        extra_args.append('--standalone')

    out = inner_convert_text(text, in_fmt, out_fmt, extra_args)

    if output_format == 'panflute':
        out = json.loads(out, object_pairs_hook=from_json)
        if standalone:
            if not isinstance(out, Doc):  # Pandoc 1.7.2 and earlier
                metadata, items = out
                out = Doc(*items, metadata=metadata)
        else:
            if isinstance(out, Doc):  # Pandoc 1.8 and later
                out = out.content.list
            else:
                out = out[1]  # Pandoc 1.7.2 and earlier
    return out
def inner_convert_text(text, input_format, output_format, extra_args):
    """Like convert_text(), but without support for 'panflute' input/output."""
    args = ['--from={}'.format(input_format),
            '--to={}'.format(output_format)] + extra_args
    out = run_pandoc(text, args)
    # Normalize Windows \r\n line endings to \n
    return "\n".join(out.splitlines())
# ---------------------------
# Functions that modify content
# ---------------------------
def _replace_keyword(self, keyword, replacement, count=0):
    """
    replace_keyword(keyword, replacement[, count])

    Walk the element tree (usually an entire :class:`.Doc`) and replace
    every ``Str`` node whose text equals *keyword*.

    Note: a block *replacement* cannot stand where an inline ``Str`` was;
    in that case the closest ancestor (e.g. the parent) is replaced
    instead, but only when that ancestor has a single child.

    :param keyword: string that will be searched (cannot have spaces!)
    :type keyword: :class:`str`
    :param replacement: element placed where the matching ``Str`` was
    :type replacement: :class:`.Element`
    :param count: number of occurrences to replace; 0 (the default)
        replaces all occurrences
    :type count: :class:`int`
    """
    def swap_inline(e, doc):
        if type(e) == Str and e.text == keyword:
            doc.num_matches += 1
            if not count or doc.num_matches <= count:
                return replacement

    def swap_block(e, doc):
        if hasattr(e, 'content') and len(e.content) == 1:
            child = e.content[0]
            if type(child) == Str and child.text == keyword:
                if isinstance(e, Block):
                    doc.num_matches += 1
                    if not count or doc.num_matches <= count:
                        return replacement
                elif isinstance(e, Inline):
                    # A block cannot live inside an inline; keep the text.
                    return Str(keyword)

    doc = self.doc
    if doc is None:
        raise Exception('No root document')
    doc.num_matches = 0
    if isinstance(replacement, Inline):
        return self.walk(swap_inline, doc)
    elif isinstance(replacement, Block):
        return self.walk(swap_block, doc)
    else:
        raise NotImplementedError(type(replacement))
# Bind the method
Element.replace_keyword = _replace_keyword
def get_option(options=None, local_tag=None, doc=None, doc_tag=None,
               default=None, error_on_none=True):
    """Fetch an option variable from a local (element-level) option tag,
    a document-level metadata tag, or a default.

    :type options: ``dict``
    :type local_tag: ``str``
    :type doc: :class:`Doc`
    :type doc_tag: ``str``
    :type default: ``any``
    :type error_on_none: ``bool``

    The order of preference is local > document > default, but when a
    local or document lookup yields None the next level down is used.
    If ``error_on_none=True`` and the final value is None, a ValueError
    is raised.  This lets you set global variables that can optionally
    be overridden at the local level, e.g. applying different docx styles::

        import panflute as pf

        def action(elem, doc):
            if type(elem) == pf.Div:
                style = pf.get_option(elem.attributes, "name",
                                      doc, "style-div.name")
                elem.attributes["custom-style"] = style
    """
    # element level
    if options is not None and local_tag is not None:
        if local_tag in options and options[local_tag] is not None:
            return options[local_tag]
    # document level
    if doc is not None and doc_tag is not None:
        value = doc.get_metadata(doc_tag, None)
        if value is not None:
            return value
    # default level
    if default is None and error_on_none:
        raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
    return default
|
sergiocorreia/panflute | panflute/tools.py | stringify | python | def stringify(element, newlines=True):
def attach_str(e, doc, answer):
if hasattr(e, 'text'):
ans = e.text
elif isinstance(e, HorizontalSpaces):
ans = ' '
elif isinstance(e, VerticalSpaces) and newlines:
ans = '\n\n'
elif type(e) == Citation:
ans = ''
else:
ans = ''
answer.append(ans)
answer = []
f = partial(attach_str, answer=answer)
element.walk(f)
return ''.join(answer) | Return the raw text version of an elements (and its children element).
Example:
>>> from panflute import *
>>> e1 = Emph(Str('Hello'), Space, Str('world!'))
>>> e2 = Strong(Str('Bye!'))
>>> para = Para(e1, Space, e2)
>>> stringify(para)
'Hello world! Bye!\n\n'
:param newlines: add a new line after a paragraph (default True)
:type newlines: :class:`bool`
:rtype: :class:`str` | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L173-L207 | null | # ---------------------------
# Imports
# ---------------------------
from .base import Element
from .elements import *
from .io import dump
import io
import os
import re
import sys
import json
import yaml
import shlex
# shutil.which: new in version 3.3
try:
from shutil import which
except ImportError:
from shutilwhich import which
from subprocess import Popen, PIPE
from functools import partial
py2 = sys.version_info[0] == 2
if py2: str = basestring
# ---------------------------
# Constants
# ---------------------------
HorizontalSpaces = (Space, LineBreak, SoftBreak)
VerticalSpaces = (Para, )
# ---------------------------
# Convenience functions
# ---------------------------
def yaml_filter(element, doc, tag=None, function=None, tags=None,
                strict_yaml=False):
    '''
    Convenience function for parsing code blocks with YAML options.

    Use it as the action of ``run_filter`` together with the extra
    arguments ``tag`` and ``function`` (or ``tags``, a dict of
    ``tag: function`` pairs, to handle several classes at once).

    For each code block carrying one of the given classes, the YAML
    options at the top of the block (separated from the rest by a
    ``---`` or ``...`` line) are parsed, and the handler is called as
    ``function(options=..., data=..., element=..., doc=...)`` where
    *options* is a dict and *data* is the remaining raw string.

    Note: with ``strict_yaml=True``, more than one YAML block is allowed,
    but each must start with ``---`` (even at the beginning) and end with
    ``---`` or ``...``; YAML is no longer the default content when no
    delimiters are present.

    Example::

        """
        Replace code blocks of class 'foo' with # horizontal rules
        """
        import panflute as pf

        def fenced_action(options, data, element, doc):
            count = options.get('count', 1)
            div = pf.Div(attributes={'count': str(count)})
            div.content.extend([pf.HorizontalRule] * count)
            return div

        if __name__ == '__main__':
            pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
    '''
    DELIM = "^([.]{3,}|[-]{3,})$"

    def parse_default(text):
        # YAML header first; data is whatever follows the first fence.
        pieces = re.split(DELIM, text, 1, re.MULTILINE)
        data = pieces[2].lstrip('\n') if len(pieces) > 2 else ''
        try:
            options = yaml.safe_load(pieces[0])
        except yaml.scanner.ScannerError:
            debug("panflute: malformed YAML block")
            return None
        return (options if options is not None else {}), data

    def parse_strict(text):
        # Text outside ---/... fences is data; fenced chunks are YAML.
        options = {}
        data = []
        in_raw = True
        for chunk in re.split(DELIM, text, 0, re.MULTILINE):
            chunk = chunk.strip('\n')
            if not chunk:
                continue
            if in_raw:
                if chunk.startswith('---'):
                    in_raw = False
                else:
                    data.append(chunk)
            elif chunk.startswith('---') or chunk.startswith('...'):
                in_raw = True
            else:
                try:
                    options.update(yaml.safe_load(chunk))
                except yaml.scanner.ScannerError:
                    debug("panflute: malformed YAML block")
                    return None
        return options, '\n'.join(data)

    # Exactly one of tag+function or tags must be supplied (XOR)
    assert (tag is None) + (tags is None) == 1
    if tags is None:
        tags = {tag: function}

    if type(element) == CodeBlock:
        for tag in tags:
            if tag in element.classes:
                parsed = (parse_strict(element.text) if strict_yaml
                          else parse_default(element.text))
                if parsed is None:
                    return  # malformed YAML: leave the block untouched
                options, data = parsed
                return tags[tag](options=options, data=data,
                                 element=element, doc=doc)
def debug(*args, **kwargs):
"""
Same as print, but prints to ``stderr``
(which is not intercepted by Pandoc).
"""
print(file=sys.stderr, *args, **kwargs)
# ---------------------------
# Functions that extract content
# ---------------------------
def _get_metadata(self, key='', default=None, builtin=True):
"""
get_metadata([key, default, simple])
Retrieve metadata with nested keys separated by dots.
This is useful to avoid repeatedly checking if a dict exists, as
the frontmatter might not have the keys that we expect.
With ``builtin=True`` (the default), it will convert the results to
built-in Python types, instead of :class:`.MetaValue` elements. EG: instead of returning a MetaBool it will return True|False.
:param key: string with the keys separated by a dot (``key1.key2``). Default is an empty string (which returns the entire metadata dict)
:type key: ``str``
:param default: return value in case the key is not found (default is ``None``)
:param builtin: If True, return built-in Python types (default is ``True``)
:Example:
>>> doc.metadata['format']['show-frame'] = True
>>> # ...
>>> # afterwards:
>>> show_frame = doc.get_metadata('format.show-frame', False)
>>> stata_path = doc.get_metadata('media.path.figures', '.')
"""
# Retrieve metadata
assert isinstance(key, str)
meta = self.metadata
# Retrieve specific key
if key:
for k in key.split('.'):
if isinstance(meta, MetaMap) and k in meta.content:
meta = meta[k]
else:
return default
# Stringify contents
return meta2builtin(meta) if builtin else meta
def meta2builtin(meta):
if isinstance(meta, MetaBool):
return meta.boolean
elif isinstance(meta, MetaString):
return meta.text
elif isinstance(meta, MetaList):
return [meta2builtin(v) for v in meta.content.list]
elif isinstance(meta, MetaMap):
return OrderedDict((k, meta2builtin(v)) for (k, v)
in meta.content.dict.items())
elif isinstance(meta, (MetaInlines, MetaBlocks)):
return stringify(meta)
else:
debug("MISSING", type(meta))
return meta
# Bind the method
Doc.get_metadata = _get_metadata
# ---------------------------
# Functions that rely on external calls
# ---------------------------
def shell(args, wait=True, msg=None):
"""
Execute the external command and get its exitcode, stdout and stderr.
"""
# Fix Windows error if passed a string
if isinstance(args, str):
args = shlex.split(args, posix=(os.name != "nt"))
if os.name == "nt":
args = [arg.replace('/', '\\') for arg in args]
if wait:
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(input=msg)
exitcode = proc.returncode
if exitcode != 0:
raise IOError(err)
return out
else:
DETACHED_PROCESS = 0x00000008
proc = Popen(args, creationflags=DETACHED_PROCESS)
#def get_exe_path():
# reg = winreg.ConnectRegistry(None,winreg.HKEY_CLASSES_ROOT)
#
# # Fetch verb linked to the dta extension
# path = '.dta'
# key = winreg.OpenKey(reg, path)
# verb = winreg.QueryValue(key, None) # Alternatives: .dta .do
#
# # Fetch command linked to that verb
# path = '{}\shell\open\command'.format(verb)
# key = winreg.OpenKey(reg, path)
# cmd = winreg.QueryValue(key, None)
# fn = cmd.strip('"').split('"')[0]
# #raise(Exception(fn))
# return fn
#
#def check_correct_executable(fn):
# return os.path.isfile(fn) and 'stata' in fn.lower()
def run_pandoc(text='', args=None):
"""
Low level function that calls Pandoc with (optionally)
some input text and/or arguments
"""
if args is None:
args = []
pandoc_path = which('pandoc')
if pandoc_path is None or not os.path.exists(pandoc_path):
raise OSError("Path to pandoc executable does not exists")
proc = Popen([pandoc_path] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(input=text.encode('utf-8'))
exitcode = proc.returncode
if exitcode != 0:
raise IOError(err)
return out.decode('utf-8')
def convert_text(text,
input_format='markdown',
output_format='panflute',
standalone=False,
extra_args=None):
"""
Convert formatted text (usually markdown) by calling Pandoc internally
The default output format ('panflute') will return a tree
of Pandoc elements. When combined with 'standalone=True', the tree root
will be a 'Doc' element.
Example:
>>> from panflute import *
>>> md = 'Some *markdown* **text** ~xyz~'
>>> tex = r'Some $x^y$ or $x_n = \sqrt{a + b}$ \textit{a}'
>>> convert_text(md)
[Para(Str(Some) Space Emph(Str(markdown)) Space Strong(Str(text)) Space Subscript(Str(xyz)))]
>>> convert_text(tex)
[Para(Str(Some) Space Math(x^y; format='InlineMath') Space Str(or) Space Math(x_n = \sqrt{a + b}; format='InlineMath') Space RawInline(\textit{a}; format='tex'))]
:param text: text that will be converted
:type text: :class:`str` | :class:`.Element` | :class:`list` of :class:`.Element`
:param input_format: format of the text (default 'markdown').
Any Pandoc input format is valid, plus 'panflute' (a tree of Pandoc
elements)
:param output_format: format of the output
(default is 'panflute' which creates the tree of Pandoc elements).
Non-binary Pandoc formats are allowed (e.g. markdown, latex is allowed,
but docx and pdf are not).
:param standalone: whether the results will be a standalone document
or not.
:type standalone: :class:`bool`
:param extra_args: extra arguments passed to Pandoc
:type extra_args: :class:`list`
:rtype: :class:`list` | :class:`.Doc` | :class:`str`
Note: for a more general solution,
see `pyandoc <https://github.com/kennethreitz/pyandoc/>`_
by Kenneth Reitz.
"""
if input_format == 'panflute':
# Problem:
# We need a Doc element, but received a list of elements.
# So we wrap-up the list in a Doc, but with what pandoc-api version?
# (remember that Pandoc requires a matching api-version!)
# Workaround: call Pandoc with empty text to get its api-version
if not isinstance(text, Doc):
tmp_doc = convert_text('', standalone=True)
api_version = tmp_doc.api_version
if isinstance(text, Element):
text = [text]
text = Doc(*text, api_version=api_version)
# Dump the Doc into json
with io.StringIO() as f:
dump(text, f)
text = f.getvalue()
in_fmt = 'json' if input_format == 'panflute' else input_format
out_fmt = 'json' if output_format == 'panflute' else output_format
if extra_args is None:
extra_args = []
if standalone:
extra_args.append('--standalone')
out = inner_convert_text(text, in_fmt, out_fmt, extra_args)
if output_format == 'panflute':
out = json.loads(out, object_pairs_hook=from_json)
if standalone:
if not isinstance(out, Doc): # Pandoc 1.7.2 and earlier
metadata, items = out
out = Doc(*items, metadata=metadata)
else:
if isinstance(out, Doc): # Pandoc 1.8 and later
out = out.content.list
else:
out = out[1] # Pandoc 1.7.2 and earlier
return out
def inner_convert_text(text, input_format, output_format, extra_args):
# like convert_text(), but does not support 'panflute' input/output
from_arg = '--from={}'.format(input_format)
to_arg = '--to={}'.format(output_format)
args = [from_arg, to_arg] + extra_args
out = run_pandoc(text, args)
out = "\n".join(out.splitlines()) # Replace \r\n with \n
return out
# ---------------------------
# Functions that modify content
# ---------------------------
def _replace_keyword(self, keyword, replacement, count=0):
    """
    replace_keyword(keyword, replacement[, count])

    Walk through the element and its children
    and look for Str() objects that contains
    exactly the keyword. Then, replace it.

    Usually applied to an entire document (a :class:`.Doc` element)

    Note: If the replacement is a block, it cannot be put in place of
    a Str element. As a solution, the closest ancestor (e.g. the parent)
    will be replaced instead, but only if possible
    (if the parent only has one child).

    Example:

    >>> from panflute import *
    >>> p1 = Para(Str('Spam'), Space, Emph(Str('and'), Space, Str('eggs')))
    >>> p2 = Para(Str('eggs'))
    >>> p3 = Plain(Emph(Str('eggs')))
    >>> doc = Doc(p1, p2, p3)
    >>> doc.content
    ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(eggs))) Para(Str(eggs)) Plain(Emph(Str(eggs))))
    >>> doc.replace_keyword('eggs', Str('ham'))
    >>> doc.content
    ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(ham)) Plain(Emph(Str(ham))))
    >>> doc.replace_keyword(keyword='ham', replacement=Para(Str('spam')))
    >>> doc.content
    ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(spam)) Para(Str(spam)))

    :param keyword: string that will be searched (cannot have spaces!)
    :type keyword: :class:`str`
    :param replacement: element that will be placed in turn of the ``Str``
        element that contains the keyword.
    :type replacement: :class:`.Element`
    :param count: number of occurrences that will be replaced.
        If count is not given or is set to zero, all occurrences
        will be replaced.
    :type count: :class:`int`
    """

    # Case 1: the replacement is an Inline -- the matching Str node can
    # simply be swapped for it during the walk.
    def replace_with_inline(e, doc):
        if type(e) == Str and e.text == keyword:
            doc.num_matches += 1
            # count == 0 means "replace all occurrences"
            if not count or doc.num_matches <= count:
                return replacement

    # Case 2: the replacement is a Block -- a Block cannot sit where an
    # Inline was, so instead replace the closest ancestor, but only when
    # that ancestor holds nothing except the matching Str.
    def replace_with_block(e, doc):
        if hasattr(e, 'content') and len(e.content) == 1:
            ee = e.content[0]
            if type(ee) == Str and ee.text == keyword:
                if isinstance(e, Block):
                    doc.num_matches += 1
                    if not count or doc.num_matches <= count:
                        return replacement
                elif isinstance(e, Inline):
                    # An Inline ancestor cannot host a Block; collapse it
                    # to a bare Str so an enclosing Block ancestor can
                    # match instead -- presumably relies on the walk
                    # visiting children before parents (confirm).
                    return Str(keyword)

    doc = self.doc
    if doc is None:
        raise Exception('No root document')
    # The match counter lives on the Doc so both nested helpers share it.
    doc.num_matches = 0
    if isinstance(replacement, Inline):
        return self.walk(replace_with_inline, doc)
    elif isinstance(replacement, Block):
        return self.walk(replace_with_block, doc)
    else:
        # Only Inline and Block replacements are supported
        raise NotImplementedError(type(replacement))
# Bind the method
Element.replace_keyword = _replace_keyword
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
    """Fetch an option value from one of three levels.

    The order of preference is local (element-level attribute/option
    tag) > document (metadata tag) > default. A level whose lookup
    yields ``None`` is skipped in favor of the next level down; if every
    level yields ``None`` and ``error_on_none`` is true, a
    ``ValueError`` is raised. This lets you set global variables that
    can be optionally overridden at a local level.

    :type options: ``dict``
    :type local_tag: ``str``
    :type doc: :class:`Doc`
    :type doc_tag: ``str``
    :type default: ``any``
    :type error_on_none: ``bool``

    Example -- applying different docx styles::

        import panflute as pf

        def action(elem, doc):
            if type(elem) == pf.Div:
                style = pf.get_option(elem.attributes, "name", doc, "style-div.name")
                elem.attributes["custom-style"] = style
    """
    # 1) element-level attribute
    if options is not None and local_tag is not None:
        local_value = options.get(local_tag)
        if local_value is not None:
            return local_value

    # 2) document-level metadata
    if doc is not None and doc_tag is not None:
        meta_value = doc.get_metadata(doc_tag, None)
        if meta_value is not None:
            return meta_value

    # 3) fallback default (may itself be None)
    if default is None and error_on_none:
        raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
    return default
|
sergiocorreia/panflute | panflute/tools.py | _get_metadata | python | def _get_metadata(self, key='', default=None, builtin=True):
# Retrieve metadata
assert isinstance(key, str)
meta = self.metadata
# Retrieve specific key
if key:
for k in key.split('.'):
if isinstance(meta, MetaMap) and k in meta.content:
meta = meta[k]
else:
return default
# Stringify contents
return meta2builtin(meta) if builtin else meta | get_metadata([key, default, simple])
Retrieve metadata with nested keys separated by dots.
This is useful to avoid repeatedly checking if a dict exists, as
the frontmatter might not have the keys that we expect.
With ``builtin=True`` (the default), it will convert the results to
built-in Python types, instead of :class:`.MetaValue` elements. EG: instead of returning a MetaBool it will return True|False.
:param key: string with the keys separated by a dot (``key1.key2``). Default is an empty string (which returns the entire metadata dict)
:type key: ``str``
:param default: return value in case the key is not found (default is ``None``)
:param builtin: If True, return built-in Python types (default is ``True``)
:Example:
>>> doc.metadata['format']['show-frame'] = True
>>> # ...
>>> # afterwards:
>>> show_frame = doc.get_metadata('format.show-frame', False)
>>> stata_path = doc.get_metadata('media.path.figures', '.') | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L210-L249 | null | # ---------------------------
# Imports
# ---------------------------
from .base import Element
from .elements import *
from .io import dump
import io
import os
import re
import sys
import json
import yaml
import shlex
# shutil.which: new in version 3.3
try:
from shutil import which
except ImportError:
from shutilwhich import which
from subprocess import Popen, PIPE
from functools import partial
py2 = sys.version_info[0] == 2
if py2: str = basestring
# ---------------------------
# Constants
# ---------------------------
HorizontalSpaces = (Space, LineBreak, SoftBreak)
VerticalSpaces = (Para, )
# ---------------------------
# Convenience functions
# ---------------------------
def yaml_filter(element, doc, tag=None, function=None, tags=None,
                strict_yaml=False):
    '''
    Convenience function for parsing code blocks with YAML options

    This function is useful to create a filter that applies to
    code blocks that have specific classes.

    It is used as an argument of ``run_filter``, with two additional options:
    ``tag`` and ``function``.

    Using this is equivalent to having filter functions that:

    1. Check if the element is a code block
    2. Check if the element belongs to a specific class
    3. Split the YAML options (at the beginning of the block, by looking
       for ``...`` or ``---`` strings in a separate line
    4. Parse the YAML
    5. Use the YAML options and (optionally) the data that follows the YAML
       to return a new or modified element

    Instead, you just need to:

    1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
       with the additional arguments ``tag`` and ``function``
    2. Construct a ``fenced_action`` function that takes four arguments:
       (options, data, element, doc). Note that options is a dict and data
       is a raw string. Notice that this is similar to the ``action``
       functions of standard filters, but with *options* and *data* as the
       new ones.

    Note: if you want to apply multiple functions to separate classes,
    you can use the ``tags`` argument, which receives a dict of
    ``tag: function`` pairs.

    Note: use the ``strict_yaml=True`` option in order to allow for more verbose
    but flexible YAML metadata: more than one YAML blocks are allowed, but
    they all must start with ``---`` (even at the beginning) and end with
    ``---`` or ``...``. Also, YAML is not the default content
    when no delimiters are set.

    Example::

        """
        Replace code blocks of class 'foo' with # horizontal rules
        """
        import panflute as pf

        def fenced_action(options, data, element, doc):
            count = options.get('count', 1)
            div = pf.Div(attributes={'count': str(count)})
            div.content.extend([pf.HorizontalRule] * count)
            return div

        if __name__ == '__main__':
            pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
    '''
    # Allow for either tag+function or a dict {tag: function}
    assert (tag is None) + (tags is None) == 1  # XOR
    if tags is None:
        tags = {tag: function}

    if type(element) == CodeBlock:
        for tag in tags:
            if tag in element.classes:
                function = tags[tag]
                if not strict_yaml:
                    # Split YAML and data parts (separated by ... or ---).
                    # maxsplit=1: only the first fence line separates the
                    # YAML header from the raw data that follows it.
                    raw = re.split("^([.]{3,}|[-]{3,})$",
                                   element.text, 1, re.MULTILINE)
                    # raw = [yaml_part, fence, data_part] when a fence
                    # was found; otherwise the whole text is YAML.
                    data = raw[2] if len(raw) > 2 else ''
                    data = data.lstrip('\n')
                    raw = raw[0]
                    try:
                        options = yaml.safe_load(raw)
                    except yaml.scanner.ScannerError:
                        debug("panflute: malformed YAML block")
                        return
                    if options is None:
                        options = {}
                else:
                    # Strict mode: the block may interleave several
                    # ---/... delimited YAML chunks with raw-data chunks;
                    # walk the split pieces with a small state machine.
                    options = {}
                    data = []
                    raw = re.split("^([.]{3,}|[-]{3,})$",
                                   element.text, 0, re.MULTILINE)
                    rawmode = True  # True = currently outside a YAML chunk
                    for chunk in raw:
                        chunk = chunk.strip('\n')
                        if not chunk:
                            continue
                        if rawmode:
                            if chunk.startswith('---'):
                                # Opening fence: switch to YAML mode
                                rawmode = False
                            else:
                                data.append(chunk)
                        else:
                            if chunk.startswith('---') or chunk.startswith('...'):
                                # Closing fence: back to raw-data mode
                                rawmode = True
                            else:
                                try:
                                    options.update(yaml.safe_load(chunk))
                                except yaml.scanner.ScannerError:
                                    debug("panflute: malformed YAML block")
                                    return
                    data = '\n'.join(data)

                # First matching tag wins; hand options + data to the user
                # callback, which may return a replacement element.
                return function(options=options, data=data,
                                element=element, doc=doc)
def debug(*args, **kwargs):
    """
    Same as ``print``, but writes to ``stderr``
    (which is not intercepted by Pandoc).
    """
    print(*args, file=sys.stderr, **kwargs)
# ---------------------------
# Functions that extract content
# ---------------------------
def stringify(element, newlines=True):
    """
    Return the raw text version of an element (and its child elements).

    Example:

    >>> from panflute import *
    >>> e1 = Emph(Str('Hello'), Space, Str('world!'))
    >>> e2 = Strong(Str('Bye!'))
    >>> para = Para(e1, Space, e2)
    >>> stringify(para)
    'Hello world! Bye!\n\n'

    :param newlines: add a new line after a paragraph (default True)
    :type newlines: :class:`bool`
    :rtype: :class:`str`
    """
    pieces = []

    def collect(e, doc):
        # Leaf text wins; spacing elements map to whitespace; every
        # other element (including Citation) contributes nothing itself.
        if hasattr(e, 'text'):
            pieces.append(e.text)
        elif isinstance(e, HorizontalSpaces):
            pieces.append(' ')
        elif isinstance(e, VerticalSpaces) and newlines:
            pieces.append('\n\n')
        else:
            pieces.append('')

    element.walk(collect)
    return ''.join(pieces)
def meta2builtin(meta):
    # Recursively convert a MetaValue tree into built-in Python types:
    # MetaBool -> bool, MetaString -> str, MetaList -> list,
    # MetaMap -> ordered dict, MetaInlines/MetaBlocks -> stringified text.
    if isinstance(meta, MetaBool):
        return meta.boolean
    elif isinstance(meta, MetaString):
        return meta.text
    elif isinstance(meta, MetaList):
        return [meta2builtin(v) for v in meta.content.list]
    elif isinstance(meta, MetaMap):
        # NOTE(review): OrderedDict does not appear in this module's
        # visible imports -- presumably re-exported via
        # `from .elements import *`; confirm, otherwise this raises
        # NameError at runtime.
        return OrderedDict((k, meta2builtin(v)) for (k, v)
                           in meta.content.dict.items())
    elif isinstance(meta, (MetaInlines, MetaBlocks)):
        return stringify(meta)
    else:
        # Unknown MetaValue subtype: warn on stderr and return unconverted.
        debug("MISSING", type(meta))
        return meta
# Bind the method
Doc.get_metadata = _get_metadata
# ---------------------------
# Functions that rely on external calls
# ---------------------------
def shell(args, wait=True, msg=None):
    """
    Execute the external command and get its exitcode, stdout and stderr.

    :param args: command plus arguments, as a list or a single string
        (a string is split with shlex, with Windows-specific handling).
    :param wait: if True, block until the process finishes and return
        its stdout (bytes); if False, spawn a detached process and
        return None.
    :param msg: bytes fed to the process's stdin (only used when waiting).
    :raises IOError: if the awaited process exits with a non-zero code;
        the exception carries the process's stderr.
    """
    # Fix Windows error if passed a string
    if isinstance(args, str):
        # posix=False on Windows keeps backslashes/quoting intact
        args = shlex.split(args, posix=(os.name != "nt"))
        if os.name == "nt":
            args = [arg.replace('/', '\\') for arg in args]
    if wait:
        proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate(input=msg)
        exitcode = proc.returncode
        if exitcode != 0:
            raise IOError(err)
        return out
    else:
        # NOTE(review): DETACHED_PROCESS is a Windows-only creation flag;
        # passing a non-zero creationflags on POSIX raises -- confirm this
        # branch is only ever reached on Windows. Also note the spawned
        # Popen handle is discarded (the function returns None here).
        DETACHED_PROCESS = 0x00000008
        proc = Popen(args, creationflags=DETACHED_PROCESS)
#def get_exe_path():
# reg = winreg.ConnectRegistry(None,winreg.HKEY_CLASSES_ROOT)
#
# # Fetch verb linked to the dta extension
# path = '.dta'
# key = winreg.OpenKey(reg, path)
# verb = winreg.QueryValue(key, None) # Alternatives: .dta .do
#
# # Fetch command linked to that verb
# path = '{}\shell\open\command'.format(verb)
# key = winreg.OpenKey(reg, path)
# cmd = winreg.QueryValue(key, None)
# fn = cmd.strip('"').split('"')[0]
# #raise(Exception(fn))
# return fn
#
#def check_correct_executable(fn):
# return os.path.isfile(fn) and 'stata' in fn.lower()
def run_pandoc(text='', args=None):
    """
    Low-level function that calls Pandoc with (optionally)
    some input text and/or arguments.
    """
    args = [] if args is None else args

    # Locate the executable up front so a missing pandoc fails loudly
    pandoc_path = which('pandoc')
    if pandoc_path is None or not os.path.exists(pandoc_path):
        raise OSError("Path to pandoc executable does not exists")

    cmd = [pandoc_path]
    cmd.extend(args)
    proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = proc.communicate(input=text.encode('utf-8'))
    if proc.returncode != 0:
        raise IOError(stderr)
    return stdout.decode('utf-8')
def convert_text(text,
                 input_format='markdown',
                 output_format='panflute',
                 standalone=False,
                 extra_args=None):
    r"""
    Convert formatted text (usually markdown) by calling Pandoc internally

    The default output format ('panflute') will return a tree
    of Pandoc elements. When combined with 'standalone=True', the tree root
    will be a 'Doc' element.

    Example:

    >>> from panflute import *
    >>> md = 'Some *markdown* **text** ~xyz~'
    >>> tex = r'Some $x^y$ or $x_n = \sqrt{a + b}$ \textit{a}'
    >>> convert_text(md)
    [Para(Str(Some) Space Emph(Str(markdown)) Space Strong(Str(text)) Space Subscript(Str(xyz)))]
    >>> convert_text(tex)
    [Para(Str(Some) Space Math(x^y; format='InlineMath') Space Str(or) Space Math(x_n = \sqrt{a + b}; format='InlineMath') Space RawInline(\textit{a}; format='tex'))]

    :param text: text that will be converted
    :type text: :class:`str` | :class:`.Element` | :class:`list` of :class:`.Element`
    :param input_format: format of the text (default 'markdown').
        Any Pandoc input format is valid, plus 'panflute' (a tree of Pandoc
        elements)
    :param output_format: format of the output
        (default is 'panflute' which creates the tree of Pandoc elements).
        Non-binary Pandoc formats are allowed (e.g. markdown, latex is allowed,
        but docx and pdf are not).
    :param standalone: whether the results will be a standalone document
        or not.
    :type standalone: :class:`bool`
    :param extra_args: extra arguments passed to Pandoc
    :type extra_args: :class:`list`
    :rtype: :class:`list` | :class:`.Doc` | :class:`str`

    Note: for a more general solution,
    see `pyandoc <https://github.com/kennethreitz/pyandoc/>`_
    by Kenneth Reitz.
    """
    if input_format == 'panflute':
        # Problem:
        # We need a Doc element, but received a list of elements.
        # So we wrap-up the list in a Doc, but with what pandoc-api version?
        # (remember that Pandoc requires a matching api-version!)
        # Workaround: call Pandoc with empty text to get its api-version
        if not isinstance(text, Doc):
            tmp_doc = convert_text('', standalone=True)
            api_version = tmp_doc.api_version
            if isinstance(text, Element):
                text = [text]
            text = Doc(*text, api_version=api_version)
        # Dump the Doc into json
        with io.StringIO() as f:
            dump(text, f)
            text = f.getvalue()

    # Pandoc's interchange representation for 'panflute' is json
    in_fmt = 'json' if input_format == 'panflute' else input_format
    out_fmt = 'json' if output_format == 'panflute' else output_format

    if extra_args is None:
        extra_args = []

    if standalone:
        # NOTE(review): this appends to the caller's extra_args list when
        # one was passed in -- a visible side effect; confirm intended.
        extra_args.append('--standalone')

    out = inner_convert_text(text, in_fmt, out_fmt, extra_args)

    if output_format == 'panflute':
        # Rebuild the element tree from Pandoc's json output
        out = json.loads(out, object_pairs_hook=from_json)
        if standalone:
            if not isinstance(out, Doc):  # Pandoc 1.7.2 and earlier
                metadata, items = out
                out = Doc(*items, metadata=metadata)
        else:
            if isinstance(out, Doc):  # Pandoc 1.8 and later
                out = out.content.list
            else:
                out = out[1]  # Pandoc 1.7.2 and earlier

    return out
def inner_convert_text(text, input_format, output_format, extra_args):
# like convert_text(), but does not support 'panflute' input/output
from_arg = '--from={}'.format(input_format)
to_arg = '--to={}'.format(output_format)
args = [from_arg, to_arg] + extra_args
out = run_pandoc(text, args)
out = "\n".join(out.splitlines()) # Replace \r\n with \n
return out
# ---------------------------
# Functions that modify content
# ---------------------------
def _replace_keyword(self, keyword, replacement, count=0):
"""
replace_keyword(keyword, replacement[, count])
Walk through the element and its children
and look for Str() objects that contains
exactly the keyword. Then, replace it.
Usually applied to an entire document (a :class:`.Doc` element)
Note: If the replacement is a block, it cannot be put in place of
a Str element. As a solution, the closest ancestor (e.g. the parent)
will be replaced instead, but only if possible
(if the parent only has one child).
Example:
>>> from panflute import *
>>> p1 = Para(Str('Spam'), Space, Emph(Str('and'), Space, Str('eggs')))
>>> p2 = Para(Str('eggs'))
>>> p3 = Plain(Emph(Str('eggs')))
>>> doc = Doc(p1, p2, p3)
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(eggs))) Para(Str(eggs)) Plain(Emph(Str(eggs))))
>>> doc.replace_keyword('eggs', Str('ham'))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(ham)) Plain(Emph(Str(ham))))
>>> doc.replace_keyword(keyword='ham', replacement=Para(Str('spam')))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(spam)) Para(Str(spam)))
:param keyword: string that will be searched (cannot have spaces!)
:type keyword: :class:`str`
:param replacement: element that will be placed in turn of the ``Str``
element that contains the keyword.
:type replacement: :class:`.Element`
:param count: number of occurrences that will be replaced.
If count is not given or is set to zero, all occurrences
will be replaced.
:type count: :class:`int`
"""
def replace_with_inline(e, doc):
if type(e) == Str and e.text == keyword:
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
def replace_with_block(e, doc):
if hasattr(e, 'content') and len(e.content) == 1:
ee = e.content[0]
if type(ee) == Str and ee.text == keyword:
if isinstance(e, Block):
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
elif isinstance(e, Inline):
return Str(keyword)
doc = self.doc
if doc is None:
raise Exception('No root document')
doc.num_matches = 0
if isinstance(replacement, Inline):
return self.walk(replace_with_inline, doc)
elif isinstance(replacement, Block):
return self.walk(replace_with_block, doc)
else:
raise NotImplementedError(type(replacement))
# Bind the method
Element.replace_keyword = _replace_keyword
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
""" fetch an option variable,
from either a local (element) level option/attribute tag,
document level metadata tag,
or a default
:type options: ``dict``
:type local_tag: ``str``
:type doc: :class:`Doc`
:type doc_tag: ``str``
:type default: ``any``
:type error_on_none: ``bool``
The order of preference is local > document > default,
although if a local or document tag returns None, then the next level down is used.
Also, if error_on_none=True and the final variable is None, then a ValueError will be raised
In this manner you can set global variables, which can be optionally overriden at a local level.
For example, to apply different styles to docx text
main.md:
------------------
style-div:
name: MyStyle
------------------
:::style
some text
:::
::: {.style name=MyOtherStyle}
some more text
:::
style_filter.py:
import panflute as pf
def action(elem, doc):
if type(elem) == pf.Div:
style = pf.get_option(elem.attributes, "name", doc, "style-div.name")
elem.attributes["custom-style"] = style
def main(doc=None):
return run_filter(action, doc=doc)
if __name__ == "__main__":
main()
"""
variable = None
# element level
if options is not None and local_tag is not None:
if local_tag in options and options[local_tag] is not None:
variable = options[local_tag]
if variable is not None:
return variable
# doc level
if doc is not None and doc_tag is not None:
variable = doc.get_metadata(doc_tag, None)
if variable is not None:
return variable
# default level
variable = default
if variable is None and error_on_none:
raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
return variable
|
sergiocorreia/panflute | panflute/tools.py | shell | python | def shell(args, wait=True, msg=None):
# Fix Windows error if passed a string
if isinstance(args, str):
args = shlex.split(args, posix=(os.name != "nt"))
if os.name == "nt":
args = [arg.replace('/', '\\') for arg in args]
if wait:
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(input=msg)
exitcode = proc.returncode
if exitcode != 0:
raise IOError(err)
return out
else:
DETACHED_PROCESS = 0x00000008
proc = Popen(args, creationflags=DETACHED_PROCESS) | Execute the external command and get its exitcode, stdout and stderr. | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L277-L297 | null | # ---------------------------
# Imports
# ---------------------------
from .base import Element
from .elements import *
from .io import dump
import io
import os
import re
import sys
import json
import yaml
import shlex
# shutil.which: new in version 3.3
try:
from shutil import which
except ImportError:
from shutilwhich import which
from subprocess import Popen, PIPE
from functools import partial
py2 = sys.version_info[0] == 2
if py2: str = basestring
# ---------------------------
# Constants
# ---------------------------
HorizontalSpaces = (Space, LineBreak, SoftBreak)
VerticalSpaces = (Para, )
# ---------------------------
# Convenience functions
# ---------------------------
def yaml_filter(element, doc, tag=None, function=None, tags=None,
strict_yaml=False):
'''
Convenience function for parsing code blocks with YAML options
This function is useful to create a filter that applies to
code blocks that have specific classes.
It is used as an argument of ``run_filter``, with two additional options:
``tag`` and ``function``.
Using this is equivalent to having filter functions that:
1. Check if the element is a code block
2. Check if the element belongs to a specific class
3. Split the YAML options (at the beginning of the block, by looking
for ``...`` or ``---`` strings in a separate line
4. Parse the YAML
5. Use the YAML options and (optionally) the data that follows the YAML
to return a new or modified element
Instead, you just need to:
1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
with the additional arguments ``tag`` and ``function``
2. Construct a ``fenced_action`` function that takes four arguments:
(options, data, element, doc). Note that options is a dict and data
is a raw string. Notice that this is similar to the ``action``
functions of standard filters, but with *options* and *data* as the
new ones.
Note: if you want to apply multiple functions to separate classes,
you can use the ``tags`` argument, which receives a dict of
``tag: function`` pairs.
Note: use the ``strict_yaml=True`` option in order to allow for more verbose
but flexible YAML metadata: more than one YAML blocks are allowed, but
they all must start with ``---`` (even at the beginning) and end with
``---`` or ``...``. Also, YAML is not the default content
when no delimiters are set.
Example::
"""
Replace code blocks of class 'foo' with # horizontal rules
"""
import panflute as pf
def fenced_action(options, data, element, doc):
count = options.get('count', 1)
div = pf.Div(attributes={'count': str(count)})
div.content.extend([pf.HorizontalRule] * count)
return div
if __name__ == '__main__':
pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
'''
# Allow for either tag+function or a dict {tag: function}
assert (tag is None) + (tags is None) == 1 # XOR
if tags is None:
tags = {tag: function}
if type(element) == CodeBlock:
for tag in tags:
if tag in element.classes:
function = tags[tag]
if not strict_yaml:
# Split YAML and data parts (separated by ... or ---)
raw = re.split("^([.]{3,}|[-]{3,})$",
element.text, 1, re.MULTILINE)
data = raw[2] if len(raw) > 2 else ''
data = data.lstrip('\n')
raw = raw[0]
try:
options = yaml.safe_load(raw)
except yaml.scanner.ScannerError:
debug("panflute: malformed YAML block")
return
if options is None:
options = {}
else:
options = {}
data = []
raw = re.split("^([.]{3,}|[-]{3,})$",
element.text, 0, re.MULTILINE)
rawmode = True
for chunk in raw:
chunk = chunk.strip('\n')
if not chunk:
continue
if rawmode:
if chunk.startswith('---'):
rawmode = False
else:
data.append(chunk)
else:
if chunk.startswith('---') or chunk.startswith('...'):
rawmode = True
else:
try:
options.update(yaml.safe_load(chunk))
except yaml.scanner.ScannerError:
debug("panflute: malformed YAML block")
return
data = '\n'.join(data)
return function(options=options, data=data,
element=element, doc=doc)
def debug(*args, **kwargs):
"""
Same as print, but prints to ``stderr``
(which is not intercepted by Pandoc).
"""
print(file=sys.stderr, *args, **kwargs)
# ---------------------------
# Functions that extract content
# ---------------------------
def stringify(element, newlines=True):
"""
Return the raw text version of an elements (and its children element).
Example:
>>> from panflute import *
>>> e1 = Emph(Str('Hello'), Space, Str('world!'))
>>> e2 = Strong(Str('Bye!'))
>>> para = Para(e1, Space, e2)
>>> stringify(para)
'Hello world! Bye!\n\n'
:param newlines: add a new line after a paragraph (default True)
:type newlines: :class:`bool`
:rtype: :class:`str`
"""
def attach_str(e, doc, answer):
if hasattr(e, 'text'):
ans = e.text
elif isinstance(e, HorizontalSpaces):
ans = ' '
elif isinstance(e, VerticalSpaces) and newlines:
ans = '\n\n'
elif type(e) == Citation:
ans = ''
else:
ans = ''
answer.append(ans)
answer = []
f = partial(attach_str, answer=answer)
element.walk(f)
return ''.join(answer)
def _get_metadata(self, key='', default=None, builtin=True):
    """
    get_metadata([key, default, simple])

    Retrieve metadata with nested keys separated by dots.

    This is useful to avoid repeatedly checking if a dict exists, as
    the frontmatter might not have the keys that we expect.

    With ``builtin=True`` (the default), it will convert the results to
    built-in Python types, instead of :class:`.MetaValue` elements. EG: instead of returning a MetaBool it will return True|False.

    :param key: string with the keys separated by a dot (``key1.key2``). Default is an empty string (which returns the entire metadata dict)
    :type key: ``str``
    :param default: return value in case the key is not found (default is ``None``)
    :param builtin: If True, return built-in Python types (default is ``True``)

    :Example:

    >>> doc.metadata['format']['show-frame'] = True
    >>> # ...
    >>> # afterwards:
    >>> show_frame = doc.get_metadata('format.show-frame', False)
    >>> stata_path = doc.get_metadata('media.path.figures', '.')
    """
    # Retrieve metadata
    assert isinstance(key, str)
    meta = self.metadata
    # Retrieve specific key: descend one MetaMap level per dot-separated
    # component, returning `default` as soon as a component is missing
    # or the current node is not a mapping.
    if key:
        for k in key.split('.'):
            if isinstance(meta, MetaMap) and k in meta.content:
                meta = meta[k]
            else:
                return default
    # Stringify contents (convert to built-in types) unless builtin=False
    return meta2builtin(meta) if builtin else meta
def meta2builtin(meta):
if isinstance(meta, MetaBool):
return meta.boolean
elif isinstance(meta, MetaString):
return meta.text
elif isinstance(meta, MetaList):
return [meta2builtin(v) for v in meta.content.list]
elif isinstance(meta, MetaMap):
return OrderedDict((k, meta2builtin(v)) for (k, v)
in meta.content.dict.items())
elif isinstance(meta, (MetaInlines, MetaBlocks)):
return stringify(meta)
else:
debug("MISSING", type(meta))
return meta
# Bind the method
Doc.get_metadata = _get_metadata
# ---------------------------
# Functions that rely on external calls
# ---------------------------
#def get_exe_path():
# reg = winreg.ConnectRegistry(None,winreg.HKEY_CLASSES_ROOT)
#
# # Fetch verb linked to the dta extension
# path = '.dta'
# key = winreg.OpenKey(reg, path)
# verb = winreg.QueryValue(key, None) # Alternatives: .dta .do
#
# # Fetch command linked to that verb
# path = '{}\shell\open\command'.format(verb)
# key = winreg.OpenKey(reg, path)
# cmd = winreg.QueryValue(key, None)
# fn = cmd.strip('"').split('"')[0]
# #raise(Exception(fn))
# return fn
#
#def check_correct_executable(fn):
# return os.path.isfile(fn) and 'stata' in fn.lower()
def run_pandoc(text='', args=None):
"""
Low level function that calls Pandoc with (optionally)
some input text and/or arguments
"""
if args is None:
args = []
pandoc_path = which('pandoc')
if pandoc_path is None or not os.path.exists(pandoc_path):
raise OSError("Path to pandoc executable does not exists")
proc = Popen([pandoc_path] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(input=text.encode('utf-8'))
exitcode = proc.returncode
if exitcode != 0:
raise IOError(err)
return out.decode('utf-8')
def convert_text(text,
input_format='markdown',
output_format='panflute',
standalone=False,
extra_args=None):
"""
Convert formatted text (usually markdown) by calling Pandoc internally
The default output format ('panflute') will return a tree
of Pandoc elements. When combined with 'standalone=True', the tree root
will be a 'Doc' element.
Example:
>>> from panflute import *
>>> md = 'Some *markdown* **text** ~xyz~'
>>> tex = r'Some $x^y$ or $x_n = \sqrt{a + b}$ \textit{a}'
>>> convert_text(md)
[Para(Str(Some) Space Emph(Str(markdown)) Space Strong(Str(text)) Space Subscript(Str(xyz)))]
>>> convert_text(tex)
[Para(Str(Some) Space Math(x^y; format='InlineMath') Space Str(or) Space Math(x_n = \sqrt{a + b}; format='InlineMath') Space RawInline(\textit{a}; format='tex'))]
:param text: text that will be converted
:type text: :class:`str` | :class:`.Element` | :class:`list` of :class:`.Element`
:param input_format: format of the text (default 'markdown').
Any Pandoc input format is valid, plus 'panflute' (a tree of Pandoc
elements)
:param output_format: format of the output
(default is 'panflute' which creates the tree of Pandoc elements).
Non-binary Pandoc formats are allowed (e.g. markdown, latex is allowed,
but docx and pdf are not).
:param standalone: whether the results will be a standalone document
or not.
:type standalone: :class:`bool`
:param extra_args: extra arguments passed to Pandoc
:type extra_args: :class:`list`
:rtype: :class:`list` | :class:`.Doc` | :class:`str`
Note: for a more general solution,
see `pyandoc <https://github.com/kennethreitz/pyandoc/>`_
by Kenneth Reitz.
"""
if input_format == 'panflute':
# Problem:
# We need a Doc element, but received a list of elements.
# So we wrap-up the list in a Doc, but with what pandoc-api version?
# (remember that Pandoc requires a matching api-version!)
# Workaround: call Pandoc with empty text to get its api-version
if not isinstance(text, Doc):
tmp_doc = convert_text('', standalone=True)
api_version = tmp_doc.api_version
if isinstance(text, Element):
text = [text]
text = Doc(*text, api_version=api_version)
# Dump the Doc into json
with io.StringIO() as f:
dump(text, f)
text = f.getvalue()
in_fmt = 'json' if input_format == 'panflute' else input_format
out_fmt = 'json' if output_format == 'panflute' else output_format
if extra_args is None:
extra_args = []
if standalone:
extra_args.append('--standalone')
out = inner_convert_text(text, in_fmt, out_fmt, extra_args)
if output_format == 'panflute':
out = json.loads(out, object_pairs_hook=from_json)
if standalone:
if not isinstance(out, Doc): # Pandoc 1.7.2 and earlier
metadata, items = out
out = Doc(*items, metadata=metadata)
else:
if isinstance(out, Doc): # Pandoc 1.8 and later
out = out.content.list
else:
out = out[1] # Pandoc 1.7.2 and earlier
return out
def inner_convert_text(text, input_format, output_format, extra_args):
# like convert_text(), but does not support 'panflute' input/output
from_arg = '--from={}'.format(input_format)
to_arg = '--to={}'.format(output_format)
args = [from_arg, to_arg] + extra_args
out = run_pandoc(text, args)
out = "\n".join(out.splitlines()) # Replace \r\n with \n
return out
# ---------------------------
# Functions that modify content
# ---------------------------
def _replace_keyword(self, keyword, replacement, count=0):
    """
    replace_keyword(keyword, replacement[, count])

    Walk through the element and its children and replace every ``Str``
    whose text is exactly *keyword* with *replacement*.
    Usually applied to an entire document (a :class:`.Doc` element).

    Note: a Block replacement cannot be put in place of a Str element
    (which is inline).  As a workaround, the closest ancestor (e.g. the
    parent) is replaced instead, but only when that ancestor holds a
    single child.

    :param keyword: string that will be searched (cannot have spaces!)
    :type keyword: :class:`str`
    :param replacement: element placed where the matching ``Str`` was
    :type replacement: :class:`.Element`
    :param count: number of occurrences to replace; zero (the default)
        replaces all occurrences.
    :type count: :class:`int`
    """

    def swap_inline(elem, doc):
        # Replace the matching Str node directly with the inline element.
        if type(elem) == Str and elem.text == keyword:
            doc.num_matches += 1
            if not count or doc.num_matches <= count:
                return replacement

    def swap_block(elem, doc):
        # Replace the single-child ancestor of the matching Str node.
        if not hasattr(elem, 'content') or len(elem.content) != 1:
            return
        only_child = elem.content[0]
        if type(only_child) != Str or only_child.text != keyword:
            return
        if isinstance(elem, Block):
            doc.num_matches += 1
            if not count or doc.num_matches <= count:
                return replacement
        elif isinstance(elem, Inline):
            # Collapse the inline wrapper down to the bare Str so an
            # enclosing Block can match and be replaced.
            return Str(keyword)

    doc = self.doc
    if doc is None:
        raise Exception('No root document')
    doc.num_matches = 0  # running tally shared with the walkers via doc
    if isinstance(replacement, Inline):
        return self.walk(swap_inline, doc)
    if isinstance(replacement, Block):
        return self.walk(swap_block, doc)
    raise NotImplementedError(type(replacement))


# Bind the method
Element.replace_keyword = _replace_keyword
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
    """Fetch an option value, preferring local > document > default.

    Looks for the value in an element-level options/attributes dict,
    then in the document metadata, then falls back to *default*.  A level
    that yields ``None`` defers to the next one, so document metadata can
    set a global value that individual elements may override, e.g.::

        style = pf.get_option(elem.attributes, "name", doc, "style-div.name")

    :type options: ``dict``
    :type local_tag: ``str``
    :type doc: :class:`Doc`
    :type doc_tag: ``str``
    :type default: ``any``
    :type error_on_none: ``bool``
    :raises ValueError: when the resolved value is ``None`` and
        ``error_on_none`` is True.
    """
    # 1) element-level attribute/option
    if options is not None and local_tag is not None:
        if local_tag in options and options[local_tag] is not None:
            return options[local_tag]
    # 2) document-level metadata
    if doc is not None and doc_tag is not None:
        doc_value = doc.get_metadata(doc_tag, None)
        if doc_value is not None:
            return doc_value
    # 3) fallback default
    if default is None and error_on_none:
        raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
    return default
|
sergiocorreia/panflute | panflute/tools.py | run_pandoc | python | def run_pandoc(text='', args=None):
if args is None:
args = []
pandoc_path = which('pandoc')
if pandoc_path is None or not os.path.exists(pandoc_path):
raise OSError("Path to pandoc executable does not exists")
proc = Popen([pandoc_path] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(input=text.encode('utf-8'))
exitcode = proc.returncode
if exitcode != 0:
raise IOError(err)
return out.decode('utf-8') | Low level function that calls Pandoc with (optionally)
some input text and/or arguments | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L319-L337 | null | # ---------------------------
# Imports
# ---------------------------
from .base import Element
from .elements import *
from .io import dump
import io
import os
import re
import sys
import json
import yaml
import shlex
# shutil.which: new in version 3.3
try:
from shutil import which
except ImportError:
from shutilwhich import which
from subprocess import Popen, PIPE
from functools import partial
py2 = sys.version_info[0] == 2
if py2: str = basestring
# ---------------------------
# Constants
# ---------------------------
HorizontalSpaces = (Space, LineBreak, SoftBreak)
VerticalSpaces = (Para, )
# ---------------------------
# Convenience functions
# ---------------------------
def yaml_filter(element, doc, tag=None, function=None, tags=None,
                strict_yaml=False):
    '''
    Convenience function for parsing code blocks with YAML options

    This function is useful to create a filter that applies to
    code blocks that have specific classes.

    It is used as an argument of ``run_filter``, with two additional options:
    ``tag`` and ``function``.

    Using this is equivalent to having filter functions that:

    1. Check if the element is a code block
    2. Check if the element belongs to a specific class
    3. Split the YAML options (at the beginning of the block, by looking
       for ``...`` or ``---`` strings in a separate line
    4. Parse the YAML
    5. Use the YAML options and (optionally) the data that follows the YAML
       to return a new or modified element

    Instead, you just need to:

    1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
       with the additional arguments ``tag`` and ``function``
    2. Construct a ``fenced_action`` function that takes four arguments:
       (options, data, element, doc). Note that options is a dict and data
       is a raw string. Notice that this is similar to the ``action``
       functions of standard filters, but with *options* and *data* as the
       new ones.

    Note: if you want to apply multiple functions to separate classes,
    you can use the ``tags`` argument, which receives a dict of
    ``tag: function`` pairs.

    Note: use the ``strict_yaml=True`` option in order to allow for more
    verbose but flexible YAML metadata: more than one YAML blocks are
    allowed, but they all must start with ``---`` (even at the beginning)
    and end with ``---`` or ``...``. Also, YAML is not the default content
    when no delimiters are set.

    Example::

        """
        Replace code blocks of class 'foo' with # horizontal rules
        """

        import panflute as pf

        def fenced_action(options, data, element, doc):
            count = options.get('count', 1)
            div = pf.Div(attributes={'count': str(count)})
            div.content.extend([pf.HorizontalRule] * count)
            return div

        if __name__ == '__main__':
            pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
    '''
    # Allow for either tag+function or a dict {tag: function}
    assert (tag is None) + (tags is None) == 1  # XOR
    if tags is None:
        tags = {tag: function}

    if type(element) == CodeBlock:
        # Only the first matching tag fires (return inside the loop)
        for tag in tags:
            if tag in element.classes:
                function = tags[tag]
                if not strict_yaml:
                    # Split YAML and data parts (separated by ... or ---);
                    # raw[0] is the YAML header, raw[2] (if present) the body
                    raw = re.split("^([.]{3,}|[-]{3,})$",
                                   element.text, 1, re.MULTILINE)
                    data = raw[2] if len(raw) > 2 else ''
                    data = data.lstrip('\n')
                    raw = raw[0]
                    try:
                        options = yaml.safe_load(raw)
                    except yaml.scanner.ScannerError:
                        debug("panflute: malformed YAML block")
                        return
                    if options is None:
                        options = {}
                else:
                    # Strict mode: content is data by default; YAML only
                    # inside explicit ---/... fences (several may appear)
                    options = {}
                    data = []
                    raw = re.split("^([.]{3,}|[-]{3,})$",
                                   element.text, 0, re.MULTILINE)
                    rawmode = True
                    for chunk in raw:
                        chunk = chunk.strip('\n')
                        if not chunk:
                            continue
                        if rawmode:
                            if chunk.startswith('---'):
                                rawmode = False
                            else:
                                data.append(chunk)
                        else:
                            if chunk.startswith('---') or chunk.startswith('...'):
                                rawmode = True
                            else:
                                try:
                                    options.update(yaml.safe_load(chunk))
                                except yaml.scanner.ScannerError:
                                    debug("panflute: malformed YAML block")
                                    return
                    data = '\n'.join(data)
                return function(options=options, data=data,
                                element=element, doc=doc)
def debug(*args, **kwargs):
    """
    Same as :func:`print`, except that output goes to ``stderr``
    (which is not intercepted by Pandoc).
    """
    print(*args, file=sys.stderr, **kwargs)
# ---------------------------
# Functions that extract content
# ---------------------------
def stringify(element, newlines=True):
    """
    Return the raw text version of an element (and its child elements).

    Example:

        >>> from panflute import *
        >>> para = Para(Emph(Str('Hello'), Space, Str('world!')))
        >>> stringify(para)
        'Hello world!\n\n'

    :param newlines: add a new line after a paragraph (default True)
    :type newlines: :class:`bool`
    :rtype: :class:`str`
    """
    fragments = []

    def collect(e, doc):
        # Text-bearing nodes contribute their text, spacing nodes
        # contribute whitespace, everything else contributes nothing.
        if hasattr(e, 'text'):
            fragments.append(e.text)
        elif isinstance(e, HorizontalSpaces):
            fragments.append(' ')
        elif isinstance(e, VerticalSpaces) and newlines:
            fragments.append('\n\n')
        else:
            fragments.append('')

    element.walk(collect)
    return ''.join(fragments)
def _get_metadata(self, key='', default=None, builtin=True):
"""
get_metadata([key, default, simple])
Retrieve metadata with nested keys separated by dots.
This is useful to avoid repeatedly checking if a dict exists, as
the frontmatter might not have the keys that we expect.
With ``builtin=True`` (the default), it will convert the results to
built-in Python types, instead of :class:`.MetaValue` elements. EG: instead of returning a MetaBool it will return True|False.
:param key: string with the keys separated by a dot (``key1.key2``). Default is an empty string (which returns the entire metadata dict)
:type key: ``str``
:param default: return value in case the key is not found (default is ``None``)
:param builtin: If True, return built-in Python types (default is ``True``)
:Example:
>>> doc.metadata['format']['show-frame'] = True
>>> # ...
>>> # afterwards:
>>> show_frame = doc.get_metadata('format.show-frame', False)
>>> stata_path = doc.get_metadata('media.path.figures', '.')
"""
# Retrieve metadata
assert isinstance(key, str)
meta = self.metadata
# Retrieve specific key
if key:
for k in key.split('.'):
if isinstance(meta, MetaMap) and k in meta.content:
meta = meta[k]
else:
return default
# Stringify contents
return meta2builtin(meta) if builtin else meta
def meta2builtin(meta):
    """Recursively convert a MetaValue element into built-in Python types."""
    if isinstance(meta, MetaBool):
        return meta.boolean
    if isinstance(meta, MetaString):
        return meta.text
    if isinstance(meta, MetaList):
        return [meta2builtin(item) for item in meta.content.list]
    if isinstance(meta, MetaMap):
        # NOTE(review): OrderedDict is presumably in scope via the
        # star-import from .elements -- it is not imported here; confirm.
        return OrderedDict((k, meta2builtin(v))
                           for k, v in meta.content.dict.items())
    if isinstance(meta, (MetaInlines, MetaBlocks)):
        return stringify(meta)
    debug("MISSING", type(meta))
    return meta


# Bind the method
Doc.get_metadata = _get_metadata
# ---------------------------
# Functions that rely on external calls
# ---------------------------
def shell(args, wait=True, msg=None):
    """
    Execute the external command and get its exitcode, stdout and stderr.

    :param args: command to run; either an argument list or a single
        string (which is tokenized with shlex)
    :param wait: if True, block until the command exits and return its
        stdout; if False, launch the process detached and return None
    :param msg: data piped to the command's stdin
        (NOTE(review): handed straight to Popen.communicate, so it is
        presumably expected to be bytes -- confirm with callers)
    :raises IOError: when the command exits non-zero; the exception
        carries the captured stderr
    """
    # Fix Windows error if passed a string
    if isinstance(args, str):
        args = shlex.split(args, posix=(os.name != "nt"))
        if os.name == "nt":
            # Windows paths use backslashes
            args = [arg.replace('/', '\\') for arg in args]
    if wait:
        proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate(input=msg)
        exitcode = proc.returncode
        if exitcode != 0:
            raise IOError(err)
        return out
    else:
        # NOTE(review): creationflags is Windows-only; this branch
        # presumably fails on POSIX -- confirm intended platforms.
        DETACHED_PROCESS = 0x00000008
        proc = Popen(args, creationflags=DETACHED_PROCESS)
#def get_exe_path():
# reg = winreg.ConnectRegistry(None,winreg.HKEY_CLASSES_ROOT)
#
# # Fetch verb linked to the dta extension
# path = '.dta'
# key = winreg.OpenKey(reg, path)
# verb = winreg.QueryValue(key, None) # Alternatives: .dta .do
#
# # Fetch command linked to that verb
# path = '{}\shell\open\command'.format(verb)
# key = winreg.OpenKey(reg, path)
# cmd = winreg.QueryValue(key, None)
# fn = cmd.strip('"').split('"')[0]
# #raise(Exception(fn))
# return fn
#
#def check_correct_executable(fn):
# return os.path.isfile(fn) and 'stata' in fn.lower()
def convert_text(text,
                 input_format='markdown',
                 output_format='panflute',
                 standalone=False,
                 extra_args=None):
    """
    Convert formatted text (usually markdown) by calling Pandoc internally

    The default output format ('panflute') will return a tree
    of Pandoc elements. When combined with 'standalone=True', the tree root
    will be a 'Doc' element.

    Example:

        >>> from panflute import *
        >>> md = 'Some *markdown* **text** ~xyz~'
        >>> convert_text(md)
        [Para(Str(Some) Space Emph(Str(markdown)) Space Strong(Str(text)) Space Subscript(Str(xyz)))]

    :param text: text that will be converted
    :type text: :class:`str` | :class:`.Element` | :class:`list` of :class:`.Element`
    :param input_format: format of the text (default 'markdown').
        Any Pandoc input format is valid, plus 'panflute' (a tree of Pandoc
        elements)
    :param output_format: format of the output
        (default is 'panflute' which creates the tree of Pandoc elements).
        Non-binary Pandoc formats are allowed (e.g. markdown, latex is
        allowed, but docx and pdf are not).
    :param standalone: whether the results will be a standalone document
        or not.
    :type standalone: :class:`bool`
    :param extra_args: extra arguments passed to Pandoc
    :type extra_args: :class:`list`
    :rtype: :class:`list` | :class:`.Doc` | :class:`str`

    Note: for a more general solution,
    see `pyandoc <https://github.com/kennethreitz/pyandoc/>`_
    by Kenneth Reitz.
    """
    if input_format == 'panflute':
        # We need a Doc element but may have received a bare element or a
        # list of elements.  A Doc must carry the same pandoc-api-version
        # as the Pandoc binary, so run Pandoc once with empty input just
        # to discover that version.
        if not isinstance(text, Doc):
            tmp_doc = convert_text('', standalone=True)
            api_version = tmp_doc.api_version
            if isinstance(text, Element):
                text = [text]
            text = Doc(*text, api_version=api_version)
        # Serialize the Doc into the JSON that Pandoc consumes
        with io.StringIO() as f:
            dump(text, f)
            text = f.getvalue()

    # 'panflute' is not a real Pandoc format; JSON is the wire format
    in_fmt = 'json' if input_format == 'panflute' else input_format
    out_fmt = 'json' if output_format == 'panflute' else output_format

    if extra_args is None:
        extra_args = []
    if standalone:
        # Build a new list instead of appending in place; appending would
        # mutate the caller's list so it grew on every call (bug fix).
        extra_args = extra_args + ['--standalone']

    out = inner_convert_text(text, in_fmt, out_fmt, extra_args)

    if output_format == 'panflute':
        out = json.loads(out, object_pairs_hook=from_json)
        if standalone:
            if not isinstance(out, Doc):  # Pandoc 1.7.2 and earlier
                metadata, items = out
                out = Doc(*items, metadata=metadata)
        else:
            if isinstance(out, Doc):  # Pandoc 1.8 and later
                out = out.content.list
            else:
                out = out[1]  # Pandoc 1.7.2 and earlier
    return out
def inner_convert_text(text, input_format, output_format, extra_args):
    """Call Pandoc to convert *text* between two concrete formats.

    Like convert_text(), but the 'panflute' pseudo-format is not
    supported for either input or output.
    """
    pandoc_args = ['--from={}'.format(input_format),
                   '--to={}'.format(output_format)] + extra_args
    converted = run_pandoc(text, pandoc_args)
    # Normalize Windows line endings (\r\n -> \n)
    return "\n".join(converted.splitlines())
# ---------------------------
# Functions that modify content
# ---------------------------
def _replace_keyword(self, keyword, replacement, count=0):
    """
    replace_keyword(keyword, replacement[, count])

    Walk through the element and its children and replace every ``Str``
    whose text is exactly *keyword* with *replacement*.
    Usually applied to an entire document (a :class:`.Doc` element).

    Note: a Block replacement cannot be put in place of a Str element
    (which is inline).  As a workaround, the closest ancestor (e.g. the
    parent) is replaced instead, but only when that ancestor holds a
    single child.

    :param keyword: string that will be searched (cannot have spaces!)
    :type keyword: :class:`str`
    :param replacement: element placed where the matching ``Str`` was
    :type replacement: :class:`.Element`
    :param count: number of occurrences to replace; zero (the default)
        replaces all occurrences.
    :type count: :class:`int`
    """

    def swap_inline(elem, doc):
        # Replace the matching Str node directly with the inline element.
        if type(elem) == Str and elem.text == keyword:
            doc.num_matches += 1
            if not count or doc.num_matches <= count:
                return replacement

    def swap_block(elem, doc):
        # Replace the single-child ancestor of the matching Str node.
        if not hasattr(elem, 'content') or len(elem.content) != 1:
            return
        only_child = elem.content[0]
        if type(only_child) != Str or only_child.text != keyword:
            return
        if isinstance(elem, Block):
            doc.num_matches += 1
            if not count or doc.num_matches <= count:
                return replacement
        elif isinstance(elem, Inline):
            # Collapse the inline wrapper down to the bare Str so an
            # enclosing Block can match and be replaced.
            return Str(keyword)

    doc = self.doc
    if doc is None:
        raise Exception('No root document')
    doc.num_matches = 0  # running tally shared with the walkers via doc
    if isinstance(replacement, Inline):
        return self.walk(swap_inline, doc)
    if isinstance(replacement, Block):
        return self.walk(swap_block, doc)
    raise NotImplementedError(type(replacement))


# Bind the method
Element.replace_keyword = _replace_keyword
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
    """Fetch an option value, preferring local > document > default.

    Looks for the value in an element-level options/attributes dict,
    then in the document metadata, then falls back to *default*.  A level
    that yields ``None`` defers to the next one, so document metadata can
    set a global value that individual elements may override, e.g.::

        style = pf.get_option(elem.attributes, "name", doc, "style-div.name")

    :type options: ``dict``
    :type local_tag: ``str``
    :type doc: :class:`Doc`
    :type doc_tag: ``str``
    :type default: ``any``
    :type error_on_none: ``bool``
    :raises ValueError: when the resolved value is ``None`` and
        ``error_on_none`` is True.
    """
    # 1) element-level attribute/option
    if options is not None and local_tag is not None:
        if local_tag in options and options[local_tag] is not None:
            return options[local_tag]
    # 2) document-level metadata
    if doc is not None and doc_tag is not None:
        doc_value = doc.get_metadata(doc_tag, None)
        if doc_value is not None:
            return doc_value
    # 3) fallback default
    if default is None and error_on_none:
        raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
    return default
|
sergiocorreia/panflute | panflute/tools.py | convert_text | python | def convert_text(text,
input_format='markdown',
output_format='panflute',
standalone=False,
extra_args=None):
if input_format == 'panflute':
# Problem:
# We need a Doc element, but received a list of elements.
# So we wrap-up the list in a Doc, but with what pandoc-api version?
# (remember that Pandoc requires a matching api-version!)
# Workaround: call Pandoc with empty text to get its api-version
if not isinstance(text, Doc):
tmp_doc = convert_text('', standalone=True)
api_version = tmp_doc.api_version
if isinstance(text, Element):
text = [text]
text = Doc(*text, api_version=api_version)
# Dump the Doc into json
with io.StringIO() as f:
dump(text, f)
text = f.getvalue()
in_fmt = 'json' if input_format == 'panflute' else input_format
out_fmt = 'json' if output_format == 'panflute' else output_format
if extra_args is None:
extra_args = []
if standalone:
extra_args.append('--standalone')
out = inner_convert_text(text, in_fmt, out_fmt, extra_args)
if output_format == 'panflute':
out = json.loads(out, object_pairs_hook=from_json)
if standalone:
if not isinstance(out, Doc): # Pandoc 1.7.2 and earlier
metadata, items = out
out = Doc(*items, metadata=metadata)
else:
if isinstance(out, Doc): # Pandoc 1.8 and later
out = out.content.list
else:
out = out[1] # Pandoc 1.7.2 and earlier
return out | Convert formatted text (usually markdown) by calling Pandoc internally
The default output format ('panflute') will return a tree
of Pandoc elements. When combined with 'standalone=True', the tree root
will be a 'Doc' element.
Example:
>>> from panflute import *
>>> md = 'Some *markdown* **text** ~xyz~'
>>> tex = r'Some $x^y$ or $x_n = \sqrt{a + b}$ \textit{a}'
>>> convert_text(md)
[Para(Str(Some) Space Emph(Str(markdown)) Space Strong(Str(text)) Space Subscript(Str(xyz)))]
>>> convert_text(tex)
[Para(Str(Some) Space Math(x^y; format='InlineMath') Space Str(or) Space Math(x_n = \sqrt{a + b}; format='InlineMath') Space RawInline(\textit{a}; format='tex'))]
:param text: text that will be converted
:type text: :class:`str` | :class:`.Element` | :class:`list` of :class:`.Element`
:param input_format: format of the text (default 'markdown').
Any Pandoc input format is valid, plus 'panflute' (a tree of Pandoc
elements)
:param output_format: format of the output
(default is 'panflute' which creates the tree of Pandoc elements).
Non-binary Pandoc formats are allowed (e.g. markdown, latex is allowed,
but docx and pdf are not).
:param standalone: whether the results will be a standalone document
or not.
:type standalone: :class:`bool`
:param extra_args: extra arguments passed to Pandoc
:type extra_args: :class:`list`
:rtype: :class:`list` | :class:`.Doc` | :class:`str`
Note: for a more general solution,
see `pyandoc <https://github.com/kennethreitz/pyandoc/>`_
by Kenneth Reitz. | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L340-L427 | [
"def dump(doc, output_stream=None):\n \"\"\"\n Dump a :class:`.Doc` object into a JSON-encoded text string.\n\n The output will be sent to :data:`sys.stdout` unless an alternative\n text stream is given.\n\n To dump to :data:`sys.stdout` just do:\n\n >>> import panflute as pf\n >>> doc ... | # ---------------------------
# Imports
# ---------------------------
from .base import Element
from .elements import *
from .io import dump
import io
import os
import re
import sys
import json
import yaml
import shlex
# shutil.which: new in version 3.3
try:
from shutil import which
except ImportError:
from shutilwhich import which
from subprocess import Popen, PIPE
from functools import partial
py2 = sys.version_info[0] == 2
if py2: str = basestring
# ---------------------------
# Constants
# ---------------------------
HorizontalSpaces = (Space, LineBreak, SoftBreak)
VerticalSpaces = (Para, )
# ---------------------------
# Convenience functions
# ---------------------------
def yaml_filter(element, doc, tag=None, function=None, tags=None,
                strict_yaml=False):
    '''
    Convenience function for parsing code blocks with YAML options

    This function is useful to create a filter that applies to
    code blocks that have specific classes.

    It is used as an argument of ``run_filter``, with two additional options:
    ``tag`` and ``function``.

    Using this is equivalent to having filter functions that:

    1. Check if the element is a code block
    2. Check if the element belongs to a specific class
    3. Split the YAML options (at the beginning of the block, by looking
       for ``...`` or ``---`` strings in a separate line
    4. Parse the YAML
    5. Use the YAML options and (optionally) the data that follows the YAML
       to return a new or modified element

    Instead, you just need to:

    1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
       with the additional arguments ``tag`` and ``function``
    2. Construct a ``fenced_action`` function that takes four arguments:
       (options, data, element, doc). Note that options is a dict and data
       is a raw string. Notice that this is similar to the ``action``
       functions of standard filters, but with *options* and *data* as the
       new ones.

    Note: if you want to apply multiple functions to separate classes,
    you can use the ``tags`` argument, which receives a dict of
    ``tag: function`` pairs.

    Note: use the ``strict_yaml=True`` option in order to allow for more
    verbose but flexible YAML metadata: more than one YAML blocks are
    allowed, but they all must start with ``---`` (even at the beginning)
    and end with ``---`` or ``...``. Also, YAML is not the default content
    when no delimiters are set.

    Example::

        """
        Replace code blocks of class 'foo' with # horizontal rules
        """

        import panflute as pf

        def fenced_action(options, data, element, doc):
            count = options.get('count', 1)
            div = pf.Div(attributes={'count': str(count)})
            div.content.extend([pf.HorizontalRule] * count)
            return div

        if __name__ == '__main__':
            pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
    '''
    # Allow for either tag+function or a dict {tag: function}
    assert (tag is None) + (tags is None) == 1  # XOR
    if tags is None:
        tags = {tag: function}

    if type(element) == CodeBlock:
        # Only the first matching tag fires (return inside the loop)
        for tag in tags:
            if tag in element.classes:
                function = tags[tag]
                if not strict_yaml:
                    # Split YAML and data parts (separated by ... or ---);
                    # raw[0] is the YAML header, raw[2] (if present) the body
                    raw = re.split("^([.]{3,}|[-]{3,})$",
                                   element.text, 1, re.MULTILINE)
                    data = raw[2] if len(raw) > 2 else ''
                    data = data.lstrip('\n')
                    raw = raw[0]
                    try:
                        options = yaml.safe_load(raw)
                    except yaml.scanner.ScannerError:
                        debug("panflute: malformed YAML block")
                        return
                    if options is None:
                        options = {}
                else:
                    # Strict mode: content is data by default; YAML only
                    # inside explicit ---/... fences (several may appear)
                    options = {}
                    data = []
                    raw = re.split("^([.]{3,}|[-]{3,})$",
                                   element.text, 0, re.MULTILINE)
                    rawmode = True
                    for chunk in raw:
                        chunk = chunk.strip('\n')
                        if not chunk:
                            continue
                        if rawmode:
                            if chunk.startswith('---'):
                                rawmode = False
                            else:
                                data.append(chunk)
                        else:
                            if chunk.startswith('---') or chunk.startswith('...'):
                                rawmode = True
                            else:
                                try:
                                    options.update(yaml.safe_load(chunk))
                                except yaml.scanner.ScannerError:
                                    debug("panflute: malformed YAML block")
                                    return
                    data = '\n'.join(data)
                return function(options=options, data=data,
                                element=element, doc=doc)
def debug(*args, **kwargs):
    """
    Same as :func:`print`, except that output goes to ``stderr``
    (which is not intercepted by Pandoc).
    """
    print(*args, file=sys.stderr, **kwargs)
# ---------------------------
# Functions that extract content
# ---------------------------
def stringify(element, newlines=True):
    """
    Return the raw text version of an element (and its child elements).

    Example:

        >>> from panflute import *
        >>> para = Para(Emph(Str('Hello'), Space, Str('world!')))
        >>> stringify(para)
        'Hello world!\n\n'

    :param newlines: add a new line after a paragraph (default True)
    :type newlines: :class:`bool`
    :rtype: :class:`str`
    """
    fragments = []

    def collect(e, doc):
        # Text-bearing nodes contribute their text, spacing nodes
        # contribute whitespace, everything else contributes nothing.
        if hasattr(e, 'text'):
            fragments.append(e.text)
        elif isinstance(e, HorizontalSpaces):
            fragments.append(' ')
        elif isinstance(e, VerticalSpaces) and newlines:
            fragments.append('\n\n')
        else:
            fragments.append('')

    element.walk(collect)
    return ''.join(fragments)
def _get_metadata(self, key='', default=None, builtin=True):
"""
get_metadata([key, default, simple])
Retrieve metadata with nested keys separated by dots.
This is useful to avoid repeatedly checking if a dict exists, as
the frontmatter might not have the keys that we expect.
With ``builtin=True`` (the default), it will convert the results to
built-in Python types, instead of :class:`.MetaValue` elements. EG: instead of returning a MetaBool it will return True|False.
:param key: string with the keys separated by a dot (``key1.key2``). Default is an empty string (which returns the entire metadata dict)
:type key: ``str``
:param default: return value in case the key is not found (default is ``None``)
:param builtin: If True, return built-in Python types (default is ``True``)
:Example:
>>> doc.metadata['format']['show-frame'] = True
>>> # ...
>>> # afterwards:
>>> show_frame = doc.get_metadata('format.show-frame', False)
>>> stata_path = doc.get_metadata('media.path.figures', '.')
"""
# Retrieve metadata
assert isinstance(key, str)
meta = self.metadata
# Retrieve specific key
if key:
for k in key.split('.'):
if isinstance(meta, MetaMap) and k in meta.content:
meta = meta[k]
else:
return default
# Stringify contents
return meta2builtin(meta) if builtin else meta
def meta2builtin(meta):
    """Recursively convert a MetaValue element into built-in Python types."""
    if isinstance(meta, MetaBool):
        return meta.boolean
    if isinstance(meta, MetaString):
        return meta.text
    if isinstance(meta, MetaList):
        return [meta2builtin(item) for item in meta.content.list]
    if isinstance(meta, MetaMap):
        # NOTE(review): OrderedDict is presumably in scope via the
        # star-import from .elements -- it is not imported here; confirm.
        return OrderedDict((k, meta2builtin(v))
                           for k, v in meta.content.dict.items())
    if isinstance(meta, (MetaInlines, MetaBlocks)):
        return stringify(meta)
    debug("MISSING", type(meta))
    return meta


# Bind the method
Doc.get_metadata = _get_metadata
# ---------------------------
# Functions that rely on external calls
# ---------------------------
def shell(args, wait=True, msg=None):
    """
    Execute the external command and get its exitcode, stdout and stderr.

    :param args: command to run; either an argument list or a single
        string (which is tokenized with shlex)
    :param wait: if True, block until the command exits and return its
        stdout; if False, launch the process detached and return None
    :param msg: data piped to the command's stdin
        (NOTE(review): handed straight to Popen.communicate, so it is
        presumably expected to be bytes -- confirm with callers)
    :raises IOError: when the command exits non-zero; the exception
        carries the captured stderr
    """
    # Fix Windows error if passed a string
    if isinstance(args, str):
        args = shlex.split(args, posix=(os.name != "nt"))
        if os.name == "nt":
            # Windows paths use backslashes
            args = [arg.replace('/', '\\') for arg in args]
    if wait:
        proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate(input=msg)
        exitcode = proc.returncode
        if exitcode != 0:
            raise IOError(err)
        return out
    else:
        # NOTE(review): creationflags is Windows-only; this branch
        # presumably fails on POSIX -- confirm intended platforms.
        DETACHED_PROCESS = 0x00000008
        proc = Popen(args, creationflags=DETACHED_PROCESS)
#def get_exe_path():
# reg = winreg.ConnectRegistry(None,winreg.HKEY_CLASSES_ROOT)
#
# # Fetch verb linked to the dta extension
# path = '.dta'
# key = winreg.OpenKey(reg, path)
# verb = winreg.QueryValue(key, None) # Alternatives: .dta .do
#
# # Fetch command linked to that verb
# path = '{}\shell\open\command'.format(verb)
# key = winreg.OpenKey(reg, path)
# cmd = winreg.QueryValue(key, None)
# fn = cmd.strip('"').split('"')[0]
# #raise(Exception(fn))
# return fn
#
#def check_correct_executable(fn):
# return os.path.isfile(fn) and 'stata' in fn.lower()
def run_pandoc(text='', args=None):
    """
    Low level function that calls Pandoc with (optionally)
    some input text and/or arguments
    """
    args = [] if args is None else args
    pandoc_path = which('pandoc')
    if pandoc_path is None or not os.path.exists(pandoc_path):
        raise OSError("Path to pandoc executable does not exists")
    process = Popen([pandoc_path] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate(input=text.encode('utf-8'))
    if process.returncode != 0:
        raise IOError(stderr)
    return stdout.decode('utf-8')
def inner_convert_text(text, input_format, output_format, extra_args):
    """Call Pandoc to convert *text* between two concrete formats.

    Like convert_text(), but the 'panflute' pseudo-format is not
    supported for either input or output.
    """
    pandoc_args = ['--from={}'.format(input_format),
                   '--to={}'.format(output_format)] + extra_args
    converted = run_pandoc(text, pandoc_args)
    # Normalize Windows line endings (\r\n -> \n)
    return "\n".join(converted.splitlines())
# ---------------------------
# Functions that modify content
# ---------------------------
def _replace_keyword(self, keyword, replacement, count=0):
    """
    replace_keyword(keyword, replacement[, count])
    Walk through the element and its children
    and look for Str() objects that contains
    exactly the keyword. Then, replace it.
    Usually applied to an entire document (a :class:`.Doc` element)
    Note: If the replacement is a block, it cannot be put in place of
    a Str element. As a solution, the closest ancestor (e.g. the parent)
    will be replaced instead, but only if possible
    (if the parent only has one child).
    Example:
    >>> from panflute import *
    >>> p1 = Para(Str('Spam'), Space, Emph(Str('and'), Space, Str('eggs')))
    >>> p2 = Para(Str('eggs'))
    >>> p3 = Plain(Emph(Str('eggs')))
    >>> doc = Doc(p1, p2, p3)
    >>> doc.content
    ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(eggs))) Para(Str(eggs)) Plain(Emph(Str(eggs))))
    >>> doc.replace_keyword('eggs', Str('ham'))
    >>> doc.content
    ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(ham)) Plain(Emph(Str(ham))))
    >>> doc.replace_keyword(keyword='ham', replacement=Para(Str('spam')))
    >>> doc.content
    ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(spam)) Para(Str(spam)))
    :param keyword: string that will be searched (cannot have spaces!)
    :type keyword: :class:`str`
    :param replacement: element that will be placed in turn of the ``Str``
        element that contains the keyword.
    :type replacement: :class:`.Element`
    :param count: number of occurrences that will be replaced.
        If count is not given or is set to zero, all occurrences
        will be replaced.
    :type count: :class:`int`
    """
    def replace_with_inline(e, doc):
        # Swap a matching Str for the inline replacement, honoring `count`
        # (count == 0 means "replace all").
        if type(e) == Str and e.text == keyword:
            doc.num_matches += 1
            if not count or doc.num_matches <= count:
                return replacement
    def replace_with_block(e, doc):
        # A Block replacement can only stand in for a *container* whose
        # sole child is the matching Str (e.g. Para(Str(keyword))).
        if hasattr(e, 'content') and len(e.content) == 1:
            ee = e.content[0]
            if type(ee) == Str and ee.text == keyword:
                if isinstance(e, Block):
                    doc.num_matches += 1
                    if not count or doc.num_matches <= count:
                        return replacement
                elif isinstance(e, Inline):
                    # An inline container (e.g. Emph) cannot hold a Block:
                    # collapse it to a bare Str so an enclosing Block may
                    # still match on a later pass.
                    # NOTE(review): this branch does not bump num_matches
                    # and ignores `count` -- presumably intentional; confirm.
                    return Str(keyword)
    doc = self.doc
    if doc is None:
        raise Exception('No root document')
    # The counter lives on the doc so both closures share the same state.
    doc.num_matches = 0
    if isinstance(replacement, Inline):
        return self.walk(replace_with_inline, doc)
    elif isinstance(replacement, Block):
        return self.walk(replace_with_block, doc)
    else:
        raise NotImplementedError(type(replacement))
# Bind the helper as a method, so it is callable as elem.replace_keyword(...)
Element.replace_keyword = _replace_keyword
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
    """
    Fetch an option value, trying three levels in order of preference:

    1. a local (element-level) attribute/option dict: ``options[local_tag]``
    2. document-level metadata: ``doc.get_metadata(doc_tag)``
    3. the supplied ``default``

    A level that yields ``None`` falls through to the next one, so a
    document-wide (metadata) setting can be overridden per element.

    :type options: ``dict``
    :type local_tag: ``str``
    :type doc: :class:`Doc`
    :type doc_tag: ``str``
    :type default: ``any``
    :type error_on_none: ``bool``
    :raises ValueError: if the final value is ``None`` and
        ``error_on_none`` is True
    """
    # Level 1: element attributes/options (explicit None values fall through).
    if options is not None and local_tag is not None:
        if local_tag in options and options[local_tag] is not None:
            return options[local_tag]
    # Level 2: document metadata.
    if doc is not None and doc_tag is not None:
        found = doc.get_metadata(doc_tag, None)
        if found is not None:
            return found
    # Level 3: caller-supplied default (may itself be None).
    if default is None and error_on_none:
        raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
    return default
|
sergiocorreia/panflute | panflute/tools.py | _replace_keyword | python | def _replace_keyword(self, keyword, replacement, count=0):
def replace_with_inline(e, doc):
if type(e) == Str and e.text == keyword:
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
def replace_with_block(e, doc):
if hasattr(e, 'content') and len(e.content) == 1:
ee = e.content[0]
if type(ee) == Str and ee.text == keyword:
if isinstance(e, Block):
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
elif isinstance(e, Inline):
return Str(keyword)
doc = self.doc
if doc is None:
raise Exception('No root document')
doc.num_matches = 0
if isinstance(replacement, Inline):
return self.walk(replace_with_inline, doc)
elif isinstance(replacement, Block):
return self.walk(replace_with_block, doc)
else:
raise NotImplementedError(type(replacement)) | replace_keyword(keyword, replacement[, count])
Walk through the element and its children
and look for Str() objects that contains
exactly the keyword. Then, replace it.
Usually applied to an entire document (a :class:`.Doc` element)
Note: If the replacement is a block, it cannot be put in place of
a Str element. As a solution, the closest ancestor (e.g. the parent)
will be replaced instead, but only if possible
(if the parent only has one child).
Example:
>>> from panflute import *
>>> p1 = Para(Str('Spam'), Space, Emph(Str('and'), Space, Str('eggs')))
>>> p2 = Para(Str('eggs'))
>>> p3 = Plain(Emph(Str('eggs')))
>>> doc = Doc(p1, p2, p3)
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(eggs))) Para(Str(eggs)) Plain(Emph(Str(eggs))))
>>> doc.replace_keyword('eggs', Str('ham'))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(ham)) Plain(Emph(Str(ham))))
>>> doc.replace_keyword(keyword='ham', replacement=Para(Str('spam')))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(spam)) Para(Str(spam)))
:param keyword: string that will be searched (cannot have spaces!)
:type keyword: :class:`str`
:param replacement: element that will be placed in turn of the ``Str``
element that contains the keyword.
:type replacement: :class:`.Element`
:param count: number of occurrences that will be replaced.
If count is not given or is set to zero, all occurrences
will be replaced.
:type count: :class:`int` | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L444-L512 | null | # ---------------------------
# Imports
# ---------------------------
from .base import Element
from .elements import *
from .io import dump
import io
import os
import re
import sys
import json
import yaml
import shlex
# shutil.which: new in version 3.3
try:
from shutil import which
except ImportError:
from shutilwhich import which
from subprocess import Popen, PIPE
from functools import partial
py2 = sys.version_info[0] == 2
if py2: str = basestring
# ---------------------------
# Constants
# ---------------------------
HorizontalSpaces = (Space, LineBreak, SoftBreak)
VerticalSpaces = (Para, )
# ---------------------------
# Convenience functions
# ---------------------------
def yaml_filter(element, doc, tag=None, function=None, tags=None,
                strict_yaml=False):
    '''
    Convenience function for parsing code blocks with YAML options
    This function is useful to create a filter that applies to
    code blocks that have specific classes.
    It is used as an argument of ``run_filter``, with two additional options:
    ``tag`` and ``function``.
    Using this is equivalent to having filter functions that:
    1. Check if the element is a code block
    2. Check if the element belongs to a specific class
    3. Split the YAML options (at the beginning of the block, by looking
       for ``...`` or ``---`` strings in a separate line
    4. Parse the YAML
    5. Use the YAML options and (optionally) the data that follows the YAML
       to return a new or modified element
    Instead, you just need to:
    1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
       with the additional arguments ``tag`` and ``function``
    2. Construct a ``fenced_action`` function that takes four arguments:
       (options, data, element, doc). Note that options is a dict and data
       is a raw string. Notice that this is similar to the ``action``
       functions of standard filters, but with *options* and *data* as the
       new ones.
    Note: if you want to apply multiple functions to separate classes,
    you can use the ``tags`` argument, which receives a dict of
    ``tag: function`` pairs.
    Note: use the ``strict_yaml=True`` option in order to allow for more verbose
    but flexible YAML metadata: more than one YAML blocks are allowed, but
    they all must start with ``---`` (even at the beginning) and end with
    ``---`` or ``...``. Also, YAML is not the default content
    when no delimiters are set.
    Example::
        """
        Replace code blocks of class 'foo' with # horizontal rules
        """
        import panflute as pf
        def fenced_action(options, data, element, doc):
            count = options.get('count', 1)
            div = pf.Div(attributes={'count': str(count)})
            div.content.extend([pf.HorizontalRule] * count)
            return div
        if __name__ == '__main__':
            pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
    '''
    # Allow for either tag+function or a dict {tag: function}
    assert (tag is None) + (tags is None) == 1  # XOR
    if tags is None:
        tags = {tag: function}
    # Only code blocks carrying one of the registered classes are handled;
    # every other element is left unchanged (implicit None return).
    if type(element) == CodeBlock:
        for tag in tags:
            if tag in element.classes:
                function = tags[tag]
                if not strict_yaml:
                    # Non-strict mode: everything up to the first fence
                    # line ('...' or '---') is YAML; the remainder is data.
                    # Split YAML and data parts (separated by ... or ---)
                    raw = re.split("^([.]{3,}|[-]{3,})$",
                                   element.text, 1, re.MULTILINE)
                    data = raw[2] if len(raw) > 2 else ''
                    data = data.lstrip('\n')
                    raw = raw[0]
                    try:
                        options = yaml.safe_load(raw)
                    except yaml.scanner.ScannerError:
                        debug("panflute: malformed YAML block")
                        return
                    # NOTE(review): yaml.parser.ParserError is not caught
                    # here and would propagate out of the filter -- confirm
                    # whether it should also be handled.
                    if options is None:
                        options = {}
                else:
                    # Strict mode: content is data by default; YAML only
                    # appears between explicit '---' ... '---'/'...' fences,
                    # and multiple YAML chunks are merged into one dict.
                    options = {}
                    data = []
                    raw = re.split("^([.]{3,}|[-]{3,})$",
                                   element.text, 0, re.MULTILINE)
                    rawmode = True
                    for chunk in raw:
                        chunk = chunk.strip('\n')
                        if not chunk:
                            continue
                        if rawmode:
                            if chunk.startswith('---'):
                                rawmode = False
                            else:
                                data.append(chunk)
                        else:
                            if chunk.startswith('---') or chunk.startswith('...'):
                                rawmode = True
                            else:
                                try:
                                    options.update(yaml.safe_load(chunk))
                                except yaml.scanner.ScannerError:
                                    debug("panflute: malformed YAML block")
                                    return
                    data = '\n'.join(data)
                return function(options=options, data=data,
                                element=element, doc=doc)
def debug(*args, **kwargs):
    """
    Like :func:`print`, but writes to ``stderr`` so the output is not
    swallowed by Pandoc (which consumes the filter's stdout).
    """
    print(*args, file=sys.stderr, **kwargs)
# ---------------------------
# Functions that extract content
# ---------------------------
def stringify(element, newlines=True):
    """
    Return the plain-text rendering of an element and all of its children.

    Nodes with a ``text`` attribute contribute their text; horizontal
    spacing elements contribute a single space; paragraphs contribute a
    blank line (when ``newlines`` is True); everything else contributes
    nothing.

    :param newlines: add a new line after a paragraph (default True)
    :type newlines: :class:`bool`
    :rtype: :class:`str`
    """
    pieces = []

    def collect(e, doc):
        # Map each visited node to its textual contribution.
        if hasattr(e, 'text'):
            pieces.append(e.text)
        elif isinstance(e, HorizontalSpaces):
            pieces.append(' ')
        elif isinstance(e, VerticalSpaces) and newlines:
            pieces.append('\n\n')
        else:
            # Citations and all other node types add nothing themselves
            # (their children are still visited by walk()).
            pieces.append('')

    element.walk(collect)
    return ''.join(pieces)
def _get_metadata(self, key='', default=None, builtin=True):
    """
    get_metadata([key, default, builtin])
    Retrieve metadata with nested keys separated by dots.
    This is useful to avoid repeatedly checking if a dict exists, as
    the frontmatter might not have the keys that we expect.
    With ``builtin=True`` (the default), it will convert the results to
    built-in Python types, instead of :class:`.MetaValue` elements.
    EG: instead of returning a MetaBool it will return True|False.
    :param key: string with the keys separated by a dot (``key1.key2``).
        Default is an empty string (which returns the entire metadata dict)
    :type key: ``str``
    :param default: return value in case the key is not found
        (default is ``None``)
    :param builtin: If True, return built-in Python types
        (default is ``True``)
    :Example:
    >>> doc.metadata['format']['show-frame'] = True
    >>> # ...
    >>> # afterwards:
    >>> show_frame = doc.get_metadata('format.show-frame', False)
    >>> stata_path = doc.get_metadata('media.path.figures', '.')
    """
    # Retrieve metadata
    assert isinstance(key, str)
    meta = self.metadata
    # Walk the dotted path; bail out with `default` at the first segment
    # that is missing or whose parent is not a MetaMap.
    if key:
        for k in key.split('.'):
            if isinstance(meta, MetaMap) and k in meta.content:
                meta = meta[k]
            else:
                return default
    # Convert to built-in Python types unless the raw MetaValue was requested.
    return meta2builtin(meta) if builtin else meta
def meta2builtin(meta):
    """
    Recursively convert a metadata element into built-in Python types:
    MetaBool -> bool, MetaString -> str, MetaList -> list,
    MetaMap -> OrderedDict, MetaInlines/MetaBlocks -> str (via stringify).
    Unknown types are reported to stderr and returned unchanged.
    """
    if isinstance(meta, MetaBool):
        return meta.boolean
    elif isinstance(meta, MetaString):
        return meta.text
    elif isinstance(meta, MetaList):
        return [meta2builtin(v) for v in meta.content.list]
    elif isinstance(meta, MetaMap):
        # NOTE(review): OrderedDict is not imported at the top of this
        # file; presumably it is re-exported by `from .elements import *`
        # -- confirm, otherwise this branch raises NameError.
        return OrderedDict((k, meta2builtin(v)) for (k, v)
                           in meta.content.dict.items())
    elif isinstance(meta, (MetaInlines, MetaBlocks)):
        return stringify(meta)
    else:
        debug("MISSING", type(meta))
        return meta
# Bind _get_metadata as a method so it is callable as doc.get_metadata(...)
Doc.get_metadata = _get_metadata
# ---------------------------
# Functions that rely on external calls
# ---------------------------
def shell(args, wait=True, msg=None):
    """
    Run an external command.

    :param args: command, either as a list of tokens or a single string
        (strings are tokenized with :func:`shlex.split`)
    :param wait: if True (default), block until the command finishes and
        return its stdout (bytes); if False, launch it detached
        (Windows-only creation flag) and return None
    :param msg: optional bytes fed to the command's stdin
    :raises IOError: if the command exits with a nonzero status
        (the exception carries the command's stderr)
    """
    # Accept a plain command string: tokenize it, and on Windows also
    # normalize path separators (fixes Popen errors there).
    if isinstance(args, str):
        args = shlex.split(args, posix=(os.name != "nt"))
        if os.name == "nt":
            args = [token.replace('/', '\\') for token in args]
    if not wait:
        # Fire-and-forget: start the child detached and return immediately.
        DETACHED_PROCESS = 0x00000008
        Popen(args, creationflags=DETACHED_PROCESS)
        return
    child = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = child.communicate(input=msg)
    if child.returncode != 0:
        raise IOError(stderr)
    return stdout
#def get_exe_path():
# reg = winreg.ConnectRegistry(None,winreg.HKEY_CLASSES_ROOT)
#
# # Fetch verb linked to the dta extension
# path = '.dta'
# key = winreg.OpenKey(reg, path)
# verb = winreg.QueryValue(key, None) # Alternatives: .dta .do
#
# # Fetch command linked to that verb
# path = '{}\shell\open\command'.format(verb)
# key = winreg.OpenKey(reg, path)
# cmd = winreg.QueryValue(key, None)
# fn = cmd.strip('"').split('"')[0]
# #raise(Exception(fn))
# return fn
#
#def check_correct_executable(fn):
# return os.path.isfile(fn) and 'stata' in fn.lower()
def run_pandoc(text='', args=None):
    """
    Invoke the ``pandoc`` executable, feeding it *text* on stdin and
    passing *args* on the command line; return its stdout decoded as UTF-8.

    :raises OSError: if pandoc cannot be located on the PATH
    :raises IOError: if pandoc exits with a nonzero status
    """
    pandoc_args = [] if args is None else args
    pandoc_path = which('pandoc')
    if pandoc_path is None or not os.path.exists(pandoc_path):
        raise OSError("Path to pandoc executable does not exists")
    proc = Popen([pandoc_path] + pandoc_args,
                 stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = proc.communicate(input=text.encode('utf-8'))
    if proc.returncode != 0:
        raise IOError(stderr)
    return stdout.decode('utf-8')
def convert_text(text,
                 input_format='markdown',
                 output_format='panflute',
                 standalone=False,
                 extra_args=None):
    """
    Convert formatted text (usually markdown) by calling Pandoc internally
    The default output format ('panflute') will return a tree
    of Pandoc elements. When combined with 'standalone=True', the tree root
    will be a 'Doc' element.
    Example:
    >>> from panflute import *
    >>> md = 'Some *markdown* **text** ~xyz~'
    >>> tex = r'Some $x^y$ or $x_n = \\sqrt{a + b}$ \\textit{a}'
    >>> convert_text(md)
    [Para(Str(Some) Space Emph(Str(markdown)) Space Strong(Str(text)) Space Subscript(Str(xyz)))]
    :param text: text that will be converted
    :type text: :class:`str` | :class:`.Element` | :class:`list` of :class:`.Element`
    :param input_format: format of the text (default 'markdown').
        Any Pandoc input format is valid, plus 'panflute' (a tree of Pandoc
        elements)
    :param output_format: format of the output
        (default is 'panflute' which creates the tree of Pandoc elements).
        Non-binary Pandoc formats are allowed (e.g. markdown, latex is
        allowed, but docx and pdf are not).
    :param standalone: whether the results will be a standalone document
        or not.
    :type standalone: :class:`bool`
    :param extra_args: extra arguments passed to Pandoc; the list is not
        modified by this function.
    :type extra_args: :class:`list`
    :rtype: :class:`list` | :class:`.Doc` | :class:`str`
    Note: for a more general solution,
    see `pyandoc <https://github.com/kennethreitz/pyandoc/>`_
    by Kenneth Reitz.
    """
    if input_format == 'panflute':
        # Problem:
        # We need a Doc element, but received a list of elements.
        # So we wrap-up the list in a Doc, but with what pandoc-api version?
        # (remember that Pandoc requires a matching api-version!)
        # Workaround: call Pandoc with empty text to get its api-version
        if not isinstance(text, Doc):
            tmp_doc = convert_text('', standalone=True)
            api_version = tmp_doc.api_version
            if isinstance(text, Element):
                text = [text]
            text = Doc(*text, api_version=api_version)
        # Serialize the Doc into the JSON that Pandoc consumes
        with io.StringIO() as f:
            dump(text, f)
            text = f.getvalue()
    # 'panflute' is not a real Pandoc format: it travels as JSON over the pipe
    in_fmt = 'json' if input_format == 'panflute' else input_format
    out_fmt = 'json' if output_format == 'panflute' else output_format
    if extra_args is None:
        extra_args = []
    if standalone:
        # BUGFIX: build a new list instead of appending in place, so a
        # caller-supplied extra_args list does not accumulate a duplicate
        # '--standalone' flag across repeated calls.
        extra_args = extra_args + ['--standalone']
    out = inner_convert_text(text, in_fmt, out_fmt, extra_args)
    if output_format == 'panflute':
        # Rebuild panflute elements from Pandoc's JSON output
        out = json.loads(out, object_pairs_hook=from_json)
        if standalone:
            if not isinstance(out, Doc):  # Pandoc 1.7.2 and earlier
                metadata, items = out
                out = Doc(*items, metadata=metadata)
        else:
            if isinstance(out, Doc):  # Pandoc 1.8 and later
                out = out.content.list
            else:
                out = out[1]  # Pandoc 1.7.2 and earlier
    return out
def inner_convert_text(text, input_format, output_format, extra_args):
    # like convert_text(), but does not support 'panflute' input/output;
    # format names are passed straight through to the pandoc CLI
    from_arg = '--from={}'.format(input_format)
    to_arg = '--to={}'.format(output_format)
    args = [from_arg, to_arg] + extra_args
    out = run_pandoc(text, args)
    out = "\n".join(out.splitlines()) # Replace \r\n with \n
    # (note: splitlines() also drops any trailing newline from the output)
    return out
# ---------------------------
# Functions that modify content
# ---------------------------
# Bind the method
Element.replace_keyword = _replace_keyword
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
    """ fetch an option variable,
    from either a local (element) level option/attribute tag,
    document level metadata tag,
    or a default
    :type options: ``dict``
    :type local_tag: ``str``
    :type doc: :class:`Doc`
    :type doc_tag: ``str``
    :type default: ``any``
    :type error_on_none: ``bool``
    The order of preference is local > document > default,
    although if a local or document tag returns None, then the next level
    down is used.
    Also, if error_on_none=True and the final variable is None, then a
    ValueError will be raised.
    In this manner you can set global variables, which can be optionally
    overriden at a local level.
    For example, to apply different styles to docx text
    main.md:
    ------------------
    style-div:
        name: MyStyle
    ------------------
    :::style
    some text
    :::
    ::: {.style name=MyOtherStyle}
    some more text
    :::
    style_filter.py:
    import panflute as pf
    def action(elem, doc):
        if type(elem) == pf.Div:
            style = pf.get_option(elem.attributes, "name", doc, "style-div.name")
            elem.attributes["custom-style"] = style
    def main(doc=None):
        return run_filter(action, doc=doc)
    if __name__ == "__main__":
        main()
    """
    variable = None
    # element level: explicit None values fall through to the next level
    if options is not None and local_tag is not None:
        if local_tag in options and options[local_tag] is not None:
            variable = options[local_tag]
    if variable is not None:
        return variable
    # doc level: look the tag up in the document metadata
    if doc is not None and doc_tag is not None:
        variable = doc.get_metadata(doc_tag, None)
        if variable is not None:
            return variable
    # default level (the default itself may legitimately be None)
    variable = default
    if variable is None and error_on_none:
        raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
    return variable
|
sergiocorreia/panflute | panflute/tools.py | get_option | python | def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
variable = None
# element level
if options is not None and local_tag is not None:
if local_tag in options and options[local_tag] is not None:
variable = options[local_tag]
if variable is not None:
return variable
# doc level
if doc is not None and doc_tag is not None:
variable = doc.get_metadata(doc_tag, None)
if variable is not None:
return variable
# default level
variable = default
if variable is None and error_on_none:
raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
return variable | fetch an option variable,
from either a local (element) level option/attribute tag,
document level metadata tag,
or a default
:type options: ``dict``
:type local_tag: ``str``
:type doc: :class:`Doc`
:type doc_tag: ``str``
:type default: ``any``
:type error_on_none: ``bool``
The order of preference is local > document > default,
although if a local or document tag returns None, then the next level down is used.
Also, if error_on_none=True and the final variable is None, then a ValueError will be raised
In this manner you can set global variables, which can be optionally overriden at a local level.
For example, to apply different styles to docx text
main.md:
------------------
style-div:
name: MyStyle
------------------
:::style
some text
:::
::: {.style name=MyOtherStyle}
some more text
:::
style_filter.py:
import panflute as pf
def action(elem, doc):
if type(elem) == pf.Div:
style = pf.get_option(elem.attributes, "name", doc, "style-div.name")
elem.attributes["custom-style"] = style
def main(doc=None):
return run_filter(action, doc=doc)
if __name__ == "__main__":
main() | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L518-L587 | null | # ---------------------------
# Imports
# ---------------------------
from .base import Element
from .elements import *
from .io import dump
import io
import os
import re
import sys
import json
import yaml
import shlex
# shutil.which: new in version 3.3
try:
from shutil import which
except ImportError:
from shutilwhich import which
from subprocess import Popen, PIPE
from functools import partial
py2 = sys.version_info[0] == 2
if py2: str = basestring
# ---------------------------
# Constants
# ---------------------------
HorizontalSpaces = (Space, LineBreak, SoftBreak)
VerticalSpaces = (Para, )
# ---------------------------
# Convenience functions
# ---------------------------
def yaml_filter(element, doc, tag=None, function=None, tags=None,
strict_yaml=False):
'''
Convenience function for parsing code blocks with YAML options
This function is useful to create a filter that applies to
code blocks that have specific classes.
It is used as an argument of ``run_filter``, with two additional options:
``tag`` and ``function``.
Using this is equivalent to having filter functions that:
1. Check if the element is a code block
2. Check if the element belongs to a specific class
3. Split the YAML options (at the beginning of the block, by looking
for ``...`` or ``---`` strings in a separate line
4. Parse the YAML
5. Use the YAML options and (optionally) the data that follows the YAML
to return a new or modified element
Instead, you just need to:
1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
with the additional arguments ``tag`` and ``function``
2. Construct a ``fenced_action`` function that takes four arguments:
(options, data, element, doc). Note that options is a dict and data
is a raw string. Notice that this is similar to the ``action``
functions of standard filters, but with *options* and *data* as the
new ones.
Note: if you want to apply multiple functions to separate classes,
you can use the ``tags`` argument, which receives a dict of
``tag: function`` pairs.
Note: use the ``strict_yaml=True`` option in order to allow for more verbose
but flexible YAML metadata: more than one YAML blocks are allowed, but
they all must start with ``---`` (even at the beginning) and end with
``---`` or ``...``. Also, YAML is not the default content
when no delimiters are set.
Example::
"""
Replace code blocks of class 'foo' with # horizontal rules
"""
import panflute as pf
def fenced_action(options, data, element, doc):
count = options.get('count', 1)
div = pf.Div(attributes={'count': str(count)})
div.content.extend([pf.HorizontalRule] * count)
return div
if __name__ == '__main__':
pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
'''
# Allow for either tag+function or a dict {tag: function}
assert (tag is None) + (tags is None) == 1 # XOR
if tags is None:
tags = {tag: function}
if type(element) == CodeBlock:
for tag in tags:
if tag in element.classes:
function = tags[tag]
if not strict_yaml:
# Split YAML and data parts (separated by ... or ---)
raw = re.split("^([.]{3,}|[-]{3,})$",
element.text, 1, re.MULTILINE)
data = raw[2] if len(raw) > 2 else ''
data = data.lstrip('\n')
raw = raw[0]
try:
options = yaml.safe_load(raw)
except yaml.scanner.ScannerError:
debug("panflute: malformed YAML block")
return
if options is None:
options = {}
else:
options = {}
data = []
raw = re.split("^([.]{3,}|[-]{3,})$",
element.text, 0, re.MULTILINE)
rawmode = True
for chunk in raw:
chunk = chunk.strip('\n')
if not chunk:
continue
if rawmode:
if chunk.startswith('---'):
rawmode = False
else:
data.append(chunk)
else:
if chunk.startswith('---') or chunk.startswith('...'):
rawmode = True
else:
try:
options.update(yaml.safe_load(chunk))
except yaml.scanner.ScannerError:
debug("panflute: malformed YAML block")
return
data = '\n'.join(data)
return function(options=options, data=data,
element=element, doc=doc)
def debug(*args, **kwargs):
"""
Same as print, but prints to ``stderr``
(which is not intercepted by Pandoc).
"""
print(file=sys.stderr, *args, **kwargs)
# ---------------------------
# Functions that extract content
# ---------------------------
def stringify(element, newlines=True):
"""
Return the raw text version of an elements (and its children element).
Example:
>>> from panflute import *
>>> e1 = Emph(Str('Hello'), Space, Str('world!'))
>>> e2 = Strong(Str('Bye!'))
>>> para = Para(e1, Space, e2)
>>> stringify(para)
'Hello world! Bye!\n\n'
:param newlines: add a new line after a paragraph (default True)
:type newlines: :class:`bool`
:rtype: :class:`str`
"""
def attach_str(e, doc, answer):
if hasattr(e, 'text'):
ans = e.text
elif isinstance(e, HorizontalSpaces):
ans = ' '
elif isinstance(e, VerticalSpaces) and newlines:
ans = '\n\n'
elif type(e) == Citation:
ans = ''
else:
ans = ''
answer.append(ans)
answer = []
f = partial(attach_str, answer=answer)
element.walk(f)
return ''.join(answer)
def _get_metadata(self, key='', default=None, builtin=True):
"""
get_metadata([key, default, simple])
Retrieve metadata with nested keys separated by dots.
This is useful to avoid repeatedly checking if a dict exists, as
the frontmatter might not have the keys that we expect.
With ``builtin=True`` (the default), it will convert the results to
built-in Python types, instead of :class:`.MetaValue` elements. EG: instead of returning a MetaBool it will return True|False.
:param key: string with the keys separated by a dot (``key1.key2``). Default is an empty string (which returns the entire metadata dict)
:type key: ``str``
:param default: return value in case the key is not found (default is ``None``)
:param builtin: If True, return built-in Python types (default is ``True``)
:Example:
>>> doc.metadata['format']['show-frame'] = True
>>> # ...
>>> # afterwards:
>>> show_frame = doc.get_metadata('format.show-frame', False)
>>> stata_path = doc.get_metadata('media.path.figures', '.')
"""
# Retrieve metadata
assert isinstance(key, str)
meta = self.metadata
# Retrieve specific key
if key:
for k in key.split('.'):
if isinstance(meta, MetaMap) and k in meta.content:
meta = meta[k]
else:
return default
# Stringify contents
return meta2builtin(meta) if builtin else meta
def meta2builtin(meta):
if isinstance(meta, MetaBool):
return meta.boolean
elif isinstance(meta, MetaString):
return meta.text
elif isinstance(meta, MetaList):
return [meta2builtin(v) for v in meta.content.list]
elif isinstance(meta, MetaMap):
return OrderedDict((k, meta2builtin(v)) for (k, v)
in meta.content.dict.items())
elif isinstance(meta, (MetaInlines, MetaBlocks)):
return stringify(meta)
else:
debug("MISSING", type(meta))
return meta
# Bind the method
Doc.get_metadata = _get_metadata
# ---------------------------
# Functions that rely on external calls
# ---------------------------
def shell(args, wait=True, msg=None):
"""
Execute the external command and get its exitcode, stdout and stderr.
"""
# Fix Windows error if passed a string
if isinstance(args, str):
args = shlex.split(args, posix=(os.name != "nt"))
if os.name == "nt":
args = [arg.replace('/', '\\') for arg in args]
if wait:
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(input=msg)
exitcode = proc.returncode
if exitcode != 0:
raise IOError(err)
return out
else:
DETACHED_PROCESS = 0x00000008
proc = Popen(args, creationflags=DETACHED_PROCESS)
#def get_exe_path():
# reg = winreg.ConnectRegistry(None,winreg.HKEY_CLASSES_ROOT)
#
# # Fetch verb linked to the dta extension
# path = '.dta'
# key = winreg.OpenKey(reg, path)
# verb = winreg.QueryValue(key, None) # Alternatives: .dta .do
#
# # Fetch command linked to that verb
# path = '{}\shell\open\command'.format(verb)
# key = winreg.OpenKey(reg, path)
# cmd = winreg.QueryValue(key, None)
# fn = cmd.strip('"').split('"')[0]
# #raise(Exception(fn))
# return fn
#
#def check_correct_executable(fn):
# return os.path.isfile(fn) and 'stata' in fn.lower()
def run_pandoc(text='', args=None):
"""
Low level function that calls Pandoc with (optionally)
some input text and/or arguments
"""
if args is None:
args = []
pandoc_path = which('pandoc')
if pandoc_path is None or not os.path.exists(pandoc_path):
raise OSError("Path to pandoc executable does not exists")
proc = Popen([pandoc_path] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(input=text.encode('utf-8'))
exitcode = proc.returncode
if exitcode != 0:
raise IOError(err)
return out.decode('utf-8')
def convert_text(text,
input_format='markdown',
output_format='panflute',
standalone=False,
extra_args=None):
"""
Convert formatted text (usually markdown) by calling Pandoc internally
The default output format ('panflute') will return a tree
of Pandoc elements. When combined with 'standalone=True', the tree root
will be a 'Doc' element.
Example:
>>> from panflute import *
>>> md = 'Some *markdown* **text** ~xyz~'
>>> tex = r'Some $x^y$ or $x_n = \sqrt{a + b}$ \textit{a}'
>>> convert_text(md)
[Para(Str(Some) Space Emph(Str(markdown)) Space Strong(Str(text)) Space Subscript(Str(xyz)))]
>>> convert_text(tex)
[Para(Str(Some) Space Math(x^y; format='InlineMath') Space Str(or) Space Math(x_n = \sqrt{a + b}; format='InlineMath') Space RawInline(\textit{a}; format='tex'))]
:param text: text that will be converted
:type text: :class:`str` | :class:`.Element` | :class:`list` of :class:`.Element`
:param input_format: format of the text (default 'markdown').
Any Pandoc input format is valid, plus 'panflute' (a tree of Pandoc
elements)
:param output_format: format of the output
(default is 'panflute' which creates the tree of Pandoc elements).
Non-binary Pandoc formats are allowed (e.g. markdown, latex is allowed,
but docx and pdf are not).
:param standalone: whether the results will be a standalone document
or not.
:type standalone: :class:`bool`
:param extra_args: extra arguments passed to Pandoc
:type extra_args: :class:`list`
:rtype: :class:`list` | :class:`.Doc` | :class:`str`
Note: for a more general solution,
see `pyandoc <https://github.com/kennethreitz/pyandoc/>`_
by Kenneth Reitz.
"""
if input_format == 'panflute':
# Problem:
# We need a Doc element, but received a list of elements.
# So we wrap-up the list in a Doc, but with what pandoc-api version?
# (remember that Pandoc requires a matching api-version!)
# Workaround: call Pandoc with empty text to get its api-version
if not isinstance(text, Doc):
tmp_doc = convert_text('', standalone=True)
api_version = tmp_doc.api_version
if isinstance(text, Element):
text = [text]
text = Doc(*text, api_version=api_version)
# Dump the Doc into json
with io.StringIO() as f:
dump(text, f)
text = f.getvalue()
in_fmt = 'json' if input_format == 'panflute' else input_format
out_fmt = 'json' if output_format == 'panflute' else output_format
if extra_args is None:
extra_args = []
if standalone:
extra_args.append('--standalone')
out = inner_convert_text(text, in_fmt, out_fmt, extra_args)
if output_format == 'panflute':
out = json.loads(out, object_pairs_hook=from_json)
if standalone:
if not isinstance(out, Doc): # Pandoc 1.7.2 and earlier
metadata, items = out
out = Doc(*items, metadata=metadata)
else:
if isinstance(out, Doc): # Pandoc 1.8 and later
out = out.content.list
else:
out = out[1] # Pandoc 1.7.2 and earlier
return out
def inner_convert_text(text, input_format, output_format, extra_args):
# like convert_text(), but does not support 'panflute' input/output
from_arg = '--from={}'.format(input_format)
to_arg = '--to={}'.format(output_format)
args = [from_arg, to_arg] + extra_args
out = run_pandoc(text, args)
out = "\n".join(out.splitlines()) # Replace \r\n with \n
return out
# ---------------------------
# Functions that modify content
# ---------------------------
def _replace_keyword(self, keyword, replacement, count=0):
"""
replace_keyword(keyword, replacement[, count])
Walk through the element and its children
and look for Str() objects that contains
exactly the keyword. Then, replace it.
Usually applied to an entire document (a :class:`.Doc` element)
Note: If the replacement is a block, it cannot be put in place of
a Str element. As a solution, the closest ancestor (e.g. the parent)
will be replaced instead, but only if possible
(if the parent only has one child).
Example:
>>> from panflute import *
>>> p1 = Para(Str('Spam'), Space, Emph(Str('and'), Space, Str('eggs')))
>>> p2 = Para(Str('eggs'))
>>> p3 = Plain(Emph(Str('eggs')))
>>> doc = Doc(p1, p2, p3)
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(eggs))) Para(Str(eggs)) Plain(Emph(Str(eggs))))
>>> doc.replace_keyword('eggs', Str('ham'))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(ham)) Plain(Emph(Str(ham))))
>>> doc.replace_keyword(keyword='ham', replacement=Para(Str('spam')))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(spam)) Para(Str(spam)))
:param keyword: string that will be searched (cannot have spaces!)
:type keyword: :class:`str`
:param replacement: element that will be placed in turn of the ``Str``
element that contains the keyword.
:type replacement: :class:`.Element`
:param count: number of occurrences that will be replaced.
If count is not given or is set to zero, all occurrences
will be replaced.
:type count: :class:`int`
"""
def replace_with_inline(e, doc):
if type(e) == Str and e.text == keyword:
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
def replace_with_block(e, doc):
if hasattr(e, 'content') and len(e.content) == 1:
ee = e.content[0]
if type(ee) == Str and ee.text == keyword:
if isinstance(e, Block):
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
elif isinstance(e, Inline):
return Str(keyword)
doc = self.doc
if doc is None:
raise Exception('No root document')
doc.num_matches = 0
if isinstance(replacement, Inline):
return self.walk(replace_with_inline, doc)
elif isinstance(replacement, Block):
return self.walk(replace_with_block, doc)
else:
raise NotImplementedError(type(replacement))
# Bind the method
Element.replace_keyword = _replace_keyword
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
""" fetch an option variable,
from either a local (element) level option/attribute tag,
document level metadata tag,
or a default
:type options: ``dict``
:type local_tag: ``str``
:type doc: :class:`Doc`
:type doc_tag: ``str``
:type default: ``any``
:type error_on_none: ``bool``
The order of preference is local > document > default,
although if a local or document tag returns None, then the next level down is used.
Also, if error_on_none=True and the final variable is None, then a ValueError will be raised
In this manner you can set global variables, which can be optionally overriden at a local level.
For example, to apply different styles to docx text
main.md:
------------------
style-div:
name: MyStyle
------------------
:::style
some text
:::
::: {.style name=MyOtherStyle}
some more text
:::
style_filter.py:
import panflute as pf
def action(elem, doc):
if type(elem) == pf.Div:
style = pf.get_option(elem.attributes, "name", doc, "style-div.name")
elem.attributes["custom-style"] = style
def main(doc=None):
return run_filter(action, doc=doc)
if __name__ == "__main__":
main()
"""
variable = None
# element level
if options is not None and local_tag is not None:
if local_tag in options and options[local_tag] is not None:
variable = options[local_tag]
if variable is not None:
return variable
# doc level
if doc is not None and doc_tag is not None:
variable = doc.get_metadata(doc_tag, None)
if variable is not None:
return variable
# default level
variable = default
if variable is None and error_on_none:
raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
return variable
|
sergiocorreia/panflute | panflute/base.py | Element._set_content | python | def _set_content(self, value, oktypes):
if value is None:
value = []
self._content = ListContainer(*value, oktypes=oktypes, parent=self) | Similar to content.setter but when there are no existing oktypes | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/base.py#L123-L129 | null | class Element(object):
"""
Base class of all Pandoc elements
"""
__slots__ = ['parent', 'location']
_children = []
def __new__(cls, *args, **kwargs):
# This is just to initialize self.parent to None
element = object.__new__(cls)
element.parent = None
element.location = None
return element
@property
def tag(self):
tag = type(self).__name__
return tag
# ---------------------------
# Base methods
# ---------------------------
# Should be overridden except for trivial elements (Space, Null, etc.)
def __repr__(self):
# This is just a convenience method
# Override it for more complex elements
extra = []
for key in self.__slots__:
if not key.startswith('_') and key != 'text':
val = getattr(self, key)
if val not in ([], OrderedDict(), ''):
extra.append([key, val])
if extra:
extra = ('{}={}'.format(k, repr(v)) for k, v in extra)
extra = '; ' + ', '.join(x for x in extra)
else:
extra = ''
if '_content' in self.__slots__:
content = ' '.join(repr(x) for x in self.content)
return '{}({}{})'.format(self.tag, content, extra)
elif 'text' in self.__slots__:
return '{}({}{})'.format(self.tag, self.text, extra)
else:
return self.tag
def to_json(self):
return encode_dict(self.tag, self._slots_to_json())
def _slots_to_json(self):
# Default when the element contains nothing
return []
# ---------------------------
# .identifier .classes .attributes
# ---------------------------
def _set_ica(self, identifier, classes, attributes):
self.identifier = check_type(identifier, str)
self.classes = [check_type(cl, str) for cl in classes]
self.attributes = OrderedDict(attributes)
def _ica_to_json(self):
return [self.identifier, self.classes, list(self.attributes.items())]
# ---------------------------
# .content (setter and getter)
# ---------------------------
@property
def content(self):
"""
Sequence of :class:`Element` objects (usually either :class:`Block`
or :class:`Inline`) that are "children" of the current element.
Only available for elements that accept ``*args``.
Note: some elements have children in attributes other than ``content``
(such as :class:`.Table` that has children in the header and
caption attributes).
"""
return self._content
@content.setter
def content(self, value):
oktypes = self._content.oktypes
value = value.list if isinstance(value, ListContainer) else list(value)
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
# ---------------------------
# Navigation
# ---------------------------
@property
def index(self):
"""
Return position of element inside the parent.
:rtype: ``int`` | ``None``
"""
container = self.container
if container is not None:
return container.index(self)
@property
def container(self):
"""
Rarely used attribute that returns the ``ListContainer`` or
``DictContainer`` that contains the element
(or returns None if no such container exist)
:rtype: ``ListContainer`` | ``DictContainer`` | ``None``
"""
if self.parent is None:
return None
elif self.location is None:
return self.parent.content
else:
container = getattr(self.parent, self.location)
if isinstance(container, (ListContainer, DictContainer)):
return container
else:
assert self is container # id(self) == id(container)
def offset(self, n):
"""
Return a sibling element offset by n
:rtype: :class:`Element` | ``None``
"""
idx = self.index
if idx is not None:
sibling = idx + n
container = self.container
if 0 <= sibling < len(container):
return container[sibling]
@property
def next(self):
"""
Return the next sibling.
Note that ``elem.offset(1) == elem.next``
:rtype: :class:`Element` | ``None``
"""
return self.offset(1)
@property
def prev(self):
"""
Return the previous sibling.
Note that ``elem.offset(-1) == elem.prev``
:rtype: :class:`Element` | ``None``
"""
return self.offset(-1)
def ancestor(self, n):
"""
Return the n-th ancestor.
Note that ``elem.ancestor(1) == elem.parent``
:rtype: :class:`Element` | ``None``
"""
if not isinstance(n, int) or n < 1:
raise TypeError('Ancestor needs to be positive, received', n)
if n == 1 or self.parent is None:
return self.parent
else:
return self.parent.ancestor(n-1)
# ---------------------------
# Walking
# ---------------------------
@property
def doc(self):
"""
Return the root Doc element (if there is one)
"""
guess = self
while guess is not None and guess.tag != 'Doc':
guess = guess.parent # If no parent, this will be None
return guess # Returns either Doc or None
def walk(self, action, doc=None):
"""
Walk through the element and all its children (sub-elements),
applying the provided function ``action``.
A trivial example would be:
.. code-block:: python
from panflute import *
def no_action(elem, doc):
pass
doc = Doc(Para(Str('a')))
altered = doc.walk(no_action)
:param action: function that takes (element, doc) as arguments.
:type action: :class:`function`
:param doc: root document; used to access metadata,
the output format (in ``.format``, other elements, and
other variables). Only use this variable if for some reason
you don't want to use the current document of an element.
:type doc: :class:`.Doc`
:rtype: :class:`Element` | ``[]`` | ``None``
"""
# Infer the document thanks to .parent magic
if doc is None:
doc = self.doc
# First iterate over children
for child in self._children:
obj = getattr(self, child)
if isinstance(obj, Element):
ans = obj.walk(action, doc)
elif isinstance(obj, ListContainer):
ans = (item.walk(action, doc) for item in obj)
# We need to convert single elements to iterables, so that they
# can be flattened later
ans = ((item,) if type(item) != list else item for item in ans)
# Flatten the list, by expanding any sublists
ans = list(chain.from_iterable(ans))
elif isinstance(obj, DictContainer):
ans = [(k, v.walk(action, doc)) for k, v in obj.items()]
ans = [(k, v) for k, v in ans if v != []]
elif obj is None:
ans = None # Empty table headers or captions
else:
raise TypeError(type(obj))
setattr(self, child, ans)
# Then apply the action to the element
altered = action(self, doc)
return self if altered is None else altered
|
sergiocorreia/panflute | panflute/base.py | Element.container | python | def container(self):
if self.parent is None:
return None
elif self.location is None:
return self.parent.content
else:
container = getattr(self.parent, self.location)
if isinstance(container, (ListContainer, DictContainer)):
return container
else:
assert self is container | Rarely used attribute that returns the ``ListContainer`` or
``DictContainer`` that contains the element
(or returns None if no such container exist)
:rtype: ``ListContainer`` | ``DictContainer`` | ``None`` | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/base.py#L147-L164 | null | class Element(object):
"""
Base class of all Pandoc elements
"""
__slots__ = ['parent', 'location']
_children = []
def __new__(cls, *args, **kwargs):
# This is just to initialize self.parent to None
element = object.__new__(cls)
element.parent = None
element.location = None
return element
@property
def tag(self):
tag = type(self).__name__
return tag
# ---------------------------
# Base methods
# ---------------------------
# Should be overridden except for trivial elements (Space, Null, etc.)
def __repr__(self):
# This is just a convenience method
# Override it for more complex elements
extra = []
for key in self.__slots__:
if not key.startswith('_') and key != 'text':
val = getattr(self, key)
if val not in ([], OrderedDict(), ''):
extra.append([key, val])
if extra:
extra = ('{}={}'.format(k, repr(v)) for k, v in extra)
extra = '; ' + ', '.join(x for x in extra)
else:
extra = ''
if '_content' in self.__slots__:
content = ' '.join(repr(x) for x in self.content)
return '{}({}{})'.format(self.tag, content, extra)
elif 'text' in self.__slots__:
return '{}({}{})'.format(self.tag, self.text, extra)
else:
return self.tag
def to_json(self):
return encode_dict(self.tag, self._slots_to_json())
def _slots_to_json(self):
# Default when the element contains nothing
return []
# ---------------------------
# .identifier .classes .attributes
# ---------------------------
def _set_ica(self, identifier, classes, attributes):
self.identifier = check_type(identifier, str)
self.classes = [check_type(cl, str) for cl in classes]
self.attributes = OrderedDict(attributes)
def _ica_to_json(self):
return [self.identifier, self.classes, list(self.attributes.items())]
# ---------------------------
# .content (setter and getter)
# ---------------------------
@property
def content(self):
"""
Sequence of :class:`Element` objects (usually either :class:`Block`
or :class:`Inline`) that are "children" of the current element.
Only available for elements that accept ``*args``.
Note: some elements have children in attributes other than ``content``
(such as :class:`.Table` that has children in the header and
caption attributes).
"""
return self._content
@content.setter
def content(self, value):
oktypes = self._content.oktypes
value = value.list if isinstance(value, ListContainer) else list(value)
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
def _set_content(self, value, oktypes):
"""
Similar to content.setter but when there are no existing oktypes
"""
if value is None:
value = []
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
# ---------------------------
# Navigation
# ---------------------------
@property
def index(self):
"""
Return position of element inside the parent.
:rtype: ``int`` | ``None``
"""
container = self.container
if container is not None:
return container.index(self)
@property
# id(self) == id(container)
def offset(self, n):
"""
Return a sibling element offset by n
:rtype: :class:`Element` | ``None``
"""
idx = self.index
if idx is not None:
sibling = idx + n
container = self.container
if 0 <= sibling < len(container):
return container[sibling]
@property
def next(self):
"""
Return the next sibling.
Note that ``elem.offset(1) == elem.next``
:rtype: :class:`Element` | ``None``
"""
return self.offset(1)
@property
def prev(self):
"""
Return the previous sibling.
Note that ``elem.offset(-1) == elem.prev``
:rtype: :class:`Element` | ``None``
"""
return self.offset(-1)
def ancestor(self, n):
"""
Return the n-th ancestor.
Note that ``elem.ancestor(1) == elem.parent``
:rtype: :class:`Element` | ``None``
"""
if not isinstance(n, int) or n < 1:
raise TypeError('Ancestor needs to be positive, received', n)
if n == 1 or self.parent is None:
return self.parent
else:
return self.parent.ancestor(n-1)
# ---------------------------
# Walking
# ---------------------------
@property
def doc(self):
"""
Return the root Doc element (if there is one)
"""
guess = self
while guess is not None and guess.tag != 'Doc':
guess = guess.parent # If no parent, this will be None
return guess # Returns either Doc or None
def walk(self, action, doc=None):
"""
Walk through the element and all its children (sub-elements),
applying the provided function ``action``.
A trivial example would be:
.. code-block:: python
from panflute import *
def no_action(elem, doc):
pass
doc = Doc(Para(Str('a')))
altered = doc.walk(no_action)
:param action: function that takes (element, doc) as arguments.
:type action: :class:`function`
:param doc: root document; used to access metadata,
the output format (in ``.format``, other elements, and
other variables). Only use this variable if for some reason
you don't want to use the current document of an element.
:type doc: :class:`.Doc`
:rtype: :class:`Element` | ``[]`` | ``None``
"""
# Infer the document thanks to .parent magic
if doc is None:
doc = self.doc
# First iterate over children
for child in self._children:
obj = getattr(self, child)
if isinstance(obj, Element):
ans = obj.walk(action, doc)
elif isinstance(obj, ListContainer):
ans = (item.walk(action, doc) for item in obj)
# We need to convert single elements to iterables, so that they
# can be flattened later
ans = ((item,) if type(item) != list else item for item in ans)
# Flatten the list, by expanding any sublists
ans = list(chain.from_iterable(ans))
elif isinstance(obj, DictContainer):
ans = [(k, v.walk(action, doc)) for k, v in obj.items()]
ans = [(k, v) for k, v in ans if v != []]
elif obj is None:
ans = None # Empty table headers or captions
else:
raise TypeError(type(obj))
setattr(self, child, ans)
# Then apply the action to the element
altered = action(self, doc)
return self if altered is None else altered
|
sergiocorreia/panflute | panflute/base.py | Element.offset | python | def offset(self, n):
idx = self.index
if idx is not None:
sibling = idx + n
container = self.container
if 0 <= sibling < len(container):
return container[sibling] | Return a sibling element offset by n
:rtype: :class:`Element` | ``None`` | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/base.py#L166-L178 | null | class Element(object):
"""
Base class of all Pandoc elements
"""
__slots__ = ['parent', 'location']
_children = []
def __new__(cls, *args, **kwargs):
# This is just to initialize self.parent to None
element = object.__new__(cls)
element.parent = None
element.location = None
return element
@property
def tag(self):
tag = type(self).__name__
return tag
# ---------------------------
# Base methods
# ---------------------------
# Should be overridden except for trivial elements (Space, Null, etc.)
def __repr__(self):
# This is just a convenience method
# Override it for more complex elements
extra = []
for key in self.__slots__:
if not key.startswith('_') and key != 'text':
val = getattr(self, key)
if val not in ([], OrderedDict(), ''):
extra.append([key, val])
if extra:
extra = ('{}={}'.format(k, repr(v)) for k, v in extra)
extra = '; ' + ', '.join(x for x in extra)
else:
extra = ''
if '_content' in self.__slots__:
content = ' '.join(repr(x) for x in self.content)
return '{}({}{})'.format(self.tag, content, extra)
elif 'text' in self.__slots__:
return '{}({}{})'.format(self.tag, self.text, extra)
else:
return self.tag
def to_json(self):
return encode_dict(self.tag, self._slots_to_json())
def _slots_to_json(self):
# Default when the element contains nothing
return []
# ---------------------------
# .identifier .classes .attributes
# ---------------------------
def _set_ica(self, identifier, classes, attributes):
self.identifier = check_type(identifier, str)
self.classes = [check_type(cl, str) for cl in classes]
self.attributes = OrderedDict(attributes)
def _ica_to_json(self):
return [self.identifier, self.classes, list(self.attributes.items())]
# ---------------------------
# .content (setter and getter)
# ---------------------------
@property
def content(self):
"""
Sequence of :class:`Element` objects (usually either :class:`Block`
or :class:`Inline`) that are "children" of the current element.
Only available for elements that accept ``*args``.
Note: some elements have children in attributes other than ``content``
(such as :class:`.Table` that has children in the header and
caption attributes).
"""
return self._content
@content.setter
def content(self, value):
oktypes = self._content.oktypes
value = value.list if isinstance(value, ListContainer) else list(value)
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
def _set_content(self, value, oktypes):
"""
Similar to content.setter but when there are no existing oktypes
"""
if value is None:
value = []
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
# ---------------------------
# Navigation
# ---------------------------
@property
def index(self):
"""
Return position of element inside the parent.
:rtype: ``int`` | ``None``
"""
container = self.container
if container is not None:
return container.index(self)
@property
def container(self):
"""
Rarely used attribute that returns the ``ListContainer`` or
``DictContainer`` that contains the element
(or returns None if no such container exist)
:rtype: ``ListContainer`` | ``DictContainer`` | ``None``
"""
if self.parent is None:
return None
elif self.location is None:
return self.parent.content
else:
container = getattr(self.parent, self.location)
if isinstance(container, (ListContainer, DictContainer)):
return container
else:
assert self is container # id(self) == id(container)
@property
def next(self):
"""
Return the next sibling.
Note that ``elem.offset(1) == elem.next``
:rtype: :class:`Element` | ``None``
"""
return self.offset(1)
@property
def prev(self):
"""
Return the previous sibling.
Note that ``elem.offset(-1) == elem.prev``
:rtype: :class:`Element` | ``None``
"""
return self.offset(-1)
def ancestor(self, n):
"""
Return the n-th ancestor.
Note that ``elem.ancestor(1) == elem.parent``
:rtype: :class:`Element` | ``None``
"""
if not isinstance(n, int) or n < 1:
raise TypeError('Ancestor needs to be positive, received', n)
if n == 1 or self.parent is None:
return self.parent
else:
return self.parent.ancestor(n-1)
# ---------------------------
# Walking
# ---------------------------
@property
def doc(self):
"""
Return the root Doc element (if there is one)
"""
guess = self
while guess is not None and guess.tag != 'Doc':
guess = guess.parent # If no parent, this will be None
return guess # Returns either Doc or None
def walk(self, action, doc=None):
"""
Walk through the element and all its children (sub-elements),
applying the provided function ``action``.
A trivial example would be:
.. code-block:: python
from panflute import *
def no_action(elem, doc):
pass
doc = Doc(Para(Str('a')))
altered = doc.walk(no_action)
:param action: function that takes (element, doc) as arguments.
:type action: :class:`function`
:param doc: root document; used to access metadata,
the output format (in ``.format``, other elements, and
other variables). Only use this variable if for some reason
you don't want to use the current document of an element.
:type doc: :class:`.Doc`
:rtype: :class:`Element` | ``[]`` | ``None``
"""
# Infer the document thanks to .parent magic
if doc is None:
doc = self.doc
# First iterate over children
for child in self._children:
obj = getattr(self, child)
if isinstance(obj, Element):
ans = obj.walk(action, doc)
elif isinstance(obj, ListContainer):
ans = (item.walk(action, doc) for item in obj)
# We need to convert single elements to iterables, so that they
# can be flattened later
ans = ((item,) if type(item) != list else item for item in ans)
# Flatten the list, by expanding any sublists
ans = list(chain.from_iterable(ans))
elif isinstance(obj, DictContainer):
ans = [(k, v.walk(action, doc)) for k, v in obj.items()]
ans = [(k, v) for k, v in ans if v != []]
elif obj is None:
ans = None # Empty table headers or captions
else:
raise TypeError(type(obj))
setattr(self, child, ans)
# Then apply the action to the element
altered = action(self, doc)
return self if altered is None else altered
|
sergiocorreia/panflute | panflute/base.py | Element.ancestor | python | def ancestor(self, n):
if not isinstance(n, int) or n < 1:
raise TypeError('Ancestor needs to be positive, received', n)
if n == 1 or self.parent is None:
return self.parent
else:
return self.parent.ancestor(n-1) | Return the n-th ancestor.
Note that ``elem.ancestor(1) == elem.parent``
:rtype: :class:`Element` | ``None`` | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/base.py#L201-L214 | null | class Element(object):
"""
Base class of all Pandoc elements
"""
__slots__ = ['parent', 'location']
_children = []
def __new__(cls, *args, **kwargs):
# This is just to initialize self.parent to None
element = object.__new__(cls)
element.parent = None
element.location = None
return element
@property
def tag(self):
tag = type(self).__name__
return tag
# ---------------------------
# Base methods
# ---------------------------
# Should be overridden except for trivial elements (Space, Null, etc.)
def __repr__(self):
# This is just a convenience method
# Override it for more complex elements
extra = []
for key in self.__slots__:
if not key.startswith('_') and key != 'text':
val = getattr(self, key)
if val not in ([], OrderedDict(), ''):
extra.append([key, val])
if extra:
extra = ('{}={}'.format(k, repr(v)) for k, v in extra)
extra = '; ' + ', '.join(x for x in extra)
else:
extra = ''
if '_content' in self.__slots__:
content = ' '.join(repr(x) for x in self.content)
return '{}({}{})'.format(self.tag, content, extra)
elif 'text' in self.__slots__:
return '{}({}{})'.format(self.tag, self.text, extra)
else:
return self.tag
def to_json(self):
return encode_dict(self.tag, self._slots_to_json())
def _slots_to_json(self):
# Default when the element contains nothing
return []
# ---------------------------
# .identifier .classes .attributes
# ---------------------------
def _set_ica(self, identifier, classes, attributes):
self.identifier = check_type(identifier, str)
self.classes = [check_type(cl, str) for cl in classes]
self.attributes = OrderedDict(attributes)
def _ica_to_json(self):
return [self.identifier, self.classes, list(self.attributes.items())]
# ---------------------------
# .content (setter and getter)
# ---------------------------
@property
def content(self):
"""
Sequence of :class:`Element` objects (usually either :class:`Block`
or :class:`Inline`) that are "children" of the current element.
Only available for elements that accept ``*args``.
Note: some elements have children in attributes other than ``content``
(such as :class:`.Table` that has children in the header and
caption attributes).
"""
return self._content
@content.setter
def content(self, value):
oktypes = self._content.oktypes
value = value.list if isinstance(value, ListContainer) else list(value)
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
def _set_content(self, value, oktypes):
"""
Similar to content.setter but when there are no existing oktypes
"""
if value is None:
value = []
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
# ---------------------------
# Navigation
# ---------------------------
@property
def index(self):
"""
Return position of element inside the parent.
:rtype: ``int`` | ``None``
"""
container = self.container
if container is not None:
return container.index(self)
@property
def container(self):
"""
Rarely used attribute that returns the ``ListContainer`` or
``DictContainer`` that contains the element
(or returns None if no such container exist)
:rtype: ``ListContainer`` | ``DictContainer`` | ``None``
"""
if self.parent is None:
return None
elif self.location is None:
return self.parent.content
else:
container = getattr(self.parent, self.location)
if isinstance(container, (ListContainer, DictContainer)):
return container
else:
assert self is container # id(self) == id(container)
def offset(self, n):
"""
Return a sibling element offset by n
:rtype: :class:`Element` | ``None``
"""
idx = self.index
if idx is not None:
sibling = idx + n
container = self.container
if 0 <= sibling < len(container):
return container[sibling]
@property
def next(self):
"""
Return the next sibling.
Note that ``elem.offset(1) == elem.next``
:rtype: :class:`Element` | ``None``
"""
return self.offset(1)
@property
def prev(self):
"""
Return the previous sibling.
Note that ``elem.offset(-1) == elem.prev``
:rtype: :class:`Element` | ``None``
"""
return self.offset(-1)
# ---------------------------
# Walking
# ---------------------------
@property
def doc(self):
"""
Return the root Doc element (if there is one)
"""
guess = self
while guess is not None and guess.tag != 'Doc':
guess = guess.parent # If no parent, this will be None
return guess # Returns either Doc or None
def walk(self, action, doc=None):
"""
Walk through the element and all its children (sub-elements),
applying the provided function ``action``.
A trivial example would be:
.. code-block:: python
from panflute import *
def no_action(elem, doc):
pass
doc = Doc(Para(Str('a')))
altered = doc.walk(no_action)
:param action: function that takes (element, doc) as arguments.
:type action: :class:`function`
:param doc: root document; used to access metadata,
the output format (in ``.format``, other elements, and
other variables). Only use this variable if for some reason
you don't want to use the current document of an element.
:type doc: :class:`.Doc`
:rtype: :class:`Element` | ``[]`` | ``None``
"""
# Infer the document thanks to .parent magic
if doc is None:
doc = self.doc
# First iterate over children
for child in self._children:
obj = getattr(self, child)
if isinstance(obj, Element):
ans = obj.walk(action, doc)
elif isinstance(obj, ListContainer):
ans = (item.walk(action, doc) for item in obj)
# We need to convert single elements to iterables, so that they
# can be flattened later
ans = ((item,) if type(item) != list else item for item in ans)
# Flatten the list, by expanding any sublists
ans = list(chain.from_iterable(ans))
elif isinstance(obj, DictContainer):
ans = [(k, v.walk(action, doc)) for k, v in obj.items()]
ans = [(k, v) for k, v in ans if v != []]
elif obj is None:
ans = None # Empty table headers or captions
else:
raise TypeError(type(obj))
setattr(self, child, ans)
# Then apply the action to the element
altered = action(self, doc)
return self if altered is None else altered
|
sergiocorreia/panflute | panflute/base.py | Element.doc | python | def doc(self):
guess = self
while guess is not None and guess.tag != 'Doc':
guess = guess.parent # If no parent, this will be None
return guess | Return the root Doc element (if there is one) | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/base.py#L221-L228 | null | class Element(object):
"""
Base class of all Pandoc elements
"""
__slots__ = ['parent', 'location']
_children = []
def __new__(cls, *args, **kwargs):
# This is just to initialize self.parent to None
element = object.__new__(cls)
element.parent = None
element.location = None
return element
@property
def tag(self):
tag = type(self).__name__
return tag
# ---------------------------
# Base methods
# ---------------------------
# Should be overridden except for trivial elements (Space, Null, etc.)
def __repr__(self):
# This is just a convenience method
# Override it for more complex elements
extra = []
for key in self.__slots__:
if not key.startswith('_') and key != 'text':
val = getattr(self, key)
if val not in ([], OrderedDict(), ''):
extra.append([key, val])
if extra:
extra = ('{}={}'.format(k, repr(v)) for k, v in extra)
extra = '; ' + ', '.join(x for x in extra)
else:
extra = ''
if '_content' in self.__slots__:
content = ' '.join(repr(x) for x in self.content)
return '{}({}{})'.format(self.tag, content, extra)
elif 'text' in self.__slots__:
return '{}({}{})'.format(self.tag, self.text, extra)
else:
return self.tag
def to_json(self):
return encode_dict(self.tag, self._slots_to_json())
def _slots_to_json(self):
# Default when the element contains nothing
return []
# ---------------------------
# .identifier .classes .attributes
# ---------------------------
def _set_ica(self, identifier, classes, attributes):
self.identifier = check_type(identifier, str)
self.classes = [check_type(cl, str) for cl in classes]
self.attributes = OrderedDict(attributes)
def _ica_to_json(self):
return [self.identifier, self.classes, list(self.attributes.items())]
# ---------------------------
# .content (setter and getter)
# ---------------------------
@property
def content(self):
"""
Sequence of :class:`Element` objects (usually either :class:`Block`
or :class:`Inline`) that are "children" of the current element.
Only available for elements that accept ``*args``.
Note: some elements have children in attributes other than ``content``
(such as :class:`.Table` that has children in the header and
caption attributes).
"""
return self._content
@content.setter
def content(self, value):
oktypes = self._content.oktypes
value = value.list if isinstance(value, ListContainer) else list(value)
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
def _set_content(self, value, oktypes):
"""
Similar to content.setter but when there are no existing oktypes
"""
if value is None:
value = []
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
# ---------------------------
# Navigation
# ---------------------------
@property
def index(self):
"""
Return position of element inside the parent.
:rtype: ``int`` | ``None``
"""
container = self.container
if container is not None:
return container.index(self)
@property
def container(self):
"""
Rarely used attribute that returns the ``ListContainer`` or
``DictContainer`` that contains the element
(or returns None if no such container exist)
:rtype: ``ListContainer`` | ``DictContainer`` | ``None``
"""
if self.parent is None:
return None
elif self.location is None:
return self.parent.content
else:
container = getattr(self.parent, self.location)
if isinstance(container, (ListContainer, DictContainer)):
return container
else:
assert self is container # id(self) == id(container)
def offset(self, n):
"""
Return a sibling element offset by n
:rtype: :class:`Element` | ``None``
"""
idx = self.index
if idx is not None:
sibling = idx + n
container = self.container
if 0 <= sibling < len(container):
return container[sibling]
@property
def next(self):
"""
Return the next sibling.
Note that ``elem.offset(1) == elem.next``
:rtype: :class:`Element` | ``None``
"""
return self.offset(1)
@property
def prev(self):
"""
Return the previous sibling.
Note that ``elem.offset(-1) == elem.prev``
:rtype: :class:`Element` | ``None``
"""
return self.offset(-1)
def ancestor(self, n):
"""
Return the n-th ancestor.
Note that ``elem.ancestor(1) == elem.parent``
:rtype: :class:`Element` | ``None``
"""
if not isinstance(n, int) or n < 1:
raise TypeError('Ancestor needs to be positive, received', n)
if n == 1 or self.parent is None:
return self.parent
else:
return self.parent.ancestor(n-1)
# ---------------------------
# Walking
# ---------------------------
@property
# Returns either Doc or None
def walk(self, action, doc=None):
"""
Walk through the element and all its children (sub-elements),
applying the provided function ``action``.
A trivial example would be:
.. code-block:: python
from panflute import *
def no_action(elem, doc):
pass
doc = Doc(Para(Str('a')))
altered = doc.walk(no_action)
:param action: function that takes (element, doc) as arguments.
:type action: :class:`function`
:param doc: root document; used to access metadata,
the output format (in ``.format``, other elements, and
other variables). Only use this variable if for some reason
you don't want to use the current document of an element.
:type doc: :class:`.Doc`
:rtype: :class:`Element` | ``[]`` | ``None``
"""
# Infer the document thanks to .parent magic
if doc is None:
doc = self.doc
# First iterate over children
for child in self._children:
obj = getattr(self, child)
if isinstance(obj, Element):
ans = obj.walk(action, doc)
elif isinstance(obj, ListContainer):
ans = (item.walk(action, doc) for item in obj)
# We need to convert single elements to iterables, so that they
# can be flattened later
ans = ((item,) if type(item) != list else item for item in ans)
# Flatten the list, by expanding any sublists
ans = list(chain.from_iterable(ans))
elif isinstance(obj, DictContainer):
ans = [(k, v.walk(action, doc)) for k, v in obj.items()]
ans = [(k, v) for k, v in ans if v != []]
elif obj is None:
ans = None # Empty table headers or captions
else:
raise TypeError(type(obj))
setattr(self, child, ans)
# Then apply the action to the element
altered = action(self, doc)
return self if altered is None else altered
|
sergiocorreia/panflute | panflute/base.py | Element.walk | python | def walk(self, action, doc=None):
# Infer the document thanks to .parent magic
if doc is None:
doc = self.doc
# First iterate over children
for child in self._children:
obj = getattr(self, child)
if isinstance(obj, Element):
ans = obj.walk(action, doc)
elif isinstance(obj, ListContainer):
ans = (item.walk(action, doc) for item in obj)
# We need to convert single elements to iterables, so that they
# can be flattened later
ans = ((item,) if type(item) != list else item for item in ans)
# Flatten the list, by expanding any sublists
ans = list(chain.from_iterable(ans))
elif isinstance(obj, DictContainer):
ans = [(k, v.walk(action, doc)) for k, v in obj.items()]
ans = [(k, v) for k, v in ans if v != []]
elif obj is None:
ans = None # Empty table headers or captions
else:
raise TypeError(type(obj))
setattr(self, child, ans)
# Then apply the action to the element
altered = action(self, doc)
return self if altered is None else altered | Walk through the element and all its children (sub-elements),
applying the provided function ``action``.
A trivial example would be:
.. code-block:: python
from panflute import *
def no_action(elem, doc):
pass
doc = Doc(Para(Str('a')))
altered = doc.walk(no_action)
:param action: function that takes (element, doc) as arguments.
:type action: :class:`function`
:param doc: root document; used to access metadata,
the output format (in ``.format``, other elements, and
other variables). Only use this variable if for some reason
you don't want to use the current document of an element.
:type doc: :class:`.Doc`
:rtype: :class:`Element` | ``[]`` | ``None`` | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/base.py#L230-L285 | null | class Element(object):
"""
Base class of all Pandoc elements
"""
__slots__ = ['parent', 'location']
_children = []
def __new__(cls, *args, **kwargs):
# This is just to initialize self.parent to None
element = object.__new__(cls)
element.parent = None
element.location = None
return element
@property
def tag(self):
tag = type(self).__name__
return tag
# ---------------------------
# Base methods
# ---------------------------
# Should be overridden except for trivial elements (Space, Null, etc.)
def __repr__(self):
# This is just a convenience method
# Override it for more complex elements
extra = []
for key in self.__slots__:
if not key.startswith('_') and key != 'text':
val = getattr(self, key)
if val not in ([], OrderedDict(), ''):
extra.append([key, val])
if extra:
extra = ('{}={}'.format(k, repr(v)) for k, v in extra)
extra = '; ' + ', '.join(x for x in extra)
else:
extra = ''
if '_content' in self.__slots__:
content = ' '.join(repr(x) for x in self.content)
return '{}({}{})'.format(self.tag, content, extra)
elif 'text' in self.__slots__:
return '{}({}{})'.format(self.tag, self.text, extra)
else:
return self.tag
def to_json(self):
return encode_dict(self.tag, self._slots_to_json())
def _slots_to_json(self):
# Default when the element contains nothing
return []
# ---------------------------
# .identifier .classes .attributes
# ---------------------------
def _set_ica(self, identifier, classes, attributes):
self.identifier = check_type(identifier, str)
self.classes = [check_type(cl, str) for cl in classes]
self.attributes = OrderedDict(attributes)
def _ica_to_json(self):
return [self.identifier, self.classes, list(self.attributes.items())]
# ---------------------------
# .content (setter and getter)
# ---------------------------
@property
def content(self):
"""
Sequence of :class:`Element` objects (usually either :class:`Block`
or :class:`Inline`) that are "children" of the current element.
Only available for elements that accept ``*args``.
Note: some elements have children in attributes other than ``content``
(such as :class:`.Table` that has children in the header and
caption attributes).
"""
return self._content
@content.setter
def content(self, value):
oktypes = self._content.oktypes
value = value.list if isinstance(value, ListContainer) else list(value)
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
def _set_content(self, value, oktypes):
"""
Similar to content.setter but when there are no existing oktypes
"""
if value is None:
value = []
self._content = ListContainer(*value, oktypes=oktypes, parent=self)
# ---------------------------
# Navigation
# ---------------------------
@property
def index(self):
"""
Return position of element inside the parent.
:rtype: ``int`` | ``None``
"""
container = self.container
if container is not None:
return container.index(self)
@property
def container(self):
"""
Rarely used attribute that returns the ``ListContainer`` or
``DictContainer`` that contains the element
(or returns None if no such container exist)
:rtype: ``ListContainer`` | ``DictContainer`` | ``None``
"""
if self.parent is None:
return None
elif self.location is None:
return self.parent.content
else:
container = getattr(self.parent, self.location)
if isinstance(container, (ListContainer, DictContainer)):
return container
else:
assert self is container # id(self) == id(container)
def offset(self, n):
"""
Return a sibling element offset by n
:rtype: :class:`Element` | ``None``
"""
idx = self.index
if idx is not None:
sibling = idx + n
container = self.container
if 0 <= sibling < len(container):
return container[sibling]
@property
def next(self):
"""
Return the next sibling.
Note that ``elem.offset(1) == elem.next``
:rtype: :class:`Element` | ``None``
"""
return self.offset(1)
@property
def prev(self):
"""
Return the previous sibling.
Note that ``elem.offset(-1) == elem.prev``
:rtype: :class:`Element` | ``None``
"""
return self.offset(-1)
def ancestor(self, n):
"""
Return the n-th ancestor.
Note that ``elem.ancestor(1) == elem.parent``
:rtype: :class:`Element` | ``None``
"""
if not isinstance(n, int) or n < 1:
raise TypeError('Ancestor needs to be positive, received', n)
if n == 1 or self.parent is None:
return self.parent
else:
return self.parent.ancestor(n-1)
# ---------------------------
# Walking
# ---------------------------
@property
def doc(self):
"""
Return the root Doc element (if there is one)
"""
guess = self
while guess is not None and guess.tag != 'Doc':
guess = guess.parent # If no parent, this will be None
return guess # Returns either Doc or None
|
sergiocorreia/panflute | panflute/autofilter.py | stdio | python | def stdio(filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False, input_stream=None, output_stream=None):
doc = load(input_stream)
# meta = doc.metadata # Local variable 'meta' value is not used
verbose = doc.get_metadata('panflute-verbose', False)
if search_dirs is None:
# metadata 'panflute-path' can be a list, a string, or missing
# `search_dirs` should be a list of str
search_dirs = doc.get_metadata('panflute-path', [])
if type(search_dirs) != list:
search_dirs = [search_dirs]
if '--data-dir' in search_dirs:
data_dir = True
if '--no-sys-path' in search_dirs:
sys_path = False
search_dirs = [dir_ for dir_ in search_dirs
if dir_ not in ('--data-dir', '--no-sys-path')]
if verbose:
debug('panflute: data_dir={} sys_path={}'.format(data_dir, sys_path))
search_dirs = [p.normpath(p.expanduser(p.expandvars(dir_))) for dir_ in search_dirs]
if not panfl_:
# default panflute behaviour:
search_dirs.append('.')
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += sys.path
else:
# panfl/pandoctools behaviour:
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += reduced_sys_path
# Display message (tests that everything is working ok)
msg = doc.get_metadata('panflute-echo', False)
if msg:
debug(msg)
if filters is None:
# metadata 'panflute-filters' can be a list, a string, or missing
# `filters` should be a list of str
filters = doc.get_metadata('panflute-filters', [])
if type(filters) != list:
filters = [filters]
if filters:
if verbose:
msg = "panflute: will run the following filters:"
debug(msg, ' '.join(filters))
doc = autorun_filters(filters, doc, search_dirs, verbose)
elif verbose:
debug("panflute: no filters were provided")
dump(doc, output_stream) | Reads JSON from stdin and second CLI argument:
``sys.argv[1]``. Dumps JSON doc to the stdout.
:param filters: Union[List[str], None]
if None then read from metadata
:param search_dirs: Union[List[str], None]
if None then read from metadata
:param data_dir: bool
:param sys_path: bool
:param panfl_: bool
:param input_stream: io.StringIO or None
for debug purpose
:param output_stream: io.StringIO or None
for debug purpose
:return: None | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/autofilter.py#L39-L112 | [
"def debug(*args, **kwargs):\n \"\"\"\n Same as print, but prints to ``stderr``\n (which is not intercepted by Pandoc).\n \"\"\"\n print(file=sys.stderr, *args, **kwargs)\n",
"def dump(doc, output_stream=None):\n \"\"\"\n Dump a :class:`.Doc` object into a JSON-encoded text string.\n\n The... | """
Allow Panflute to be run as a command line executable
to be used as a Pandoc filter or used in Pandoctools
shell scripts as Pandoc filter with arguments.
Exports ``main`` and ``panfl``.
"""
import os
import os.path as p
import sys
import click
from .io import load, dump
from .tools import debug
from .utils import ContextImport
reduced_sys_path = [dir_ for dir_ in sys.path if (dir_ not in ('', '.')) and p.isdir(dir_)]
def get_filter_dir(hardcoded=True):
if hardcoded:
if os.name == 'nt':
return p.join(os.environ["APPDATA"], "pandoc", "filters")
else:
return p.join(os.environ["HOME"], ".pandoc", "filters")
else:
from .tools import run_pandoc
# Extract $DATADIR
info = run_pandoc(args=['--version']).splitlines()
prefix = "Default user data directory: "
info = [row for row in info if row.startswith(prefix)]
assert len(info) == 1
data_dir = info[0][len(prefix):]
return p.normpath(p.expanduser(p.expandvars(p.join(data_dir, 'filters'))))
def main():
"""
Allows Panflute to be run as a command line executable
to be used as a Pandoc filter.
"""
stdio()
help_str = """Allows Panflute to be run as a command line executable:
* to be used in Pandoctools shell scripts as Pandoc filter with
multiple arguments (should have -t/--to option in this case):
`pandoc -t json | panfl -t markdown foo.bar | pandoc -f json`
* to be used as a Pandoc filter (in this case only one positional
argument is allowed of all options): `pandoc --filter panfl`
Filters may be set with or without .py extension.
It can be relative or absolutele paths to files or modules specs
like `foo.bar`.
MIND THAT Panflute temporarily prepends folder of the filter
(or relevant dir provided if module spec) TO THE `sys.path` before
importing the filter!
Search preserves directories order (except for --data-dir and
`sys.path`).
"""
@click.command(help=help_str)
@click.argument('filters', nargs=-1)
@click.option('-w', '-t', '--write', '--to', 'to', type=str, default=None,
help='Derivative of Pandoc writer option that Pandoc passes to filters.')
@click.option('--dir', '-d', 'search_dirs', multiple=True,
help="Search filters in provided directories: `-d dir1 -d dir2`.")
@click.option('--data-dir', is_flag=True, default=False,
help="Search filters in default user data directory listed in `pandoc --version` " +
"(in it's `filters` subfolder actually). It's appended to the search list.")
@click.option('--no-sys-path', 'sys_path', is_flag=True, default=True,
help="Disable search filters in python's `sys.path` (without '' and '.') " +
"that is appended to the search list.")
def panfl(filters, to, search_dirs, data_dir, sys_path):
"""
Allows Panflute to be run as a command line executable:
* to be used in Pandoctools shell scripts as Pandoc filter with
multiple arguments (should have -t/--to option in this case):
``pandoc -t json | panfl -t markdown foo.bar | pandoc -f json``
* to be used as a Pandoc filter (in this case only one positional
argument is allowed of all options):
``pandoc --filter panfl``
MIND THAT Panflute temporarily prepends folder of the filter
(or relevant dir provided if module spec) TO THE `sys.path` before
importing the filter!
"""
if to is None:
if (len(filters) > 1) or search_dirs or not sys_path or data_dir:
raise ValueError('When no `--to` option then Pandoc filter mode assumed and ' +
'only one positional argument is allowed of all options.')
else:
filters, search_dirs = None, None
sys_path, data_dir = True, False
else:
filters, search_dirs = list(filters), list(search_dirs)
# `load()` in `stdio()` needs `to` in the 2nd arg
sys.argv[1:] = []
sys.argv.append(to)
stdio(filters, search_dirs, data_dir, sys_path, panfl_=True)
def autorun_filters(filters, doc, search_dirs, verbose):
"""
:param filters: list of str
:param doc: panflute.Doc
:param search_dirs: list of str
:param verbose: bool
:return: panflute.Doc
"""
def remove_py(s):
return s[:-3] if s.endswith('.py') else s
filter_paths = []
for filter_ in filters:
filter_exp = p.normpath(p.expanduser(p.expandvars(filter_)))
if filter_exp == remove_py(p.basename(filter_exp)).lstrip('.'):
# import .foo # is not supported
module = True
mod_path = filter_exp.replace('.', p.sep)
path_postfixes = (p.join(mod_path, '__init__.py'), mod_path + '.py')
else:
module = False
# allow with and without .py ending
path_postfixes = (remove_py(filter_exp) + '.py',)
for path, path_postf in [(path, path_postf)
for path in search_dirs
for path_postf in path_postfixes]:
if p.isabs(path_postf):
filter_path = path_postf
else:
filter_path = p.abspath(p.normpath(p.join(path, path_postf)))
if p.isfile(filter_path):
if verbose:
debug("panflute: filter <{}> found in {}".format(filter_, filter_path))
if module and not (path in reduced_sys_path):
extra_dir = p.abspath(path)
# `path` already doesn't contain `.`, `..`, env vars or `~`
else:
extra_dir = None
module_ = filter_exp if module else filter_path
filter_paths.append((filter_, filter_path, module_, extra_dir))
break
elif p.isabs(path_postf):
if verbose:
debug(" filter <{}> NOT found in {}".format(filter_, filter_path))
raise Exception("filter not found: " + filter_)
elif verbose:
debug(" filter <{}> NOT found in {}".format(filter_, filter_path))
else:
raise Exception("filter not found: " + filter_)
for filter_, filter_path, module_, extra_dir in filter_paths:
if verbose:
debug("panflute: running filter <{}>".format(filter_))
with ContextImport(module_, extra_dir) as module:
try:
module.main(doc)
except Exception as e:
debug("Failed to run filter: " + filter_)
if not hasattr(module, 'main'):
debug(' - Possible cause: filter lacks a main() function')
debug('Filter code:')
debug('-' * 64)
with open(filter_path) as fp:
debug(fp.read())
debug('-' * 64)
raise Exception(e)
if verbose:
debug("panflute: filter <{}> completed".format(filter_))
return doc
|
sergiocorreia/panflute | panflute/autofilter.py | panfl | python | def panfl(filters, to, search_dirs, data_dir, sys_path):
if to is None:
if (len(filters) > 1) or search_dirs or not sys_path or data_dir:
raise ValueError('When no `--to` option then Pandoc filter mode assumed and ' +
'only one positional argument is allowed of all options.')
else:
filters, search_dirs = None, None
sys_path, data_dir = True, False
else:
filters, search_dirs = list(filters), list(search_dirs)
# `load()` in `stdio()` needs `to` in the 2nd arg
sys.argv[1:] = []
sys.argv.append(to)
stdio(filters, search_dirs, data_dir, sys_path, panfl_=True) | Allows Panflute to be run as a command line executable:
* to be used in Pandoctools shell scripts as Pandoc filter with
multiple arguments (should have -t/--to option in this case):
``pandoc -t json | panfl -t markdown foo.bar | pandoc -f json``
* to be used as a Pandoc filter (in this case only one positional
argument is allowed of all options):
``pandoc --filter panfl``
MIND THAT Panflute temporarily prepends folder of the filter
(or relevant dir provided if module spec) TO THE `sys.path` before
importing the filter! | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/autofilter.py#L157-L186 | [
"def stdio(filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False, input_stream=None, output_stream=None):\n \"\"\"\n Reads JSON from stdin and second CLI argument:\n ``sys.argv[1]``. Dumps JSON doc to the stdout.\n\n :param filters: Union[List[str], None]\n if None then read ... | """
Allow Panflute to be run as a command line executable
to be used as a Pandoc filter or used in Pandoctools
shell scripts as Pandoc filter with arguments.
Exports ``main`` and ``panfl``.
"""
import os
import os.path as p
import sys
import click
from .io import load, dump
from .tools import debug
from .utils import ContextImport
reduced_sys_path = [dir_ for dir_ in sys.path if (dir_ not in ('', '.')) and p.isdir(dir_)]
def get_filter_dir(hardcoded=True):
if hardcoded:
if os.name == 'nt':
return p.join(os.environ["APPDATA"], "pandoc", "filters")
else:
return p.join(os.environ["HOME"], ".pandoc", "filters")
else:
from .tools import run_pandoc
# Extract $DATADIR
info = run_pandoc(args=['--version']).splitlines()
prefix = "Default user data directory: "
info = [row for row in info if row.startswith(prefix)]
assert len(info) == 1
data_dir = info[0][len(prefix):]
return p.normpath(p.expanduser(p.expandvars(p.join(data_dir, 'filters'))))
def stdio(filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False, input_stream=None, output_stream=None):
"""
Reads JSON from stdin and second CLI argument:
``sys.argv[1]``. Dumps JSON doc to the stdout.
:param filters: Union[List[str], None]
if None then read from metadata
:param search_dirs: Union[List[str], None]
if None then read from metadata
:param data_dir: bool
:param sys_path: bool
:param panfl_: bool
:param input_stream: io.StringIO or None
for debug purpose
:param output_stream: io.StringIO or None
for debug purpose
:return: None
"""
doc = load(input_stream)
# meta = doc.metadata # Local variable 'meta' value is not used
verbose = doc.get_metadata('panflute-verbose', False)
if search_dirs is None:
# metadata 'panflute-path' can be a list, a string, or missing
# `search_dirs` should be a list of str
search_dirs = doc.get_metadata('panflute-path', [])
if type(search_dirs) != list:
search_dirs = [search_dirs]
if '--data-dir' in search_dirs:
data_dir = True
if '--no-sys-path' in search_dirs:
sys_path = False
search_dirs = [dir_ for dir_ in search_dirs
if dir_ not in ('--data-dir', '--no-sys-path')]
if verbose:
debug('panflute: data_dir={} sys_path={}'.format(data_dir, sys_path))
search_dirs = [p.normpath(p.expanduser(p.expandvars(dir_))) for dir_ in search_dirs]
if not panfl_:
# default panflute behaviour:
search_dirs.append('.')
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += sys.path
else:
# panfl/pandoctools behaviour:
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += reduced_sys_path
# Display message (tests that everything is working ok)
msg = doc.get_metadata('panflute-echo', False)
if msg:
debug(msg)
if filters is None:
# metadata 'panflute-filters' can be a list, a string, or missing
# `filters` should be a list of str
filters = doc.get_metadata('panflute-filters', [])
if type(filters) != list:
filters = [filters]
if filters:
if verbose:
msg = "panflute: will run the following filters:"
debug(msg, ' '.join(filters))
doc = autorun_filters(filters, doc, search_dirs, verbose)
elif verbose:
debug("panflute: no filters were provided")
dump(doc, output_stream)
def main():
"""
Allows Panflute to be run as a command line executable
to be used as a Pandoc filter.
"""
stdio()
help_str = """Allows Panflute to be run as a command line executable:
* to be used in Pandoctools shell scripts as Pandoc filter with
multiple arguments (should have -t/--to option in this case):
`pandoc -t json | panfl -t markdown foo.bar | pandoc -f json`
* to be used as a Pandoc filter (in this case only one positional
argument is allowed of all options): `pandoc --filter panfl`
Filters may be set with or without .py extension.
It can be relative or absolutele paths to files or modules specs
like `foo.bar`.
MIND THAT Panflute temporarily prepends folder of the filter
(or relevant dir provided if module spec) TO THE `sys.path` before
importing the filter!
Search preserves directories order (except for --data-dir and
`sys.path`).
"""
@click.command(help=help_str)
@click.argument('filters', nargs=-1)
@click.option('-w', '-t', '--write', '--to', 'to', type=str, default=None,
help='Derivative of Pandoc writer option that Pandoc passes to filters.')
@click.option('--dir', '-d', 'search_dirs', multiple=True,
help="Search filters in provided directories: `-d dir1 -d dir2`.")
@click.option('--data-dir', is_flag=True, default=False,
help="Search filters in default user data directory listed in `pandoc --version` " +
"(in it's `filters` subfolder actually). It's appended to the search list.")
@click.option('--no-sys-path', 'sys_path', is_flag=True, default=True,
help="Disable search filters in python's `sys.path` (without '' and '.') " +
"that is appended to the search list.")
def autorun_filters(filters, doc, search_dirs, verbose):
"""
:param filters: list of str
:param doc: panflute.Doc
:param search_dirs: list of str
:param verbose: bool
:return: panflute.Doc
"""
def remove_py(s):
return s[:-3] if s.endswith('.py') else s
filter_paths = []
for filter_ in filters:
filter_exp = p.normpath(p.expanduser(p.expandvars(filter_)))
if filter_exp == remove_py(p.basename(filter_exp)).lstrip('.'):
# import .foo # is not supported
module = True
mod_path = filter_exp.replace('.', p.sep)
path_postfixes = (p.join(mod_path, '__init__.py'), mod_path + '.py')
else:
module = False
# allow with and without .py ending
path_postfixes = (remove_py(filter_exp) + '.py',)
for path, path_postf in [(path, path_postf)
for path in search_dirs
for path_postf in path_postfixes]:
if p.isabs(path_postf):
filter_path = path_postf
else:
filter_path = p.abspath(p.normpath(p.join(path, path_postf)))
if p.isfile(filter_path):
if verbose:
debug("panflute: filter <{}> found in {}".format(filter_, filter_path))
if module and not (path in reduced_sys_path):
extra_dir = p.abspath(path)
# `path` already doesn't contain `.`, `..`, env vars or `~`
else:
extra_dir = None
module_ = filter_exp if module else filter_path
filter_paths.append((filter_, filter_path, module_, extra_dir))
break
elif p.isabs(path_postf):
if verbose:
debug(" filter <{}> NOT found in {}".format(filter_, filter_path))
raise Exception("filter not found: " + filter_)
elif verbose:
debug(" filter <{}> NOT found in {}".format(filter_, filter_path))
else:
raise Exception("filter not found: " + filter_)
for filter_, filter_path, module_, extra_dir in filter_paths:
if verbose:
debug("panflute: running filter <{}>".format(filter_))
with ContextImport(module_, extra_dir) as module:
try:
module.main(doc)
except Exception as e:
debug("Failed to run filter: " + filter_)
if not hasattr(module, 'main'):
debug(' - Possible cause: filter lacks a main() function')
debug('Filter code:')
debug('-' * 64)
with open(filter_path) as fp:
debug(fp.read())
debug('-' * 64)
raise Exception(e)
if verbose:
debug("panflute: filter <{}> completed".format(filter_))
return doc
|
sergiocorreia/panflute | panflute/autofilter.py | autorun_filters | python | def autorun_filters(filters, doc, search_dirs, verbose):
def remove_py(s):
return s[:-3] if s.endswith('.py') else s
filter_paths = []
for filter_ in filters:
filter_exp = p.normpath(p.expanduser(p.expandvars(filter_)))
if filter_exp == remove_py(p.basename(filter_exp)).lstrip('.'):
# import .foo # is not supported
module = True
mod_path = filter_exp.replace('.', p.sep)
path_postfixes = (p.join(mod_path, '__init__.py'), mod_path + '.py')
else:
module = False
# allow with and without .py ending
path_postfixes = (remove_py(filter_exp) + '.py',)
for path, path_postf in [(path, path_postf)
for path in search_dirs
for path_postf in path_postfixes]:
if p.isabs(path_postf):
filter_path = path_postf
else:
filter_path = p.abspath(p.normpath(p.join(path, path_postf)))
if p.isfile(filter_path):
if verbose:
debug("panflute: filter <{}> found in {}".format(filter_, filter_path))
if module and not (path in reduced_sys_path):
extra_dir = p.abspath(path)
# `path` already doesn't contain `.`, `..`, env vars or `~`
else:
extra_dir = None
module_ = filter_exp if module else filter_path
filter_paths.append((filter_, filter_path, module_, extra_dir))
break
elif p.isabs(path_postf):
if verbose:
debug(" filter <{}> NOT found in {}".format(filter_, filter_path))
raise Exception("filter not found: " + filter_)
elif verbose:
debug(" filter <{}> NOT found in {}".format(filter_, filter_path))
else:
raise Exception("filter not found: " + filter_)
for filter_, filter_path, module_, extra_dir in filter_paths:
if verbose:
debug("panflute: running filter <{}>".format(filter_))
with ContextImport(module_, extra_dir) as module:
try:
module.main(doc)
except Exception as e:
debug("Failed to run filter: " + filter_)
if not hasattr(module, 'main'):
debug(' - Possible cause: filter lacks a main() function')
debug('Filter code:')
debug('-' * 64)
with open(filter_path) as fp:
debug(fp.read())
debug('-' * 64)
raise Exception(e)
if verbose:
debug("panflute: filter <{}> completed".format(filter_))
return doc | :param filters: list of str
:param doc: panflute.Doc
:param search_dirs: list of str
:param verbose: bool
:return: panflute.Doc | train | https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/autofilter.py#L189-L263 | [
"def debug(*args, **kwargs):\n \"\"\"\n Same as print, but prints to ``stderr``\n (which is not intercepted by Pandoc).\n \"\"\"\n print(file=sys.stderr, *args, **kwargs)\n",
"def remove_py(s):\n return s[:-3] if s.endswith('.py') else s\n"
] | """
Allow Panflute to be run as a command line executable
to be used as a Pandoc filter or used in Pandoctools
shell scripts as Pandoc filter with arguments.
Exports ``main`` and ``panfl``.
"""
import os
import os.path as p
import sys
import click
from .io import load, dump
from .tools import debug
from .utils import ContextImport
reduced_sys_path = [dir_ for dir_ in sys.path if (dir_ not in ('', '.')) and p.isdir(dir_)]
def get_filter_dir(hardcoded=True):
if hardcoded:
if os.name == 'nt':
return p.join(os.environ["APPDATA"], "pandoc", "filters")
else:
return p.join(os.environ["HOME"], ".pandoc", "filters")
else:
from .tools import run_pandoc
# Extract $DATADIR
info = run_pandoc(args=['--version']).splitlines()
prefix = "Default user data directory: "
info = [row for row in info if row.startswith(prefix)]
assert len(info) == 1
data_dir = info[0][len(prefix):]
return p.normpath(p.expanduser(p.expandvars(p.join(data_dir, 'filters'))))
def stdio(filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False, input_stream=None, output_stream=None):
"""
Reads JSON from stdin and second CLI argument:
``sys.argv[1]``. Dumps JSON doc to the stdout.
:param filters: Union[List[str], None]
if None then read from metadata
:param search_dirs: Union[List[str], None]
if None then read from metadata
:param data_dir: bool
:param sys_path: bool
:param panfl_: bool
:param input_stream: io.StringIO or None
for debug purpose
:param output_stream: io.StringIO or None
for debug purpose
:return: None
"""
doc = load(input_stream)
# meta = doc.metadata # Local variable 'meta' value is not used
verbose = doc.get_metadata('panflute-verbose', False)
if search_dirs is None:
# metadata 'panflute-path' can be a list, a string, or missing
# `search_dirs` should be a list of str
search_dirs = doc.get_metadata('panflute-path', [])
if type(search_dirs) != list:
search_dirs = [search_dirs]
if '--data-dir' in search_dirs:
data_dir = True
if '--no-sys-path' in search_dirs:
sys_path = False
search_dirs = [dir_ for dir_ in search_dirs
if dir_ not in ('--data-dir', '--no-sys-path')]
if verbose:
debug('panflute: data_dir={} sys_path={}'.format(data_dir, sys_path))
search_dirs = [p.normpath(p.expanduser(p.expandvars(dir_))) for dir_ in search_dirs]
if not panfl_:
# default panflute behaviour:
search_dirs.append('.')
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += sys.path
else:
# panfl/pandoctools behaviour:
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += reduced_sys_path
# Display message (tests that everything is working ok)
msg = doc.get_metadata('panflute-echo', False)
if msg:
debug(msg)
if filters is None:
# metadata 'panflute-filters' can be a list, a string, or missing
# `filters` should be a list of str
filters = doc.get_metadata('panflute-filters', [])
if type(filters) != list:
filters = [filters]
if filters:
if verbose:
msg = "panflute: will run the following filters:"
debug(msg, ' '.join(filters))
doc = autorun_filters(filters, doc, search_dirs, verbose)
elif verbose:
debug("panflute: no filters were provided")
dump(doc, output_stream)
def main():
"""
Allows Panflute to be run as a command line executable
to be used as a Pandoc filter.
"""
stdio()
help_str = """Allows Panflute to be run as a command line executable:
* to be used in Pandoctools shell scripts as Pandoc filter with
multiple arguments (should have -t/--to option in this case):
`pandoc -t json | panfl -t markdown foo.bar | pandoc -f json`
* to be used as a Pandoc filter (in this case only one positional
argument is allowed of all options): `pandoc --filter panfl`
Filters may be set with or without .py extension.
It can be relative or absolutele paths to files or modules specs
like `foo.bar`.
MIND THAT Panflute temporarily prepends folder of the filter
(or relevant dir provided if module spec) TO THE `sys.path` before
importing the filter!
Search preserves directories order (except for --data-dir and
`sys.path`).
"""
@click.command(help=help_str)
@click.argument('filters', nargs=-1)
@click.option('-w', '-t', '--write', '--to', 'to', type=str, default=None,
help='Derivative of Pandoc writer option that Pandoc passes to filters.')
@click.option('--dir', '-d', 'search_dirs', multiple=True,
help="Search filters in provided directories: `-d dir1 -d dir2`.")
@click.option('--data-dir', is_flag=True, default=False,
help="Search filters in default user data directory listed in `pandoc --version` " +
"(in it's `filters` subfolder actually). It's appended to the search list.")
@click.option('--no-sys-path', 'sys_path', is_flag=True, default=True,
help="Disable search filters in python's `sys.path` (without '' and '.') " +
"that is appended to the search list.")
def panfl(filters, to, search_dirs, data_dir, sys_path):
"""
Allows Panflute to be run as a command line executable:
* to be used in Pandoctools shell scripts as Pandoc filter with
multiple arguments (should have -t/--to option in this case):
``pandoc -t json | panfl -t markdown foo.bar | pandoc -f json``
* to be used as a Pandoc filter (in this case only one positional
argument is allowed of all options):
``pandoc --filter panfl``
MIND THAT Panflute temporarily prepends folder of the filter
(or relevant dir provided if module spec) TO THE `sys.path` before
importing the filter!
"""
if to is None:
if (len(filters) > 1) or search_dirs or not sys_path or data_dir:
raise ValueError('When no `--to` option then Pandoc filter mode assumed and ' +
'only one positional argument is allowed of all options.')
else:
filters, search_dirs = None, None
sys_path, data_dir = True, False
else:
filters, search_dirs = list(filters), list(search_dirs)
# `load()` in `stdio()` needs `to` in the 2nd arg
sys.argv[1:] = []
sys.argv.append(to)
stdio(filters, search_dirs, data_dir, sys_path, panfl_=True)
|
google/budou | budou/parser.py | get_parser | python | def get_parser(segmenter, **options):
if segmenter == 'nlapi':
return NLAPIParser(**options)
elif segmenter == 'mecab':
return MecabParser()
elif segmenter == 'tinysegmenter':
return TinysegmenterParser()
else:
raise ValueError('Segmenter {} is not supported.'.format(segmenter)) | Gets a parser.
Args:
segmenter (str): Segmenter to use.
options (:obj:`dict`, optional): Optional settings.
Returns:
Parser (:obj:`budou.parser.Parser`)
Raises:
ValueError: If unsupported segmenter is specified. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/parser.py#L129-L149 | null | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser modules.
Parser modules are equipped with :code:`parse` method and it processes the
input text into a list of chunks and an organized HTML snippet.
Examples:
.. code-block:: python
import budou
parser = budou.get_parser('nlapi')
results = parser.parse('Google Home を使った。', classname='w')
print(results['html_code'])
# <span>Google <span class="w">Home を</span>
# <span class="w">使った。</span></span>
chunks = results['chunks']
print(chunks[1].word) # Home を
"""
from abc import ABCMeta
import re
from xml.etree import ElementTree as ET
import six
import html5lib
from .nlapisegmenter import NLAPISegmenter
from .mecabsegmenter import MecabSegmenter
from .tinysegmentersegmenter import TinysegmenterSegmenter
DEFAULT_CLASS_NAME = 'ww'
@six.add_metaclass(ABCMeta)
class Parser:
"""Abstract parser class:
Attributes:
segmenter(:obj:`budou.segmenter.Segmenter`): Segmenter module.
"""
def __init__(self):
self.segmenter = None
def parse(self, source, language=None, classname=None, max_length=None,
attributes=None):
"""Parses the source sentence to output organized HTML code.
Args:
source (str): Source sentence to process.
language (:obj:`str`, optional): Language code.
max_length (:obj:`int`, optional): Maximum length of a chunk.
attributes (:obj:`dict`, optional): Attributes for output SPAN tags.
Returns:
A dictionary containing :code:`chunks` (:obj:`budou.chunk.ChunkList`)
and :code:`html_code` (:obj:`str`).
"""
attributes = parse_attributes(attributes, classname)
source = preprocess(source)
chunks = self.segmenter.segment(source, language)
html_code = chunks.html_serialize(attributes, max_length=max_length)
return {
'chunks': chunks,
'html_code': html_code,
}
class NLAPIParser(Parser):
"""Parser built on Cloud Language API Segmenter
(:obj:`budou.nlapisegmenter.NLAPISegmenter`).
Args:
cache_filename (:obj:`string`, optional): the path to the cache file.
credentials_path (:obj:`string`, optional): the path to the service
account's credentials file.
Attributes:
segmenter(:obj:`budou.nlapisegmenter.NLAPISegmenter`): Segmenter module.
"""
def __init__(self, **options):
self.segmenter = NLAPISegmenter(
cache_filename=options.get('cache_filename', None),
credentials_path=options.get('credentials_path', None),
use_entity=options.get('use_entity', False),
use_cache=options.get('use_cache', True))
class MecabParser(Parser):
"""Parser built on Mecab Segmenter
(:obj:`budou.mecabsegmenter.MecabSegmenter`).
Attributes:
segmenter(:obj:`budou.mecabsegmenter.MecabSegmenter`): Segmenter module.
"""
def __init__(self):
self.segmenter = MecabSegmenter()
class TinysegmenterParser(Parser):
"""Parser built on TinySegmenter Segmenter
(:obj:`budou.tinysegmentersegmenter.TinysegmenterSegmenter`).
Attributes:
segmenter(:obj:`budou.tinysegmentersegmenter.TinysegmenterSegmenter`):
Segmenter module.
"""
def __init__(self):
self.segmenter = TinysegmenterSegmenter()
def parse_attributes(attributes=None, classname=None):
"""Parses attributes,
Args:
attributes (dict): Input attributes.
classname (:obj:`str`, optional): Class name of output SPAN tags.
Returns:
Parsed attributes. (dict)
"""
if not attributes:
attributes = {}
attributes.setdefault('class', DEFAULT_CLASS_NAME)
# If `classname` is specified, it overwrites `class` property in `attributes`.
if classname:
attributes['class'] = classname
return attributes
def preprocess(source):
"""Removes unnecessary break lines and white spaces.
Args:
source (str): Input sentence.
Returns:
Preprocessed sentence. (str)
"""
doc = html5lib.parseFragment(source)
source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')
source = source.replace(u'\n', u'').strip()
source = re.sub(r'\s\s+', u' ', source)
return source
|
google/budou | budou/parser.py | parse_attributes | python | def parse_attributes(attributes=None, classname=None):
if not attributes:
attributes = {}
attributes.setdefault('class', DEFAULT_CLASS_NAME)
# If `classname` is specified, it overwrites `class` property in `attributes`.
if classname:
attributes['class'] = classname
return attributes | Parses attributes,
Args:
attributes (dict): Input attributes.
classname (:obj:`str`, optional): Class name of output SPAN tags.
Returns:
Parsed attributes. (dict) | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/parser.py#L151-L167 | null | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser modules.
Parser modules are equipped with :code:`parse` method and it processes the
input text into a list of chunks and an organized HTML snippet.
Examples:
.. code-block:: python
import budou
parser = budou.get_parser('nlapi')
results = parser.parse('Google Home を使った。', classname='w')
print(results['html_code'])
# <span>Google <span class="w">Home を</span>
# <span class="w">使った。</span></span>
chunks = results['chunks']
print(chunks[1].word) # Home を
"""
from abc import ABCMeta
import re
from xml.etree import ElementTree as ET
import six
import html5lib
from .nlapisegmenter import NLAPISegmenter
from .mecabsegmenter import MecabSegmenter
from .tinysegmentersegmenter import TinysegmenterSegmenter
DEFAULT_CLASS_NAME = 'ww'
@six.add_metaclass(ABCMeta)
class Parser:
"""Abstract parser class:
Attributes:
segmenter(:obj:`budou.segmenter.Segmenter`): Segmenter module.
"""
def __init__(self):
self.segmenter = None
def parse(self, source, language=None, classname=None, max_length=None,
attributes=None):
"""Parses the source sentence to output organized HTML code.
Args:
source (str): Source sentence to process.
language (:obj:`str`, optional): Language code.
max_length (:obj:`int`, optional): Maximum length of a chunk.
attributes (:obj:`dict`, optional): Attributes for output SPAN tags.
Returns:
A dictionary containing :code:`chunks` (:obj:`budou.chunk.ChunkList`)
and :code:`html_code` (:obj:`str`).
"""
attributes = parse_attributes(attributes, classname)
source = preprocess(source)
chunks = self.segmenter.segment(source, language)
html_code = chunks.html_serialize(attributes, max_length=max_length)
return {
'chunks': chunks,
'html_code': html_code,
}
class NLAPIParser(Parser):
"""Parser built on Cloud Language API Segmenter
(:obj:`budou.nlapisegmenter.NLAPISegmenter`).
Args:
cache_filename (:obj:`string`, optional): the path to the cache file.
credentials_path (:obj:`string`, optional): the path to the service
account's credentials file.
Attributes:
segmenter(:obj:`budou.nlapisegmenter.NLAPISegmenter`): Segmenter module.
"""
def __init__(self, **options):
self.segmenter = NLAPISegmenter(
cache_filename=options.get('cache_filename', None),
credentials_path=options.get('credentials_path', None),
use_entity=options.get('use_entity', False),
use_cache=options.get('use_cache', True))
class MecabParser(Parser):
"""Parser built on Mecab Segmenter
(:obj:`budou.mecabsegmenter.MecabSegmenter`).
Attributes:
segmenter(:obj:`budou.mecabsegmenter.MecabSegmenter`): Segmenter module.
"""
def __init__(self):
self.segmenter = MecabSegmenter()
class TinysegmenterParser(Parser):
"""Parser built on TinySegmenter Segmenter
(:obj:`budou.tinysegmentersegmenter.TinysegmenterSegmenter`).
Attributes:
segmenter(:obj:`budou.tinysegmentersegmenter.TinysegmenterSegmenter`):
Segmenter module.
"""
def __init__(self):
self.segmenter = TinysegmenterSegmenter()
def get_parser(segmenter, **options):
"""Gets a parser.
Args:
segmenter (str): Segmenter to use.
options (:obj:`dict`, optional): Optional settings.
Returns:
Parser (:obj:`budou.parser.Parser`)
Raises:
ValueError: If unsupported segmenter is specified.
"""
if segmenter == 'nlapi':
return NLAPIParser(**options)
elif segmenter == 'mecab':
return MecabParser()
elif segmenter == 'tinysegmenter':
return TinysegmenterParser()
else:
raise ValueError('Segmenter {} is not supported.'.format(segmenter))
def preprocess(source):
"""Removes unnecessary break lines and white spaces.
Args:
source (str): Input sentence.
Returns:
Preprocessed sentence. (str)
"""
doc = html5lib.parseFragment(source)
source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')
source = source.replace(u'\n', u'').strip()
source = re.sub(r'\s\s+', u' ', source)
return source
|
google/budou | budou/parser.py | preprocess | python | def preprocess(source):
doc = html5lib.parseFragment(source)
source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')
source = source.replace(u'\n', u'').strip()
source = re.sub(r'\s\s+', u' ', source)
return source | Removes unnecessary break lines and white spaces.
Args:
source (str): Input sentence.
Returns:
Preprocessed sentence. (str) | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/parser.py#L169-L182 | null | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser modules.
Parser modules are equipped with :code:`parse` method and it processes the
input text into a list of chunks and an organized HTML snippet.
Examples:
.. code-block:: python
import budou
parser = budou.get_parser('nlapi')
results = parser.parse('Google Home を使った。', classname='w')
print(results['html_code'])
# <span>Google <span class="w">Home を</span>
# <span class="w">使った。</span></span>
chunks = results['chunks']
print(chunks[1].word) # Home を
"""
from abc import ABCMeta
import re
from xml.etree import ElementTree as ET
import six
import html5lib
from .nlapisegmenter import NLAPISegmenter
from .mecabsegmenter import MecabSegmenter
from .tinysegmentersegmenter import TinysegmenterSegmenter
DEFAULT_CLASS_NAME = 'ww'
@six.add_metaclass(ABCMeta)
class Parser:
"""Abstract parser class:
Attributes:
segmenter(:obj:`budou.segmenter.Segmenter`): Segmenter module.
"""
def __init__(self):
self.segmenter = None
def parse(self, source, language=None, classname=None, max_length=None,
attributes=None):
"""Parses the source sentence to output organized HTML code.
Args:
source (str): Source sentence to process.
language (:obj:`str`, optional): Language code.
max_length (:obj:`int`, optional): Maximum length of a chunk.
attributes (:obj:`dict`, optional): Attributes for output SPAN tags.
Returns:
A dictionary containing :code:`chunks` (:obj:`budou.chunk.ChunkList`)
and :code:`html_code` (:obj:`str`).
"""
attributes = parse_attributes(attributes, classname)
source = preprocess(source)
chunks = self.segmenter.segment(source, language)
html_code = chunks.html_serialize(attributes, max_length=max_length)
return {
'chunks': chunks,
'html_code': html_code,
}
class NLAPIParser(Parser):
"""Parser built on Cloud Language API Segmenter
(:obj:`budou.nlapisegmenter.NLAPISegmenter`).
Args:
cache_filename (:obj:`string`, optional): the path to the cache file.
credentials_path (:obj:`string`, optional): the path to the service
account's credentials file.
Attributes:
segmenter(:obj:`budou.nlapisegmenter.NLAPISegmenter`): Segmenter module.
"""
def __init__(self, **options):
self.segmenter = NLAPISegmenter(
cache_filename=options.get('cache_filename', None),
credentials_path=options.get('credentials_path', None),
use_entity=options.get('use_entity', False),
use_cache=options.get('use_cache', True))
class MecabParser(Parser):
"""Parser built on Mecab Segmenter
(:obj:`budou.mecabsegmenter.MecabSegmenter`).
Attributes:
segmenter(:obj:`budou.mecabsegmenter.MecabSegmenter`): Segmenter module.
"""
def __init__(self):
self.segmenter = MecabSegmenter()
class TinysegmenterParser(Parser):
"""Parser built on TinySegmenter Segmenter
(:obj:`budou.tinysegmentersegmenter.TinysegmenterSegmenter`).
Attributes:
segmenter(:obj:`budou.tinysegmentersegmenter.TinysegmenterSegmenter`):
Segmenter module.
"""
def __init__(self):
self.segmenter = TinysegmenterSegmenter()
def get_parser(segmenter, **options):
"""Gets a parser.
Args:
segmenter (str): Segmenter to use.
options (:obj:`dict`, optional): Optional settings.
Returns:
Parser (:obj:`budou.parser.Parser`)
Raises:
ValueError: If unsupported segmenter is specified.
"""
if segmenter == 'nlapi':
return NLAPIParser(**options)
elif segmenter == 'mecab':
return MecabParser()
elif segmenter == 'tinysegmenter':
return TinysegmenterParser()
else:
raise ValueError('Segmenter {} is not supported.'.format(segmenter))
def parse_attributes(attributes=None, classname=None):
"""Parses attributes,
Args:
attributes (dict): Input attributes.
classname (:obj:`str`, optional): Class name of output SPAN tags.
Returns:
Parsed attributes. (dict)
"""
if not attributes:
attributes = {}
attributes.setdefault('class', DEFAULT_CLASS_NAME)
# If `classname` is specified, it overwrites `class` property in `attributes`.
if classname:
attributes['class'] = classname
return attributes
|
google/budou | budou/tinysegmentersegmenter.py | TinysegmenterSegmenter.segment | python | def segment(self, source, language=None):
if language and not language in self.supported_languages:
raise ValueError(
'Language {} is not supported by NLAPI segmenter'.format(language))
chunks = ChunkList()
results = tinysegmenter.tokenize(source)
seek = 0
for word in results:
word = word.strip()
if not word:
continue
if source[seek: seek + len(word)] != word:
assert source[seek] == ' '
assert source[seek + 1: seek + len(word) + 1] == word
chunks.append(Chunk.space())
seek += 1
dependency = None
if word in _PARTICLES or word in _AUX_VERBS or is_hiragana(word):
dependency = False
chunk = Chunk(word, dependency=dependency)
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct()
chunks.append(chunk)
seek += len(word)
chunks.resolve_dependencies()
return chunks | Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/tinysegmentersegmenter.py#L68-L109 | [
"def is_hiragana(word):\n \"\"\"Checks is the word is a Japanese hiragana.\n\n This is using the unicode codepoint range for hiragana.\n https://en.wikipedia.org/wiki/Hiragana_(Unicode_block)\n\n Args:\n word (str): A word.\n\n Returns:\n bool: True if the word is a hiragana.\n \"\"\"\n return len(word... | class TinysegmenterSegmenter(Segmenter):
"""TinySegmenter based Segmenter.
Attributes:
supported_languages (list of str): List of supported languages' codes.
"""
supported_languages = {'ja'}
|
google/budou | budou/mecabsegmenter.py | MecabSegmenter.segment | python | def segment(self, source, language=None):
if language and not language in self.supported_languages:
raise ValueError(
'Language {} is not supported by MeCab segmenter'.format(language))
chunks = ChunkList()
seek = 0
source_str = source.encode('utf-8') if six.PY2 else source
results = self.tagger.parse(source_str).split('\n')[:-2]
for row in results:
if six.PY2:
row = row.decode('utf-8')
token = row.split('\t')
word = token[0]
labels = token[3].split('-')
pos = labels[0]
label = labels[1] if len(labels) > 1 else None
if source[seek: seek + len(word)] != word:
assert source[seek] == ' '
assert source[seek + 1: seek + len(word) + 1] == word
chunks.append(Chunk.space())
seek += 1
dependency = None
if pos in _DEPENDENT_POS_FORWARD:
dependency = True
elif pos in _DEPENDENT_POS_BACKWARD:
dependency = False
elif label in _DEPENDENT_LABEL_FORWARD:
dependency = True
elif label in _DEPENDENT_LABEL_BACKWARD:
dependency = False
chunk = Chunk(word, pos=pos, label=label, dependency=dependency)
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct()
chunks.append(chunk)
seek += len(word)
chunks.resolve_dependencies()
return chunks | Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/mecabsegmenter.py#L59-L111 | [
"def space(cls):\n \"\"\"Creates space Chunk.\n\n Returns:\n A chunk (:obj:`budou.chunk.Chunk`)\n \"\"\"\n chunk = cls(u' ', cls._SPACE_POS)\n return chunk\n",
"def is_punct(self):\n \"\"\"Whether the chunk is a punctuation mark.\n\n See also https://en.wikipedia.org/wiki/Unicode_character_property\n\n ... | class MecabSegmenter(Segmenter):
"""MeCab Segmenter.
Attributes:
tagger (MeCab.Tagger): MeCab Tagger to parse the input sentence.
supported_languages (list of str): List of supported languages' codes.
"""
supported_languages = {'ja'}
def __init__(self):
try:
import MeCab
self.tagger = MeCab.Tagger('-Ochasen')
except ImportError:
logging.error(
('mecab-python3 is not installed. Install the module by running '
'`$ pip install mecab-python3`. If MeCab is not installed in your '
'system yet, run `$ make install-mecab` instead.'))
sys.exit(1)
|
google/budou | budou/budou.py | main | python | def main():
args = docopt(__doc__)
if args['--version']:
print(__version__)
sys.exit()
result = parse(
args['<source>'],
segmenter=args['--segmenter'],
language=args['--language'],
classname=args['--classname'])
print(result['html_code'])
sys.exit() | Budou main method for the command line tool. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/budou.py#L48-L62 | [
"def parse(source, segmenter='nlapi', language=None, max_length=None,\n classname=None, attributes=None, **kwargs):\n \"\"\"Parses input source.\n\n Args:\n source (str): Input source to process.\n segmenter (:obj:`str`, optional): Segmenter to use [default: nlapi].\n language (:obj:`str`, optio... | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Budou: an automatic organizer tool for beautiful line breaking in CJK
Usage:
budou <source> [--segmenter=<seg>] [--language=<lang>] [--classname=<class>]
budou -h | --help
budou -v | --version
Options:
-h --help Show this screen.
-v --version Show version.
--segmenter=<segmenter> Segmenter to use [default: nlapi].
--language=<language> Language the source in.
--classname=<classname> Class name for output SPAN tags.
Use comma-separated value to specify multiple
classes.
"""
from __future__ import print_function
import sys
import warnings
from docopt import docopt
from .parser import get_parser
from .__version__ import __version__
AVAILABLE_SEGMENTERS = {'nlapi', 'mecab'}
def parse(source, segmenter='nlapi', language=None, max_length=None,
classname=None, attributes=None, **kwargs):
"""Parses input source.
Args:
source (str): Input source to process.
segmenter (:obj:`str`, optional): Segmenter to use [default: nlapi].
language (:obj:`str`, optional): Language code.
max_length (:obj:`int`, optional): Maximum length of a chunk.
classname (:obj:`str`, optional): Class name of output SPAN tags.
attributes (:obj:`dict`, optional): Attributes for output SPAN tags.
Returns:
Results in a dict. :code:`chunks` holds a list of chunks
(:obj:`budou.chunk.ChunkList`) and :code:`html_code` holds the output HTML
code.
"""
parser = get_parser(segmenter, **kwargs)
return parser.parse(
source, language=language, max_length=max_length, classname=classname,
attributes=attributes)
def authenticate(json_path=None):
"""Gets a Natural Language API parser by authenticating the API.
**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a
parser instead.
Args:
json_path (:obj:`str`, optional): The file path to the service account's
credentials.
Returns:
Parser. (:obj:`budou.parser.NLAPIParser`)
"""
msg = ('budou.authentication() is deprecated. '
'Please use budou.get_parser() to obtain a parser instead.')
warnings.warn(msg, DeprecationWarning)
parser = get_parser('nlapi', credentials_path=json_path)
return parser
if __name__ == '__main__':
main()
|
google/budou | budou/budou.py | parse | python | def parse(source, segmenter='nlapi', language=None, max_length=None,
classname=None, attributes=None, **kwargs):
parser = get_parser(segmenter, **kwargs)
return parser.parse(
source, language=language, max_length=max_length, classname=classname,
attributes=attributes) | Parses input source.
Args:
source (str): Input source to process.
segmenter (:obj:`str`, optional): Segmenter to use [default: nlapi].
language (:obj:`str`, optional): Language code.
max_length (:obj:`int`, optional): Maximum length of a chunk.
classname (:obj:`str`, optional): Class name of output SPAN tags.
attributes (:obj:`dict`, optional): Attributes for output SPAN tags.
Returns:
Results in a dict. :code:`chunks` holds a list of chunks
(:obj:`budou.chunk.ChunkList`) and :code:`html_code` holds the output HTML
code. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/budou.py#L64-L84 | [
"def get_parser(segmenter, **options):\n \"\"\"Gets a parser.\n\n Args:\n segmenter (str): Segmenter to use.\n options (:obj:`dict`, optional): Optional settings.\n\n Returns:\n Parser (:obj:`budou.parser.Parser`)\n\n Raises:\n ValueError: If unsupported segmenter is specified.\n \"\"\"\n if segme... | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Budou: an automatic organizer tool for beautiful line breaking in CJK
Usage:
budou <source> [--segmenter=<seg>] [--language=<lang>] [--classname=<class>]
budou -h | --help
budou -v | --version
Options:
-h --help Show this screen.
-v --version Show version.
--segmenter=<segmenter> Segmenter to use [default: nlapi].
--language=<language> Language the source in.
--classname=<classname> Class name for output SPAN tags.
Use comma-separated value to specify multiple
classes.
"""
from __future__ import print_function
import sys
import warnings
from docopt import docopt
from .parser import get_parser
from .__version__ import __version__
AVAILABLE_SEGMENTERS = {'nlapi', 'mecab'}
def main():
"""Budou main method for the command line tool.
"""
args = docopt(__doc__)
if args['--version']:
print(__version__)
sys.exit()
result = parse(
args['<source>'],
segmenter=args['--segmenter'],
language=args['--language'],
classname=args['--classname'])
print(result['html_code'])
sys.exit()
def authenticate(json_path=None):
"""Gets a Natural Language API parser by authenticating the API.
**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a
parser instead.
Args:
json_path (:obj:`str`, optional): The file path to the service account's
credentials.
Returns:
Parser. (:obj:`budou.parser.NLAPIParser`)
"""
msg = ('budou.authentication() is deprecated. '
'Please use budou.get_parser() to obtain a parser instead.')
warnings.warn(msg, DeprecationWarning)
parser = get_parser('nlapi', credentials_path=json_path)
return parser
if __name__ == '__main__':
main()
|
google/budou | budou/budou.py | authenticate | python | def authenticate(json_path=None):
msg = ('budou.authentication() is deprecated. '
'Please use budou.get_parser() to obtain a parser instead.')
warnings.warn(msg, DeprecationWarning)
parser = get_parser('nlapi', credentials_path=json_path)
return parser | Gets a Natural Language API parser by authenticating the API.
**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a
parser instead.
Args:
json_path (:obj:`str`, optional): The file path to the service account's
credentials.
Returns:
Parser. (:obj:`budou.parser.NLAPIParser`) | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/budou.py#L86-L104 | [
"def get_parser(segmenter, **options):\n \"\"\"Gets a parser.\n\n Args:\n segmenter (str): Segmenter to use.\n options (:obj:`dict`, optional): Optional settings.\n\n Returns:\n Parser (:obj:`budou.parser.Parser`)\n\n Raises:\n ValueError: If unsupported segmenter is specified.\n \"\"\"\n if segme... | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Budou: an automatic organizer tool for beautiful line breaking in CJK
Usage:
budou <source> [--segmenter=<seg>] [--language=<lang>] [--classname=<class>]
budou -h | --help
budou -v | --version
Options:
-h --help Show this screen.
-v --version Show version.
--segmenter=<segmenter> Segmenter to use [default: nlapi].
--language=<language> Language the source in.
--classname=<classname> Class name for output SPAN tags.
Use comma-separated value to specify multiple
classes.
"""
from __future__ import print_function
import sys
import warnings
from docopt import docopt
from .parser import get_parser
from .__version__ import __version__
AVAILABLE_SEGMENTERS = {'nlapi', 'mecab'}
def main():
  """Entry point for the budou command line tool.

  Reads command line arguments via docopt, prints the version when
  requested, otherwise parses the given source and prints the HTML.
  """
  cli_args = docopt(__doc__)
  if cli_args['--version']:
    print(__version__)
    sys.exit()
  parsed = parse(
      cli_args['<source>'],
      segmenter=cli_args['--segmenter'],
      language=cli_args['--language'],
      classname=cli_args['--classname'])
  print(parsed['html_code'])
  sys.exit()
def parse(source, segmenter='nlapi', language=None, max_length=None,
          classname=None, attributes=None, **kwargs):
  """Parses the input source into chunks and HTML output.

  Args:
    source (str): Input source to process.
    segmenter (:obj:`str`, optional): Segmenter to use [default: nlapi].
    language (:obj:`str`, optional): Language code.
    max_length (:obj:`int`, optional): Maximum length of a chunk.
    classname (:obj:`str`, optional): Class name of output SPAN tags.
    attributes (:obj:`dict`, optional): Attributes for output SPAN tags.

  Returns:
    Results in a dict. :code:`chunks` holds a list of chunks
    (:obj:`budou.chunk.ChunkList`) and :code:`html_code` holds the output
    HTML code.
  """
  # Extra keyword arguments are forwarded to the parser factory
  # (e.g. credentials_path for the NL API segmenter).
  selected_parser = get_parser(segmenter, **kwargs)
  result = selected_parser.parse(
      source, language=language, max_length=max_length,
      classname=classname, attributes=attributes)
  return result
if __name__ == '__main__':
main()
|
google/budou | budou/nlapisegmenter.py | _memorize | python | def _memorize(func):
def _wrapper(self, *args, **kwargs):
"""Wrapper to cache the function's output.
"""
if self.use_cache:
cache = load_cache(self.cache_filename)
original_key = ':'.join([
self.__class__.__name__,
func.__name__,
'_'.join([str(a) for a in args]),
'_'.join([str(w) for w in kwargs.values()])])
cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()
cached_val = cache.get(cache_key)
if cached_val:
return cached_val
val = func(self, *args, **kwargs)
if self.use_cache:
cache.set(cache_key, val)
return val
return _wrapper | Decorator to cache the given function's output. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L62-L84 | null | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Natural Language API based Segmenter.
Word segmenter module powered by
`Cloud Natural Language API <https://cloud.google.com/natural-language/>`_.
You need to enable the API in your Google Cloud Platform project before you
use this module.
Example:
Once you enabled the API, download a service account's credentials and set
as `GOOGLE_APPLICATION_CREDENTIALS` environment variable.
.. code-block:: bash
$ export GOOGLE_APPLICATION_CREDENTIALS='/path/to/credentials.json'
Alternatively, you can also pass the path to your credentials file to the
module.
.. code-block:: python
segmenter = budou.segmenter.NLAPISegmenter(
credentials_path='/path/to/credentials.json')
This module is equipped with caching system not to make multiple requests for
the same source sentence because making request to the API may incur costs.
The caching system is provided by `budou.cachefactory`, and a proper caching
system is chosen to be used based on the environment.
"""
from __future__ import unicode_literals
from builtins import str
import logging
import hashlib
from .segmenter import Segmenter
from .cachefactory import load_cache
from .chunk import Chunk, ChunkList
_DEPENDENT_LABEL = (
'P', 'SNUM', 'PRT', 'AUX', 'SUFF', 'AUXPASS', 'RDROP', 'NUMBER', 'NUM',
'PREF')
""" list of str: Labels dependent to other parts.
"""
class NLAPISegmenter(Segmenter):
"""Natural Language API Segmenter.
Attributes:
service: A resource object for interacting with Cloud Natural Language API.
cache_filename (str): File path to the cache file.
supported_languages (list of str): List of supported languages' codes.
Args:
cache_filename (:obj:`str`, optional): File path to the pickle file for
caching. The file is created automatically if not exist. If the
environment is Google App Engine Standard Environment and memcache
service is available, it is used for caching and the pickle file
won't be generated.
credentials_path (:obj:`str`, optional): File path to the service
account's credentials file. If no file path is specified, it tries
to authenticate with default credentials.
use_entity (:obj:`bool`, optional): Whether to use entity analysis
results to wrap entity names in the output.
use_cache (:obj:`bool`, optional): Whether to use a cache system.
"""
supported_languages = {'ja', 'ko', 'zh', 'zh-TW', 'zh-CN', 'zh-HK', 'zh-Hant'}
def __init__(self, cache_filename, credentials_path, use_entity, use_cache):
  """Initializes the segmenter and authenticates with the NL API.

  Args:
    cache_filename (str): File path to the pickle file for caching.
    credentials_path (str): File path to the service account's
        credentials file.
    use_entity (bool): Whether entity analysis results are used.
    use_cache (bool): Whether the caching system is enabled.
  """
  self.use_cache = use_cache
  self.use_entity = use_entity
  self.credentials_path = credentials_path
  self.cache_filename = cache_filename
  # Build the API service resource up front so later calls can use it.
  self._authenticate()
def _authenticate(self):
  """Builds the Cloud Natural Language API service resource.

  Uses the service account credentials file when ``credentials_path`` is
  set, otherwise falls back to application default credentials. Stores
  the built resource on ``self.service``.

  Raises:
    ImportError: If the required authentication modules are unavailable.
  """
  import google_auth_httplib2
  import googleapiclient.discovery
  scope = ['https://www.googleapis.com/auth/cloud-platform']
  if self.credentials_path:
    try:
      from google.oauth2 import service_account
      credentials = service_account.Credentials.from_service_account_file(
          self.credentials_path)
      scoped_credentials = credentials.with_scopes(scope)
    except ImportError:
      logging.error('Failed to load google.oauth2.service_account module. '
                    'If you are running this script in Google App Engine '
                    'environment, you can initialize the segmenter with '
                    'default credentials.')
      # Re-raise instead of falling through, which previously failed
      # later with a confusing NameError on `scoped_credentials`.
      raise
  else:
    import google.auth
    scoped_credentials, _ = google.auth.default(scope)
  authed_http = google_auth_httplib2.AuthorizedHttp(scoped_credentials)
  self.service = googleapiclient.discovery.build(
      'language', 'v1beta2', http=authed_http)
def segment(self, source, language=None):
  """Segments the source text into a chunk list.

  Args:
    source (str): Source string to segment.
    language (:obj:`str`, optional): A language code.

  Returns:
    A chunk list. (:obj:`budou.chunk.ChunkList`)

  Raises:
    ValueError: If :obj:`language` is given and it is not included in
        :obj:`supported_languages`.
  """
  # Truthiness check on purpose: an empty language string skips
  # validation, matching the original behavior.
  if language and language not in self.supported_languages:
    raise ValueError(
        'Language {} is not supported by NLAPI segmenter'.format(language))
  chunks, detected_language = self._get_source_chunks(
      source, language=language)
  if self.use_entity:
    found_entities = self._get_entities(source, language=detected_language)
    chunks = self._group_chunks_by_entities(chunks, found_entities)
  chunks.resolve_dependencies()
  return chunks
def _get_source_chunks(self, input_text, language=None):
  """Builds a chunk list from the Syntax Analysis results.

  Args:
    input_text (str): Text to annotate.
    language (:obj:`str`, optional): Language of the text.

  Returns:
    A tuple of the chunk list (:obj:`budou.chunk.ChunkList`) and the
    language code reported by the API.
  """
  annotations = self._get_annotations(input_text, language=language)
  detected_language = annotations['language']
  chunks = ChunkList()
  cursor = 0
  for index, token in enumerate(annotations['tokens']):
    word = token['text']['content']
    begin_offset = token['text']['beginOffset']
    # A gap between the cursor and the next token's offset means the
    # analyzer skipped characters (e.g. whitespace) — emit a space chunk.
    if begin_offset > cursor:
      chunks.append(Chunk.space())
      cursor = begin_offset
    chunk = Chunk(word, token['partOfSpeech']['tag'],
                  token['dependencyEdge']['label'])
    if chunk.label in _DEPENDENT_LABEL:
      # Determining concatenating direction based on syntax dependency.
      chunk.dependency = index < token['dependencyEdge']['headTokenIndex']
    if chunk.is_punct():
      chunk.dependency = chunk.is_open_punct()
    chunks.append(chunk)
    cursor += len(word)
  return chunks, detected_language
def _group_chunks_by_entities(self, chunks, entities):
  """Merges chunks that overlap with entities from Entity Analysis.

  Args:
    chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
    entities (:obj:`list` of :obj:`dict`): List of entities.

  Returns:
    A chunk list. (:obj:`budou.chunk.ChunkList`)
  """
  for entity in entities:
    overlapping = chunks.get_overlaps(
        entity['beginOffset'], len(entity['content']))
    if not overlapping:
      continue
    # Replace the overlapping chunks with one chunk spanning the entity.
    merged_word = u''.join(chunk.word for chunk in overlapping)
    chunks.swap(overlapping, Chunk(merged_word))
  return chunks
@_memorize
def _get_annotations(self, text, language=''):
  """Fetches syntax annotations for the given text from the NL API.

  Args:
    text (str): Input text.
    language (:obj:`str`, optional): Language code.

  Returns:
    Results in a dictionary. :code:`tokens` contains the list of
    annotations and :code:`language` contains the inferred language.
  """
  document = {
      'type': 'PLAIN_TEXT',
      'content': text,
  }
  if language:
    document['language'] = language
  body = {
      'document': document,
      'features': {'extract_syntax': True},
      'encodingType': 'UTF32',
  }
  response = self.service.documents().annotateText(body=body).execute()
  return {
      'tokens': response.get('tokens', []),
      'language': response.get('language'),
  }
@_memorize
def _get_entities(self, text, language=''):
  """Fetches entities for the given text from the NL API.

  Args:
    text (str): Input text.
    language (:obj:`str`, optional): Language code.

  Returns:
    List of entities, each a dict with :code:`content` and
    :code:`beginOffset` keys.
  """
  document = {
      'type': 'PLAIN_TEXT',
      'content': text,
  }
  if language:
    document['language'] = language
  body = {
      'document': document,
      'encodingType': 'UTF32',
  }
  response = self.service.documents().analyzeEntities(body=body).execute()
  result = []
  for entity in response.get('entities', []):
    mentions = entity.get('mentions', [])
    if not mentions:
      continue
    first_mention = mentions[0]['text']
    offset = first_mention['beginOffset']
    # Split multi-word entities so each word can be matched against
    # individual chunks.
    # NOTE(review): offsets advance by word length only and do not
    # account for whitespace between words — matches original behavior;
    # verify against the API's offset semantics.
    for word in first_mention['content'].split():
      result.append({'content': word, 'beginOffset': offset})
      offset += len(word)
  return result
|
google/budou | budou/nlapisegmenter.py | NLAPISegmenter.segment | python | def segment(self, source, language=None):
if language and not language in self.supported_languages:
raise ValueError(
'Language {} is not supported by NLAPI segmenter'.format(language))
chunks, language = self._get_source_chunks(source, language=language)
if self.use_entity:
entities = self._get_entities(source, language=language)
chunks = self._group_chunks_by_entities(chunks, entities)
chunks.resolve_dependencies()
return chunks | Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L145-L168 | [
"def _get_source_chunks(self, input_text, language=None):\n \"\"\"Returns a chunk list retrieved from Syntax Analysis results.\n\n Args:\n input_text (str): Text to annotate.\n language (:obj:`str`, optional): Language of the text.\n\n Returns:\n A chunk list. (:obj:`budou.chunk.ChunkList`)\n \"\"\"\n ... | class NLAPISegmenter(Segmenter):
"""Natural Language API Segmenter.
Attributes:
service: A resource object for interacting with Cloud Natural Language API.
cache_filename (str): File path to the cache file.
supported_languages (list of str): List of supported languages' codes.
Args:
cache_filename (:obj:`str`, optional): File path to the pickle file for
caching. The file is created automatically if not exist. If the
environment is Google App Engine Standard Environment and memcache
service is available, it is used for caching and the pickle file
won't be generated.
credentials_path (:obj:`str`, optional): File path to the service
account's credentials file. If no file path is specified, it tries
to authenticate with default credentials.
use_entity (:obj:`bool`, optional): Whether to use entity analysis
results to wrap entity names in the output.
use_cache (:obj:`bool`, optional): Whether to use a cache system.
"""
supported_languages = {'ja', 'ko', 'zh', 'zh-TW', 'zh-CN', 'zh-HK', 'zh-Hant'}
def __init__(self, cache_filename, credentials_path, use_entity, use_cache):
self.cache_filename = cache_filename
self.credentials_path = credentials_path
self.use_entity = use_entity
self.use_cache = use_cache
self._authenticate()
def _authenticate(self):
import google_auth_httplib2
import googleapiclient.discovery
scope = ['https://www.googleapis.com/auth/cloud-platform']
if self.credentials_path:
try:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
self.credentials_path)
scoped_credentials = credentials.with_scopes(scope)
except ImportError:
logging.error('Failed to load google.oauth2.service_account module. '
'If you are running this script in Google App Engine '
'environment, you can initialize the segmenter with '
'default credentials.')
else:
import google.auth
scoped_credentials, _ = google.auth.default(scope)
authed_http = google_auth_httplib2.AuthorizedHttp(scoped_credentials)
service = googleapiclient.discovery.build(
'language', 'v1beta2', http=authed_http)
self.service = service
def _get_source_chunks(self, input_text, language=None):
"""Returns a chunk list retrieved from Syntax Analysis results.
Args:
input_text (str): Text to annotate.
language (:obj:`str`, optional): Language of the text.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
chunks = ChunkList()
seek = 0
result = self._get_annotations(input_text, language=language)
tokens = result['tokens']
language = result['language']
for i, token in enumerate(tokens):
word = token['text']['content']
begin_offset = token['text']['beginOffset']
label = token['dependencyEdge']['label']
pos = token['partOfSpeech']['tag']
if begin_offset > seek:
chunks.append(Chunk.space())
seek = begin_offset
chunk = Chunk(word, pos, label)
if chunk.label in _DEPENDENT_LABEL:
# Determining concatenating direction based on syntax dependency.
chunk.dependency = i < token['dependencyEdge']['headTokenIndex']
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct()
chunks.append(chunk)
seek += len(word)
return chunks, language
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks
@_memorize
def _get_annotations(self, text, language=''):
"""Returns the list of annotations retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
Results in a dictionary. :code:`tokens` contains the list of annotations
and :code:`language` contains the inferred language from the input.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().annotateText(body=body)
response = request.execute()
tokens = response.get('tokens', [])
language = response.get('language')
return {'tokens': tokens, 'language': language}
@_memorize
def _get_entities(self, text, language=''):
"""Returns the list of entities retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
List of entities.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if not mentions:
continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result
|
google/budou | budou/nlapisegmenter.py | NLAPISegmenter._get_source_chunks | python | def _get_source_chunks(self, input_text, language=None):
chunks = ChunkList()
seek = 0
result = self._get_annotations(input_text, language=language)
tokens = result['tokens']
language = result['language']
for i, token in enumerate(tokens):
word = token['text']['content']
begin_offset = token['text']['beginOffset']
label = token['dependencyEdge']['label']
pos = token['partOfSpeech']['tag']
if begin_offset > seek:
chunks.append(Chunk.space())
seek = begin_offset
chunk = Chunk(word, pos, label)
if chunk.label in _DEPENDENT_LABEL:
# Determining concatenating direction based on syntax dependency.
chunk.dependency = i < token['dependencyEdge']['headTokenIndex']
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct()
chunks.append(chunk)
seek += len(word)
return chunks, language | Returns a chunk list retrieved from Syntax Analysis results.
Args:
input_text (str): Text to annotate.
language (:obj:`str`, optional): Language of the text.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`) | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L170-L201 | [
"def space(cls):\n \"\"\"Creates space Chunk.\n\n Returns:\n A chunk (:obj:`budou.chunk.Chunk`)\n \"\"\"\n chunk = cls(u' ', cls._SPACE_POS)\n return chunk\n",
"def is_punct(self):\n \"\"\"Whether the chunk is a punctuation mark.\n\n See also https://en.wikipedia.org/wiki/Unicode_character_property\n\n ... | class NLAPISegmenter(Segmenter):
"""Natural Language API Segmenter.
Attributes:
service: A resource object for interacting with Cloud Natural Language API.
cache_filename (str): File path to the cache file.
supported_languages (list of str): List of supported languages' codes.
Args:
cache_filename (:obj:`str`, optional): File path to the pickle file for
caching. The file is created automatically if not exist. If the
environment is Google App Engine Standard Environment and memcache
service is available, it is used for caching and the pickle file
won't be generated.
credentials_path (:obj:`str`, optional): File path to the service
account's credentials file. If no file path is specified, it tries
to authenticate with default credentials.
use_entity (:obj:`bool`, optional): Whether to use entity analysis
results to wrap entity names in the output.
use_cache (:obj:`bool`, optional): Whether to use a cache system.
"""
supported_languages = {'ja', 'ko', 'zh', 'zh-TW', 'zh-CN', 'zh-HK', 'zh-Hant'}
def __init__(self, cache_filename, credentials_path, use_entity, use_cache):
self.cache_filename = cache_filename
self.credentials_path = credentials_path
self.use_entity = use_entity
self.use_cache = use_cache
self._authenticate()
def _authenticate(self):
import google_auth_httplib2
import googleapiclient.discovery
scope = ['https://www.googleapis.com/auth/cloud-platform']
if self.credentials_path:
try:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
self.credentials_path)
scoped_credentials = credentials.with_scopes(scope)
except ImportError:
logging.error('Failed to load google.oauth2.service_account module. '
'If you are running this script in Google App Engine '
'environment, you can initialize the segmenter with '
'default credentials.')
else:
import google.auth
scoped_credentials, _ = google.auth.default(scope)
authed_http = google_auth_httplib2.AuthorizedHttp(scoped_credentials)
service = googleapiclient.discovery.build(
'language', 'v1beta2', http=authed_http)
self.service = service
def segment(self, source, language=None):
"""Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`.
"""
if language and not language in self.supported_languages:
raise ValueError(
'Language {} is not supported by NLAPI segmenter'.format(language))
chunks, language = self._get_source_chunks(source, language=language)
if self.use_entity:
entities = self._get_entities(source, language=language)
chunks = self._group_chunks_by_entities(chunks, entities)
chunks.resolve_dependencies()
return chunks
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks
@_memorize
def _get_annotations(self, text, language=''):
"""Returns the list of annotations retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
Results in a dictionary. :code:`tokens` contains the list of annotations
and :code:`language` contains the inferred language from the input.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().annotateText(body=body)
response = request.execute()
tokens = response.get('tokens', [])
language = response.get('language')
return {'tokens': tokens, 'language': language}
@_memorize
def _get_entities(self, text, language=''):
"""Returns the list of entities retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
List of entities.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if not mentions:
continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result
|
google/budou | budou/nlapisegmenter.py | NLAPISegmenter._group_chunks_by_entities | python | def _group_chunks_by_entities(self, chunks, entities):
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks | Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`) | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L203-L221 | null | class NLAPISegmenter(Segmenter):
"""Natural Language API Segmenter.
Attributes:
service: A resource object for interacting with Cloud Natural Language API.
cache_filename (str): File path to the cache file.
supported_languages (list of str): List of supported languages' codes.
Args:
cache_filename (:obj:`str`, optional): File path to the pickle file for
caching. The file is created automatically if not exist. If the
environment is Google App Engine Standard Environment and memcache
service is available, it is used for caching and the pickle file
won't be generated.
credentials_path (:obj:`str`, optional): File path to the service
account's credentials file. If no file path is specified, it tries
to authenticate with default credentials.
use_entity (:obj:`bool`, optional): Whether to use entity analysis
results to wrap entity names in the output.
use_cache (:obj:`bool`, optional): Whether to use a cache system.
"""
supported_languages = {'ja', 'ko', 'zh', 'zh-TW', 'zh-CN', 'zh-HK', 'zh-Hant'}
def __init__(self, cache_filename, credentials_path, use_entity, use_cache):
self.cache_filename = cache_filename
self.credentials_path = credentials_path
self.use_entity = use_entity
self.use_cache = use_cache
self._authenticate()
def _authenticate(self):
import google_auth_httplib2
import googleapiclient.discovery
scope = ['https://www.googleapis.com/auth/cloud-platform']
if self.credentials_path:
try:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
self.credentials_path)
scoped_credentials = credentials.with_scopes(scope)
except ImportError:
logging.error('Failed to load google.oauth2.service_account module. '
'If you are running this script in Google App Engine '
'environment, you can initialize the segmenter with '
'default credentials.')
else:
import google.auth
scoped_credentials, _ = google.auth.default(scope)
authed_http = google_auth_httplib2.AuthorizedHttp(scoped_credentials)
service = googleapiclient.discovery.build(
'language', 'v1beta2', http=authed_http)
self.service = service
def segment(self, source, language=None):
"""Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`.
"""
if language and not language in self.supported_languages:
raise ValueError(
'Language {} is not supported by NLAPI segmenter'.format(language))
chunks, language = self._get_source_chunks(source, language=language)
if self.use_entity:
entities = self._get_entities(source, language=language)
chunks = self._group_chunks_by_entities(chunks, entities)
chunks.resolve_dependencies()
return chunks
def _get_source_chunks(self, input_text, language=None):
"""Returns a chunk list retrieved from Syntax Analysis results.
Args:
input_text (str): Text to annotate.
language (:obj:`str`, optional): Language of the text.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
chunks = ChunkList()
seek = 0
result = self._get_annotations(input_text, language=language)
tokens = result['tokens']
language = result['language']
for i, token in enumerate(tokens):
word = token['text']['content']
begin_offset = token['text']['beginOffset']
label = token['dependencyEdge']['label']
pos = token['partOfSpeech']['tag']
if begin_offset > seek:
chunks.append(Chunk.space())
seek = begin_offset
chunk = Chunk(word, pos, label)
if chunk.label in _DEPENDENT_LABEL:
# Determining concatenating direction based on syntax dependency.
chunk.dependency = i < token['dependencyEdge']['headTokenIndex']
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct()
chunks.append(chunk)
seek += len(word)
return chunks, language
@_memorize
def _get_annotations(self, text, language=''):
"""Returns the list of annotations retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
Results in a dictionary. :code:`tokens` contains the list of annotations
and :code:`language` contains the inferred language from the input.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().annotateText(body=body)
response = request.execute()
tokens = response.get('tokens', [])
language = response.get('language')
return {'tokens': tokens, 'language': language}
@_memorize
def _get_entities(self, text, language=''):
"""Returns the list of entities retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
List of entities.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if not mentions:
continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result
|
google/budou | budou/nlapisegmenter.py | NLAPISegmenter._get_annotations | python | def _get_annotations(self, text, language=''):
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().annotateText(body=body)
response = request.execute()
tokens = response.get('tokens', [])
language = response.get('language')
return {'tokens': tokens, 'language': language} | Returns the list of annotations retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
Results in a dictionary. :code:`tokens` contains the list of annotations
and :code:`language` contains the inferred language from the input. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L224-L253 | null | class NLAPISegmenter(Segmenter):
"""Natural Language API Segmenter.
Attributes:
service: A resource object for interacting with Cloud Natural Language API.
cache_filename (str): File path to the cache file.
supported_languages (list of str): List of supported languages' codes.
Args:
cache_filename (:obj:`str`, optional): File path to the pickle file for
caching. The file is created automatically if not exist. If the
environment is Google App Engine Standard Environment and memcache
service is available, it is used for caching and the pickle file
won't be generated.
credentials_path (:obj:`str`, optional): File path to the service
account's credentials file. If no file path is specified, it tries
to authenticate with default credentials.
use_entity (:obj:`bool`, optional): Whether to use entity analysis
results to wrap entity names in the output.
use_cache (:obj:`bool`, optional): Whether to use a cache system.
"""
supported_languages = {'ja', 'ko', 'zh', 'zh-TW', 'zh-CN', 'zh-HK', 'zh-Hant'}
def __init__(self, cache_filename, credentials_path, use_entity, use_cache):
self.cache_filename = cache_filename
self.credentials_path = credentials_path
self.use_entity = use_entity
self.use_cache = use_cache
self._authenticate()
def _authenticate(self):
import google_auth_httplib2
import googleapiclient.discovery
scope = ['https://www.googleapis.com/auth/cloud-platform']
if self.credentials_path:
try:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
self.credentials_path)
scoped_credentials = credentials.with_scopes(scope)
except ImportError:
logging.error('Failed to load google.oauth2.service_account module. '
'If you are running this script in Google App Engine '
'environment, you can initialize the segmenter with '
'default credentials.')
else:
import google.auth
scoped_credentials, _ = google.auth.default(scope)
authed_http = google_auth_httplib2.AuthorizedHttp(scoped_credentials)
service = googleapiclient.discovery.build(
'language', 'v1beta2', http=authed_http)
self.service = service
def segment(self, source, language=None):
"""Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`.
"""
if language and not language in self.supported_languages:
raise ValueError(
'Language {} is not supported by NLAPI segmenter'.format(language))
chunks, language = self._get_source_chunks(source, language=language)
if self.use_entity:
entities = self._get_entities(source, language=language)
chunks = self._group_chunks_by_entities(chunks, entities)
chunks.resolve_dependencies()
return chunks
def _get_source_chunks(self, input_text, language=None):
"""Returns a chunk list retrieved from Syntax Analysis results.
Args:
input_text (str): Text to annotate.
language (:obj:`str`, optional): Language of the text.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
chunks = ChunkList()
seek = 0
result = self._get_annotations(input_text, language=language)
tokens = result['tokens']
language = result['language']
for i, token in enumerate(tokens):
word = token['text']['content']
begin_offset = token['text']['beginOffset']
label = token['dependencyEdge']['label']
pos = token['partOfSpeech']['tag']
if begin_offset > seek:
chunks.append(Chunk.space())
seek = begin_offset
chunk = Chunk(word, pos, label)
if chunk.label in _DEPENDENT_LABEL:
# Determining concatenating direction based on syntax dependency.
chunk.dependency = i < token['dependencyEdge']['headTokenIndex']
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct()
chunks.append(chunk)
seek += len(word)
return chunks, language
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks
@_memorize
@_memorize
def _get_entities(self, text, language=''):
"""Returns the list of entities retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
List of entities.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if not mentions:
continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result
|
google/budou | budou/nlapisegmenter.py | NLAPISegmenter._get_entities | python | def _get_entities(self, text, language=''):
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if not mentions:
continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result | Returns the list of entities retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
List of entities. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L256-L288 | null | class NLAPISegmenter(Segmenter):
"""Natural Language API Segmenter.
Attributes:
service: A resource object for interacting with Cloud Natural Language API.
cache_filename (str): File path to the cache file.
supported_languages (list of str): List of supported languages' codes.
Args:
cache_filename (:obj:`str`, optional): File path to the pickle file for
caching. The file is created automatically if not exist. If the
environment is Google App Engine Standard Environment and memcache
service is available, it is used for caching and the pickle file
won't be generated.
credentials_path (:obj:`str`, optional): File path to the service
account's credentials file. If no file path is specified, it tries
to authenticate with default credentials.
use_entity (:obj:`bool`, optional): Whether to use entity analysis
results to wrap entity names in the output.
use_cache (:obj:`bool`, optional): Whether to use a cache system.
"""
supported_languages = {'ja', 'ko', 'zh', 'zh-TW', 'zh-CN', 'zh-HK', 'zh-Hant'}
def __init__(self, cache_filename, credentials_path, use_entity, use_cache):
self.cache_filename = cache_filename
self.credentials_path = credentials_path
self.use_entity = use_entity
self.use_cache = use_cache
self._authenticate()
def _authenticate(self):
import google_auth_httplib2
import googleapiclient.discovery
scope = ['https://www.googleapis.com/auth/cloud-platform']
if self.credentials_path:
try:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
self.credentials_path)
scoped_credentials = credentials.with_scopes(scope)
except ImportError:
logging.error('Failed to load google.oauth2.service_account module. '
'If you are running this script in Google App Engine '
'environment, you can initialize the segmenter with '
'default credentials.')
else:
import google.auth
scoped_credentials, _ = google.auth.default(scope)
authed_http = google_auth_httplib2.AuthorizedHttp(scoped_credentials)
service = googleapiclient.discovery.build(
'language', 'v1beta2', http=authed_http)
self.service = service
def segment(self, source, language=None):
"""Returns a chunk list from the given sentence.
Args:
source (str): Source string to segment.
language (:obj:`str`, optional): A language code.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
Raises:
ValueError: If :obj:`language` is given and it is not included in
:obj:`supported_languages`.
"""
if language and not language in self.supported_languages:
raise ValueError(
'Language {} is not supported by NLAPI segmenter'.format(language))
chunks, language = self._get_source_chunks(source, language=language)
if self.use_entity:
entities = self._get_entities(source, language=language)
chunks = self._group_chunks_by_entities(chunks, entities)
chunks.resolve_dependencies()
return chunks
def _get_source_chunks(self, input_text, language=None):
"""Returns a chunk list retrieved from Syntax Analysis results.
Args:
input_text (str): Text to annotate.
language (:obj:`str`, optional): Language of the text.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
chunks = ChunkList()
seek = 0
result = self._get_annotations(input_text, language=language)
tokens = result['tokens']
language = result['language']
for i, token in enumerate(tokens):
word = token['text']['content']
begin_offset = token['text']['beginOffset']
label = token['dependencyEdge']['label']
pos = token['partOfSpeech']['tag']
if begin_offset > seek:
chunks.append(Chunk.space())
seek = begin_offset
chunk = Chunk(word, pos, label)
if chunk.label in _DEPENDENT_LABEL:
# Determining concatenating direction based on syntax dependency.
chunk.dependency = i < token['dependencyEdge']['headTokenIndex']
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct()
chunks.append(chunk)
seek += len(word)
return chunks, language
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks
@_memorize
def _get_annotations(self, text, language=''):
"""Returns the list of annotations retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
Results in a dictionary. :code:`tokens` contains the list of annotations
and :code:`language` contains the inferred language from the input.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().annotateText(body=body)
response = request.execute()
tokens = response.get('tokens', [])
language = response.get('language')
return {'tokens': tokens, 'language': language}
@_memorize
|
google/budou | budou/cachefactory.py | PickleCache.get | python | def get(self, key):
self._create_file_if_none_exists()
with open(self.filename, 'rb') as file_object:
cache_pickle = pickle.load(file_object)
val = cache_pickle.get(key, None)
return val | Gets a value by a key.
Args:
key (str): Key to retrieve the value.
Returns: Retrieved value. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/cachefactory.py#L93-L105 | [
"def _create_file_if_none_exists(self):\n if os.path.exists(self.filename):\n return\n with open(self.filename, 'wb') as file_object:\n pickle.dump({}, file_object)\n"
] | class PickleCache(BudouCache):
"""Cache system with :obj:`pickle` backend.
Args:
filename (str): The file path to the cache file.
Attributes:
filename (str): The file path to the cache file.
"""
DEFAULT_FILE_NAME = '/tmp/budou-cache.pickle'
""" The default path to the cache file.
"""
def __init__(self, filename):
self.filename = filename if filename else self.DEFAULT_FILE_NAME
def set(self, key, val):
"""Sets a value in a key.
Args:
key (str): Key for the value.
val: Value to set.
Returns:
Retrieved value.
"""
self._create_file_if_none_exists()
with open(self.filename, 'r+b') as file_object:
cache_pickle = pickle.load(file_object)
cache_pickle[key] = val
file_object.seek(0)
pickle.dump(cache_pickle, file_object)
def _create_file_if_none_exists(self):
if os.path.exists(self.filename):
return
with open(self.filename, 'wb') as file_object:
pickle.dump({}, file_object)
|
google/budou | budou/cachefactory.py | PickleCache.set | python | def set(self, key, val):
self._create_file_if_none_exists()
with open(self.filename, 'r+b') as file_object:
cache_pickle = pickle.load(file_object)
cache_pickle[key] = val
file_object.seek(0)
pickle.dump(cache_pickle, file_object) | Sets a value in a key.
Args:
key (str): Key for the value.
val: Value to set.
Returns:
Retrieved value. | train | https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/cachefactory.py#L107-L122 | [
"def _create_file_if_none_exists(self):\n if os.path.exists(self.filename):\n return\n with open(self.filename, 'wb') as file_object:\n pickle.dump({}, file_object)\n"
] | class PickleCache(BudouCache):
"""Cache system with :obj:`pickle` backend.
Args:
filename (str): The file path to the cache file.
Attributes:
filename (str): The file path to the cache file.
"""
DEFAULT_FILE_NAME = '/tmp/budou-cache.pickle'
""" The default path to the cache file.
"""
def __init__(self, filename):
self.filename = filename if filename else self.DEFAULT_FILE_NAME
def get(self, key):
"""Gets a value by a key.
Args:
key (str): Key to retrieve the value.
Returns: Retrieved value.
"""
self._create_file_if_none_exists()
with open(self.filename, 'rb') as file_object:
cache_pickle = pickle.load(file_object)
val = cache_pickle.get(key, None)
return val
def _create_file_if_none_exists(self):
if os.path.exists(self.filename):
return
with open(self.filename, 'wb') as file_object:
pickle.dump({}, file_object)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.