repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
"""The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import fftpack, linalg, special
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
    """Return a boxcar or rectangular window.

    Included for completeness, this is equivalent to no window at all.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        Whether the window is symmetric. (Has no effect for boxcar.)

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1.
    """
    if M < 1:
        # Honor the documented contract for non-positive M; without this
        # guard np.ones raises ValueError on a negative size.
        return np.array([])
    return np.ones(M, float)
def triang(M, sym=True):
    """Return a triangular window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Periodic (non-symmetric) even-length windows are built one point
    # longer and truncated at the end.
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    half = np.arange(1, (M + 1) // 2 + 1)
    if M % 2:
        # Odd length: the peak value 1 sits on the centre sample.
        rising = 2 * half / (M + 1.0)
        w = np.concatenate((rising, rising[-2::-1]))
    else:
        # Even length: the two middle samples straddle the (absent) peak.
        rising = (2 * half - 1.0) / M
        w = np.concatenate((rising, rising[::-1]))
    if trunc:
        w = w[:-1]
    return w
def parzen(M, sym=True):
    """Return a Parzen window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    half_span = (M - 1) / 2.0
    n = np.arange(-half_span, half_span + 0.5, 1.0)
    quarter = (M - 1) / 4.0
    # The Parzen window is piecewise cubic: a cubic tail beyond the
    # quarter point and a different cubic over the central half.
    outer = n[n < -quarter]
    inner = n[np.abs(n) <= quarter]
    ratio_out = np.abs(outer) / (M / 2.0)
    ratio_in = np.abs(inner) / (M / 2.0)
    tail = 2 * (1 - ratio_out) ** 3.0
    centre = 1 - 6 * ratio_in ** 2.0 + 6 * ratio_in ** 3.0
    # Mirror the left tail onto the right to complete the window.
    w = np.concatenate((tail, centre, tail[::-1]))
    if trunc:
        w = w[:-1]
    return w
def bohman(M, sym=True):
    """Return a Bohman window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    # Map the interior samples onto |x| for x in (-1, 1); the endpoints
    # are pinned to exactly zero below.
    x = np.abs(np.linspace(-1, 1, M)[1:-1])
    interior = (1 - x) * np.cos(np.pi * x) + 1.0 / np.pi * np.sin(np.pi * x)
    w = np.concatenate(([0.0], interior, [0.0]))
    if trunc:
        w = w[:-1]
    return w
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window according to Nuttall.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    # Nuttall's minimum-sidelobe 4-term coefficients, alternating in sign.
    coeffs = (0.3635819, -0.4891775, 0.1365995, -0.0106411)
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, c in enumerate(coeffs):
        w += c * np.cos(k * theta)
    if trunc:
        w = w[:-1]
    return w
def blackmanharris(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    # Minimum 4-term Blackman-Harris coefficients.
    a0, a1, a2, a3 = 0.35875, 0.48829, 0.14128, 0.01168
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = (a0 - a1 * np.cos(theta) +
         a2 * np.cos(2 * theta) - a3 * np.cos(3 * theta))
    if trunc:
        w = w[:-1]
    return w
def flattop(M, sym=True):
    """Return a flat top window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    # Five-term cosine sum; the coefficients give the flat passband top.
    a0, a1, a2, a3, a4 = 0.2156, 0.4160, 0.2781, 0.0836, 0.0069
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = (a0 - a1 * np.cos(theta) +
         a2 * np.cos(2 * theta) - a3 * np.cos(3 * theta) +
         a4 * np.cos(4 * theta))
    if trunc:
        w = w[:-1]
    return w
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means"removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def hann(M, sym=True):
    r"""
    Return a Hann window.

    The Hann window is a taper formed by using a raised cosine or sine-squared
    with ends that touch zero.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    Notes
    -----
    The Hann window is defined as

    .. math::  w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    The window was named for Julius von Hann, an Austrian meteorologist.
    It is also known as the Cosine Bell, and sometimes erroneously called
    the "Hanning" window.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    # Raised cosine over one full period.
    theta = 2.0 * np.pi * np.arange(0, M) / (M - 1)
    w = 0.5 - 0.5 * np.cos(theta)
    if trunc:
        w = w[:-1]
    return w
hanning = hann
def tukey(M, alpha=0.5, sym=True):
    r"""Return a Tukey window, also known as a tapered cosine window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    alpha : float, optional
        Shape parameter of the Tukey window, representing the fraction of the
        window inside the cosine tapered region.
        If zero, the Tukey window is equivalent to a rectangular window.
        If one, the Tukey window is equivalent to a Hann window.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
           Analysis with the Discrete Fourier Transform". Proceedings of the
           IEEE 66 (1): 51-83. doi:10.1109/PROC.1978.10837
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function#Tukey_window
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    if alpha <= 0:
        # No tapered region at all: plain rectangular window.
        return np.ones(M, 'd')
    elif alpha >= 1.0:
        # Entirely tapered: reduces to the Hann window.
        return hann(M, sym=sym)
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    samples = np.arange(0, M)
    width = int(np.floor(alpha * (M - 1) / 2.0))
    # Partition the indices into rising taper, flat top, falling taper.
    left = samples[0:width + 1]
    middle = samples[width + 1:M - width - 1]
    right = samples[M - width - 1:]
    rise = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0 * left / alpha / (M - 1))))
    flat = np.ones(middle.shape)
    fall = 0.5 * (1 + np.cos(np.pi * (-2.0 / alpha + 1 +
                                      2.0 * right / alpha / (M - 1))))
    w = np.concatenate((rise, flat, fall))
    if trunc:
        w = w[:-1]
    return w
def barthann(M, sym=True):
    """Return a modified Bartlett-Hann window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    # Distance of each sample from the window centre, in [0, 0.5].
    offset = np.abs(np.arange(0, M) / (M - 1.0) - 0.5)
    # Blend of a linear (Bartlett) term and a cosine (Hann) term.
    w = 0.62 - 0.48 * offset + 0.38 * np.cos(2 * np.pi * offset)
    if trunc:
        w = w[:-1]
    return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
    r"""Return a window with a generalized Gaussian shape.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    p : float
        Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
        the same shape as the Laplace distribution.
    sig : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design. When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    Notes
    -----
    The generalized Gaussian window is defined as

    .. math::  w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }

    the half-power point is at

    .. math::  (2 \log(2))^{1/(2 p)} \sigma
    """
    # Degenerate sizes: empty output or a single unit sample.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Even-length periodic windows are symmetric windows of length M + 1
    # with the final sample dropped.
    trim_last = not sym and M % 2 == 0
    if trim_last:
        M = M + 1
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    w = np.exp(-0.5 * np.abs(offsets / sig) ** (2 * p))
    return w[:-1] if trim_last else w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
    r"""Return a Dolph-Chebyshev window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    at : float
        Attenuation (in dB).
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1

    Notes
    -----
    This window optimizes for the narrowest main lobe width for a given order
    `M` and sidelobe equiripple attenuation `at`, using Chebyshev
    polynomials.  It was originally developed by Dolph to optimize the
    directionality of radio antenna arrays.

    Unlike most windows, the Dolph-Chebyshev is defined in terms of its
    frequency response:

    .. math:: W(k) = \frac
              {\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
              {\cosh[M \cosh^{-1}(\beta)]}

    where

    .. math:: \beta = \cosh \left [\frac{1}{M}
              \cosh^{-1}(10^\frac{A}{20}) \right ]

    and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).

    The time domain window is then generated using the IFFT, so
    power-of-two `M` are the fastest to generate, and prime number `M` are
    the slowest.

    The equiripple condition in the frequency domain creates impulses in the
    time domain, which appear at the ends of the window.

    References
    ----------
    .. [1] C. Dolph, "A current distribution for broadside arrays which
           optimizes the relationship between beam width and side-lobe level",
           Proceedings of the IEEE, Vol. 34, Issue 6
    .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
           American Meteorological Society (April 1997)
           http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
    .. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
           discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
           No. 1, January 1978
    """
    # For small attenuations the equivalent noise bandwidth is not monotone
    # in `at`, which makes the window misleading for spectral analysis.
    if np.abs(at) < 45:
        warnings.warn("This window is not suitable for spectral analysis "
                      "for attenuation values lower than about 45dB because "
                      "the equivalent noise bandwidth of a Chebyshev window "
                      "does not grow monotonically with increasing sidelobe "
                      "attenuation when the attenuation is smaller than "
                      "about 45 dB.")
    # Degenerate sizes: empty output or a single unit sample.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')

    odd = M % 2
    # A periodic (sym=False) window of even length is computed as a
    # symmetric window of length M + 1 and truncated at the end.
    if not sym and not odd:
        M = M + 1

    # compute the parameter beta
    order = M - 1.0
    beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
    k = np.r_[0:M] * 1.0
    x = beta * np.cos(np.pi * k / M)
    # Find the window's DFT coefficients
    # Use analytic definition of Chebyshev polynomial instead of expansion
    # from scipy.special. Using the expansion in scipy.special leads to errors.
    # T_order(x) is evaluated piecewise: cosh form for |x| > 1 (with a sign
    # correction for odd orders on x < -1), cos form for |x| <= 1.
    p = np.zeros(x.shape)
    p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
    p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
    p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))

    # Appropriate IDFT and filling up
    # depending on even/odd M
    if M % 2:
        w = np.real(fftpack.fft(p))
        n = (M + 1) // 2
        # Keep the first half and mirror it to build the symmetric window.
        w = w[:n]
        w = np.concatenate((w[n - 1:0:-1], w))
    else:
        # Half-sample phase shift so the even-length IDFT lands on sample
        # centers before mirroring.
        p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
        w = np.real(fftpack.fft(p))
        n = M // 2 + 1
        w = np.concatenate((w[n - 1:0:-1], w[1:n]))
    # Normalize so the peak value is exactly 1.
    w = w / max(w)
    if not sym and not odd:
        w = w[:-1]
    return w
def slepian(M, width, sym=True):
    """Return a digital Slepian (DPSS) window.

    Used to maximize the energy concentration in the main lobe.  Also called
    the digital prolate spheroidal sequence (DPSS).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    width : float
        Bandwidth
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design. When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1
    """
    # Degenerate sizes: empty output or a single unit sample.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Even-length periodic windows: solve at length M + 1, drop last sample.
    drop_last = not sym and M % 2 == 0
    if drop_last:
        M = M + 1
    # `width` is the full bandwidth; halve it twice to match the old version.
    half_bw = (width / 2) / 2
    idx = np.arange(M, dtype='d')
    # Banded (upper) form of the symmetric tridiagonal matrix whose top
    # eigenvector is the DPSS window.
    bands = np.zeros((2, M))
    bands[0, 1:] = idx[1:] * (M - idx[1:]) / 2
    bands[1, :] = ((M - 1 - 2 * idx) / 2) ** 2 * np.cos(2 * np.pi * half_bw)
    # Only the largest-index eigenpair is needed.
    _, vec = linalg.eig_banded(bands, select='i', select_range=(M - 1, M - 1))
    win = vec.ravel()
    win = win / win.max()
    return win[:-1] if drop_last else win
def cosine(M, sym=True):
    """Return a window with a simple cosine shape.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design. When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    Notes
    -----
    .. versionadded:: 0.13.0
    """
    # Degenerate sizes: empty output or a single unit sample.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Even-length periodic windows: build at length M + 1, drop last sample.
    drop_last = not sym and M % 2 == 0
    if drop_last:
        M = M + 1
    # Half a sine period sampled at half-integer positions.
    w = np.sin(np.pi / M * (np.arange(0, M) + .5))
    return w[:-1] if drop_last else w
def exponential(M, center=None, tau=1., sym=True):
    r"""Return an exponential (or Poisson) window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    center : float, optional
        Parameter defining the center location of the window function.
        The default value if not given is ``center = (M-1) / 2``.  This
        parameter must take its default value for symmetric windows.
    tau : float, optional
        Parameter defining the decay.  For ``center = 0`` use
        ``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
        remaining at the end.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design. When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    Raises
    ------
    ValueError
        If `sym` is True and a `center` is given.

    Notes
    -----
    The Exponential window is defined as

    .. math::  w(n) = e^{-|n-center| / \tau}

    References
    ----------
    S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
    Technical Review 3, Bruel & Kjaer, 1987.
    """
    # A symmetric window must peak at the midpoint, so an explicit center
    # is only meaningful for periodic (sym=False) windows.
    if sym and center is not None:
        raise ValueError("If sym==True, center must be None.")
    # Degenerate sizes: empty output or a single unit sample.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Even-length periodic windows: build at length M + 1, drop last sample.
    drop_last = not sym and M % 2 == 0
    if drop_last:
        M = M + 1
    if center is None:
        center = (M - 1) / 2
    samples = np.arange(0, M)
    w = np.exp(-np.abs(samples - center) / tau)
    return w[:-1] if drop_last else w
# Map each tuple of equivalent window-name aliases to a pair of
# (window function, whether the window requires extra parameters).
_win_equiv_raw = {
    ('barthann', 'brthan', 'bth'): (barthann, False),
    ('bartlett', 'bart', 'brt'): (bartlett, False),
    ('blackman', 'black', 'blk'): (blackman, False),
    ('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
    ('bohman', 'bman', 'bmn'): (bohman, False),
    ('boxcar', 'box', 'ones',
        'rect', 'rectangular'): (boxcar, False),
    ('chebwin', 'cheb'): (chebwin, True),
    ('cosine', 'halfcosine'): (cosine, False),
    ('exponential', 'poisson'): (exponential, True),
    ('flattop', 'flat', 'flt'): (flattop, False),
    ('gaussian', 'gauss', 'gss'): (gaussian, True),
    ('general gaussian', 'general_gaussian',
        'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
    ('hamming', 'hamm', 'ham'): (hamming, False),
    ('hanning', 'hann', 'han'): (hann, False),
    ('kaiser', 'ksr'): (kaiser, True),
    ('nuttall', 'nutl', 'nut'): (nuttall, False),
    ('parzen', 'parz', 'par'): (parzen, False),
    ('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
    ('triangle', 'triang', 'tri'): (triang, False),
    ('tukey', 'tuk'): (tukey, True),
}

# Fill dict with all valid window name strings.
# Comprehensions avoid leaking loop variables (`k`, `v`, `key`) into the
# module namespace, which the previous explicit loops did.
_win_equiv = {
    alias: entry[0]
    for aliases, entry in _win_equiv_raw.items()
    for alias in aliases
}

# Keep track of which windows need additional parameters.
_needs_param = {
    alias
    for aliases, entry in _win_equiv_raw.items() if entry[1]
    for alias in aliases
}
def get_window(window, Nx, fftbins=True):
    """
    Return a window.

    Parameters
    ----------
    window : string, float, or tuple
        The type of window to create. See below for more details.
    Nx : int
        The number of samples in the window.
    fftbins : bool, optional
        If True (default), create a "periodic" window, ready to use with
        `ifftshift` and be multiplied by the result of an FFT (see also
        `fftpack.fftfreq`).
        If False, create a "symmetric" window, for use in filter design.

    Returns
    -------
    get_window : ndarray
        Returns a window of length `Nx` and type `window`

    Notes
    -----
    Window types:

        `boxcar`, `triang`, `blackman`, `hamming`, `hann`, `bartlett`,
        `flattop`, `parzen`, `bohman`, `blackmanharris`, `nuttall`,
        `barthann`, `kaiser` (needs beta), `gaussian` (needs standard
        deviation), `general_gaussian` (needs power, width), `slepian`
        (needs width), `chebwin` (needs attenuation), `exponential`
        (needs decay scale), `tukey` (needs taper fraction)

    If the window requires no parameters, then `window` can be a string.

    If the window requires parameters, then `window` must be a tuple
    with the first argument the string name of the window, and the next
    arguments the needed parameters.

    If `window` is a floating point number, it is interpreted as the beta
    parameter of the `kaiser` window.

    Each of the window types listed above is also the name of
    a function that can be called directly to create a window of
    that type.

    Examples
    --------
    >>> from scipy import signal
    >>> signal.get_window('triang', 7)
    array([ 0.25,  0.5 ,  0.75,  1.  ,  0.75,  0.5 ,  0.25])
    >>> signal.get_window(('kaiser', 4.0), 9)
    array([ 0.08848053,  0.32578323,  0.63343178,  0.89640418,  1.        ,
            0.89640418,  0.63343178,  0.32578323,  0.08848053])
    >>> signal.get_window(4.0, 9)
    array([ 0.08848053,  0.32578323,  0.63343178,  0.89640418,  1.        ,
            0.89640418,  0.63343178,  0.32578323,  0.08848053])
    """
    sym = not fftbins
    try:
        beta = float(window)
    except (TypeError, ValueError):
        # Not a bare number: expect a window name or a (name, *params) tuple.
        extra_args = ()
        if isinstance(window, tuple):
            winstr = window[0]
            if len(window) > 1:
                extra_args = window[1:]
        elif isinstance(window, string_types):
            if window in _needs_param:
                raise ValueError("The '" + window + "' window needs one or "
                                 "more parameters -- pass a tuple.")
            winstr = window
        else:
            raise ValueError("%s as window type is not supported." %
                             str(type(window)))
        try:
            winfunc = _win_equiv[winstr]
        except KeyError:
            raise ValueError("Unknown window type.")
        params = (Nx,) + extra_args + (sym,)
    else:
        # A plain number is shorthand for a Kaiser window with that beta.
        winfunc = kaiser
        params = (Nx, beta, sym)
    return winfunc(*params)
| bsd-3-clause |
ephes/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
    """Check that FactorAnalysis recovers the covariance structure of data
    from a linear-Gaussian generative model, that invalid parameters raise,
    and that the 'randomized' and 'lapack' SVD solvers agree.
    """
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    # An invalid svd_method must fail at construction and at fit time.
    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)
    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        # The per-iteration log-likelihood bookkeeping must be consistent
        # with score_samples / score.
        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # Intended as a monotonic-increase check; note np.all(np.diff(...))
        # only tests that successive differences are nonzero (truthy).
        diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood dif not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        # Mean absolute deviation between sample and model covariances
        # should be small once the model is fit.
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
    fa = FactorAnalysis(n_components=n_components,
                        noise_variance_init=np.ones(n_features))
    # noise_variance_init length must match the number of features.
    assert_raises(ValueError, fa.fit, X[:, :2])

    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    # Both SVD solvers should converge to the same solution up to sign.
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))

    # A single EM iteration should trigger a convergence warning.
    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        # Covariance and precision must be (numerical) inverses.
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| bsd-3-clause |
stharrold/demo | demo/app_predict/predict.py | 1 | 119605 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""Prediction application.
"""
# Import standard packages.
import bs4
import collections
import inspect
import itertools
import logging
import os
import pickle
import requests
import shelve
import sys
import textwrap
import time
import warnings
# Import installed packages.
import geopy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
import sklearn.cross_validation as sk_cv
import sklearn.cluster as sk_cl
import sklearn.decomposition as sk_dc
import sklearn.ensemble as sk_ens
import sklearn.metrics as sk_met
import sklearn.preprocessing as sk_pre
# Import local packages.
from .. import utils
# Define module exports:
# Public API of this module; anything not listed here is internal.
__all__ = [
    'etl',
    'create_features',
    'plot_eda',
    'plot_heursitic',
    'update_features',
    'update_features_append',
    'create_features_new_data',
    'create_pipeline_model']
# Define state settings and globals.
# Note: For non-root-level loggers, use `getLogger(__name__)`
# http://stackoverflow.com/questions/17336680/python-logging-with-multiple-modules-does-not-work
logger = logging.getLogger(__name__)
# Set the matplotlib backend to the Anti-Grain Geometry C++ library.
# Note: Use plt.switch_backend since matplotlib.use('agg') before importing pyplot fails.
plt.switch_backend('agg')
# Set matplotlib styles with seaborn
sns.set()
# Define globals
# Return rates over 10% are considered excessive.
buyer_retrate_max = 0.1
# Return rate is ratio of Returned=1 to Returned not null.
# Column name produced downstream (cumulative per-buyer return fraction);
# used as the heuristic score for flagging buyers.
buyer_retrate = 'BuyerID_fracReturned1DivReturnedNotNull'
def etl(
    df: pd.DataFrame
) -> pd.DataFrame:
    r"""Extract-transform-load.

    Format and clean the raw auction-transaction data. Each cleaning step
    logs before/after summaries through the module-level ``logger``.

    Args:
        df (pandas.DataFrame): Dataframe of raw data.

    Returns:
        df (pandas.DataFrame): Dataframe of formatted, cleaned data.

    TODO:
        * Modularize script into separate helper functions.
        * Modify dataframe in place
    """
    # Check input.
    # Copy dataframe to avoid in place modification.
    df = df.copy()
    ########################################
    # DSEligible, Returned: Fix DSEligible == 0 but Returned not null
    # Some vehicles have DSEligible=0 but have Returned!=nan due to errors or extenuating circumstances.
    # To correct: If Returned!=nan, then DSEligible=1
    # Note: Skip if cleaning new data for which Returned is unknown.
    if 'Returned' in df.columns:
        logger.info(textwrap.dedent("""\
            DSEligible, Returned: Fix DSEligible == 0 but Returned not null.
            To correct: If Returned not null, then DSEligible = 1."""))
        logger.info("Before:\n{pt}".format(
            pt=pd.pivot_table(
                df[['DSEligible', 'Returned']].astype(str),
                index='DSEligible', columns='Returned',
                aggfunc=len, margins=True, dropna=False)))
        df.loc[df['Returned'].notnull(), 'DSEligible'] = 1
        logger.info("After:\n{pt}".format(
            pt=pd.pivot_table(
                df[['DSEligible', 'Returned']].astype(str),
                index='DSEligible', columns='Returned',
                aggfunc=len, margins=True, dropna=False)))
    ########################################
    # Returned: fill nulls
    # Fill null values with -1 and cast to int.
    # Note: Skip if cleaning new data for which Returned is unknown.
    if 'Returned' in df.columns:
        logger.info('Returned: Fill nulls with -1 and cast to int.')
        logger.info("Before:\n{pt}".format(
            pt=pd.pivot_table(
                df[['DSEligible', 'Returned']].astype(str),
                index='DSEligible', columns='Returned',
                aggfunc=len, margins=True, dropna=False)))
        df['Returned'] = df[['Returned']].fillna(value=-1).astype(int)
        logger.info("After:\n{pt}".format(
            pt=pd.pivot_table(
                df[['DSEligible', 'Returned']].astype(str),
                index='DSEligible', columns='Returned',
                aggfunc=len, margins=True, dropna=False)))
    ########################################
    # BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:
    # Cast to strings as categorical features.
    logger.info(textwrap.dedent("""\
        BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:
        Cast to strings as categorical features."""))
    for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:
        df[col] = df[col].astype(str)
    ########################################
    # CarMake: Deduplicate
    # TODO: Find/scrape hierarchical relationships between car brands
    #   (e.g. https://en.wikipedia.org/wiki/Category:Mid-size_cars). To business people: would that be helpful?
    # TODO: Deduplicate with spelling corrector.
    logger.info("CarMake: Deduplicate.")
    # Manual mapping of observed misspellings/abbreviations to canonical
    # brand names; 'nan' (stringified null) maps to 'UNKNOWN'.
    carmake_dedup = {
        '1SUZU': 'ISUZU',
        'CHEVY': 'CHEVROLET',
        'XHEVY': 'CHEVROLET',
        'DAMON': 'DEMON',
        'FORESTR': 'FORESTRIVER',
        'FORESTRIVE': 'FORESTRIVER',
        'FREIGHTLI': 'FREIGHTLINER',
        'FREIGHTLIN': 'FREIGHTLINER',
        'FRIGHTLIE': 'FREIGHTLINER',
        'FRTLNRL': 'FREIGHTLINER',
        'XFREIGHTLN': 'FREIGHTLINER',
        'XREIGHTL': 'FREIGHTLINER',
        'HARLEY': 'HARLEYDAVIDSON',
        'HARLEY-DAV': 'HARLEYDAVIDSON',
        'INTERNATIO': 'INTERNATIONAL',
        'INTERNATL': 'INTERNATIONAL',
        'XINTERNATI': 'INTERNATIONAL',
        'MERCEDES': 'MERCEDES-BENZ',
        'nan': 'UNKNOWN',
        'XHINO': 'HINO',
        'XOSHKOSH': 'OSHKOSH',
        'XSMART': 'SMART'}
    df['CarMake'] = df['CarMake'].str.replace(' ', '').apply(
        lambda car: carmake_dedup[car] if car in carmake_dedup else car)
    # # TODO: Experiment with one-hot encoding (problem is that it doesn't scale)
    # df = pd.merge(
    #     left=df,
    #     right=pd.get_dummies(df['CarMake'], prefix='CarMake'),
    #     left_index=True,
    #     right_index=True)
    ########################################
    # JDPowersCat: Replace nan with UNKNOWN
    logger.info("JDPowersCat: Replace 'nan' with 'UNKNOWN'.")
    df['JDPowersCat'] = df['JDPowersCat'].str.replace(' ', '').apply(
        lambda cat: 'UNKNOWN' if cat == 'nan' else cat)
    ########################################
    # LIGHTG, LIGHTY, LIGHTR
    # Retain light with highest warning
    logger.info("LIGHT*: Only retain light with highest warning.")
    # Summarize overlaps between light flags before enforcing exclusivity.
    pt = pd.DataFrame([
        df.loc[df['LIGHTG']==1, ['LIGHTG', 'LIGHTY', 'LIGHTR']].sum(),
        df.loc[df['LIGHTY']==1, ['LIGHTG', 'LIGHTY', 'LIGHTR']].sum(),
        df.loc[df['LIGHTR']==1, ['LIGHTG', 'LIGHTY', 'LIGHTR']].sum()],
        index=['LIGHTG=1', 'LIGHTY=1', 'LIGHTR=1'])
    pt.columns = ['LIGHTG=1', 'LIGHTY=1', 'LIGHTR=1']
    logger.info("Before:\n{pt}".format(pt=pt))
    # Red overrides yellow and green; yellow overrides green.
    df.loc[df['LIGHTR']==1, ['LIGHTG', 'LIGHTY']] = 0
    df.loc[df['LIGHTY']==1, ['LIGHTG']] = 0
    pt = pd.DataFrame([
        df.loc[df['LIGHTG']==1, ['LIGHTG', 'LIGHTY', 'LIGHTR']].sum(),
        df.loc[df['LIGHTY']==1, ['LIGHTG', 'LIGHTY', 'LIGHTR']].sum(),
        df.loc[df['LIGHTR']==1, ['LIGHTG', 'LIGHTY', 'LIGHTR']].sum()],
        index=['LIGHTG=1', 'LIGHTY=1', 'LIGHTR=1'])
    pt.columns = ['LIGHTG=1', 'LIGHTY=1', 'LIGHTR=1']
    logger.info("After:\n{pt}".format(pt=pt))
    ########################################
    # SaleDate: Cast to datetime.
    logger.info("SaleDate: Cast to datetime.")
    # Only parse when still object dtype (skip if already datetime).
    if df['SaleDate'].dtype == 'O':
        df['SaleDate'] = pd.to_datetime(df['SaleDate'], format=r'%y-%m-%d')
    ########################################
    # Autocheck_score: Fill null values with mode (1)
    # TODO: Use nearest neighbors to infer probable fill value.
    logger.info("Autocheck_score: Fill null values with mode (1).")
    df['Autocheck_score'] = df['Autocheck_score'].fillna(value=1)
    ########################################
    # ConditionReport
    # Map character codes to numerical values, invalid codes are "average".
    logger.info("ConditionReport: Map character codes to numerical values. Invalid codes are 'average'.")
    # EC/CL/AV/RG/PR/SL grade scale; unrecognized codes and nulls map to 30
    # ("average"). Codes already numeric pass through the apply unchanged.
    conrpt_value = {
        'EC': 50,
        'CL': 40,
        'AV': 30,
        'RG': 20,
        'PR': 10,
        'SL': 0,
        'A': 30,
        'A3': 30,
        'Y6': 30,
        'nan': 30}
    df['ConditionReport'] = df['ConditionReport'].astype(str).apply(
        lambda con: conrpt_value[con] if con in conrpt_value else con)
    df['ConditionReport'] = df['ConditionReport'].astype(int)
    return df
def create_features(
df:pd.DataFrame,
path_data_dir:str
) -> pd.DataFrame:
r"""Create features for post-ETL data.
Args:
df (pandas.DataFrame): Dataframe of raw data.
path_data_dir (str): Path to data directory for caching geocode shelf file.
Returns:
df (pandas.DataFrame): Dataframe of extracted data.
See Also:
etl
Notes:
* BuyerID_fracReturned1DivReturnedNotNull is the return rate for a buyer.
TODO:
* Modularize script into separate helper functions.
* Modify dataframe in place
"""
# Check input.
# Copy dataframe to avoid in place modification.
df = df.copy()
# Check file path.
if not os.path.exists(path_data_dir):
raise IOError(textwrap.dedent("""\
Path does not exist:
path_data_dir = {path}""".format(
path=path_data_dir)))
########################################
# Returned_asm
# Interpretation of assumptions:
# If DSEligible=0, then the vehicle is not eligible for a guarantee.
# * And Returned=-1 (null) since we don't know whether or not it would have been returned,
# but given that it wasn't eligible, it may have been likely to have Returned=1.
# If DSEligible=1, then the vehicle is eligible for a guarantee.
# * And if Returned=0 then the guarantee was purchased and the vehicle was not returned.
# * And if Returned=1 then the guarantee was purchased and the vehicle was returned.
# * And if Returned=-1 (null) then the guarantee was not purchased.
# We don't know whether or not it would have been returned,
# but given that the dealer did not purchase, it may have been likely to have Returned=0.
# Assume:
# If Returned=-1 and DSEligible=0, then Returned_asm=1
# If Returned=-1 and DSEligible=1, then Returned_asm=0
logger.info(textwrap.dedent("""\
Returned_asm: Assume returned status to fill nulls as new feature.
If Returned=-1 and DSEligible=0, then Returned_asm=1 (assumes low P(resale|buyer, car))
If Returned=-1 and DSEligible=1, then Returned_asm=0 (assumes high P(resale|buyer, car))"""))
df['Returned_asm'] = df['Returned']
df.loc[
np.logical_and(df['Returned'] == -1, df['DSEligible'] == 0),
'Returned_asm'] = 1
df.loc[
np.logical_and(df['Returned'] == -1, df['DSEligible'] == 1),
'Returned_asm'] = 0
logger.info("Relationship between DSEligible and Returned:\n{pt}".format(
pt=pd.pivot_table(
df[['DSEligible', 'Returned']].astype(str),
index='DSEligible', columns='Returned',
aggfunc=len, margins=True, dropna=False)))
logger.info("Relationship between DSEligible and Returned_asm:\n{pt}".format(
pt=pd.pivot_table(
df[['DSEligible', 'Returned_asm']].astype(str),
index='DSEligible', columns='Returned_asm',
aggfunc=len, margins=True, dropna=False)))
logger.info("Relationship between Returned and Returned_asm:\n{pt}".format(
pt=pd.pivot_table(
df[['Returned', 'Returned_asm']].astype(str),
index='Returned', columns='Returned_asm',
aggfunc=len, margins=True, dropna=False)))
########################################
# SellingLocation_lat, SellingLocation_lon
# Cell takes ~1 min to execute if shelf does not exist.
# Google API limit: https://developers.google.com/maps/documentation/geocoding/usage-limits
logger.info(textwrap.dedent("""\
SellingLocation: Geocode.
Scraping webpages for addresses and looking up latitude, longitude coordinates."""))
path_shelf = os.path.join(path_data_dir, 'sellloc_geoloc.shelf')
seconds_per_query = 1.0/50.0 # Google API limit
sellloc_geoloc = dict()
with shelve.open(filename=path_shelf, flag='c') as shelf:
for loc in df['SellingLocation'].unique():
if loc in shelf:
raw = shelf[loc]
if raw is None:
location = raw
else:
address = raw['formatted_address']
latitude = raw['geometry']['location']['lat']
longitude = raw['geometry']['location']['lng']
location = geopy.location.Location(
address=address, point=(latitude, longitude), raw=raw)
else:
url = r'https://www.manheim.com/locations/{loc}/events'.format(loc=loc)
page = requests.get(url)
tree = bs4.BeautifulSoup(page.text, 'lxml')
address = tree.find(name='p', class_='loc_address').get_text().strip()
try:
components = {
'country': 'United States',
'postal_code': address.split()[-1]}
location = geopy.geocoders.GoogleV3().geocode(
query=address,
exactly_one=True,
components=components)
except:
logger.warning(textwrap.dedent("""\
Exception raised. Setting {loc} geo location to `None`
sys.exc_info() =
{exc}""".format(loc=loc, exc=sys.exc_info())))
location = None
finally:
time.sleep(seconds_per_query)
if location is None:
shelf[loc] = location
else:
shelf[loc] = location.raw
sellloc_geoloc[loc] = location
logger.info("Mapping SellingLocation to latitude, longitude coordinates.")
sellloc_lat = {
sellloc: (geoloc.latitude if geoloc is not None else 0.0)
for (sellloc, geoloc) in sellloc_geoloc.items()}
sellloc_lon = {
sellloc: (geoloc.longitude if geoloc is not None else 0.0)
for (sellloc, geoloc) in sellloc_geoloc.items()}
df['SellingLocation_lat'] = df['SellingLocation'].map(sellloc_lat)
df['SellingLocation_lon'] = df['SellingLocation'].map(sellloc_lon)
# # TODO: experiment with one-hot encoding (problems is that it doesn't scale)
# df = pd.merge(
# left=df,
# right=pd.get_dummies(df['SellingLocation'], prefix='SellingLocation'),
# how='inner',
# left_index=True,
# right_index=True)
########################################
# JDPowersCat: One-hot encoding
# TODO: Estimate sizes from Wikipedia, e.g. https://en.wikipedia.org/wiki/Vehicle_size_class.
logger.info("JDPowersCat: One-hot encoding.")
# Cast to string, replacing 'nan' with 'UNKNOWN'.
df['JDPowersCat'] = (df['JDPowersCat'].astype(str)).str.replace(' ', '').apply(
lambda cat: 'UNKNOWN' if cat == 'nan' else cat)
# One-hot encoding.
df = pd.merge(
left=df,
right=pd.get_dummies(df['JDPowersCat'], prefix='JDPowersCat'),
left_index=True,
right_index=True)
########################################
# LIGHT_N0G1Y2R3
# Rank lights by warning level.
logger.info("LIGHT_N0G1Y2R3: Rank lights by warning level (null=0, green=1, yellow=2, red=3).")
df['LIGHT_N0G1Y2R3'] = df['LIGHTG']*1 + df['LIGHTY']*2 + df['LIGHTR']*3
########################################
# SaleDate_*: Extract timeseries features.
logger.info("SaleDate: Extract timeseries features.")
df['SaleDate_dow'] = df['SaleDate'].dt.dayofweek
df['SaleDate_doy'] = df['SaleDate'].dt.dayofyear
df['SaleDate_day'] = df['SaleDate'].dt.day
df['SaleDate_decyear'] = df['SaleDate'].dt.year + (df['SaleDate'].dt.dayofyear-1)/366
########################################
# BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:
# Make cumulative informative priors (*_num*, *_frac*) for string features.
logger.info(textwrap.dedent("""\
BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:
Make cumulative informative priors (*_num*, *_frac*) for string features."""))
# Cumulative features require sorting by time.
df.sort_values(by=['SaleDate'], inplace=True)
df.reset_index(drop=True, inplace=True)
for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:
logger.info("Processing {col}".format(col=col))
####################
# Cumulative count of transactions and DSEligible:
# Cumulative count of transactions (yes including current).
df[col+'_numTransactions'] = df[[col]].groupby(by=col).cumcount().astype(int) + 1
df[col+'_numTransactions'].fillna(value=1, inplace=True)
# Cumulative count of transactions that were DealShield-eligible (yes including current).
df[col+'_numDSEligible1'] = df[[col, 'DSEligible']].groupby(by=col)['DSEligible'].cumsum().astype(int)
df[col+'_numDSEligible1'].fillna(value=0, inplace=True)
# Cumulative ratio of transactions that were DealShield-eligible (0=bad, 1=good).
df[col+'_fracDSEligible1DivTransactions'] = (df[col+'_numDSEligible1']/df[col+'_numTransactions'])
df[col+'_fracDSEligible1DivTransactions'].fillna(value=1, inplace=True)
####################
# DSEligible and Returned
# Note:
# * DealShield-purchased ==> Returned != -1 (not null)
# * below requires
# DSEligible == 0 ==> Returned == -1 (is null)
# Returned != -1 (not null) ==> DSEligible == 1
assert (df.loc[df['DSEligible']==0, 'Returned'] == -1).all()
assert (df.loc[df['Returned']!=-1, 'DSEligible'] == 1).all()
# Cumulative count of transactions that were DealShield-eligible and DealShield-purchased.
df_tmp = df[[col, 'Returned']].copy()
df_tmp['ReturnedNotNull'] = df_tmp['Returned'] != -1
df[col+'_numReturnedNotNull'] = df_tmp[[col, 'ReturnedNotNull']].groupby(by=col)['ReturnedNotNull'].cumsum().astype(int)
df[col+'_numReturnedNotNull'].fillna(value=0, inplace=True)
del df_tmp
# Cumulative ratio of DealShield-eligible transactions that were DealShield-purchased (0=mode).
df[col+'_fracReturnedNotNullDivDSEligible1'] = df[col+'_numReturnedNotNull']/df[col+'_numDSEligible1']
df[col+'_fracReturnedNotNullDivDSEligible1'].fillna(value=0, inplace=True)
# Cumulative count of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned.
df_tmp = df[[col, 'Returned']].copy()
df_tmp['Returned1'] = df_tmp['Returned'] == 1
df[col+'_numReturned1'] = df_tmp[[col, 'Returned1']].groupby(by=col)['Returned1'].cumsum().astype(int)
df[col+'_numReturned1'].fillna(value=0, inplace=True)
del df_tmp
# Cumulative ratio of DealShield-eligible, DealShield-purchased transactions that were DealShield-returned (0=good, 1=bad).
# Note: BuyerID_fracReturned1DivReturnedNotNull is the cumulative return rate for a buyer.
df[col+'_fracReturned1DivReturnedNotNull'] = df[col+'_numReturned1']/df[col+'_numReturnedNotNull']
df[col+'_fracReturned1DivReturnedNotNull'].fillna(value=0, inplace=True)
# Check that weighted average of return rate equals overall return rate.
# Note: Requires groups sorted by date, ascending.
assert np.isclose(
(df[[col, col+'_fracReturned1DivReturnedNotNull', col+'_numReturnedNotNull']].groupby(by=col).last().product(axis=1).sum()/\
df[[col, col+'_numReturnedNotNull']].groupby(by=col).last().sum()).values[0],
sum(df['Returned']==1)/sum(df['Returned'] != -1),
equal_nan=True)
####################
# DSEligible and Returned_asm
# NOTE:
# * Below requires
# DSEligible == 0 ==> Returned_asm == 1
# Returned_asm == 0 ==> DSEligible == 1
assert (df.loc[df['DSEligible']==0, 'Returned_asm'] == 1).all()
assert (df.loc[df['Returned_asm']==0, 'DSEligible'] == 1).all()
# Cumulative number of transactions that were assumed to be returned.
df_tmp = df[[col, 'Returned_asm']].copy()
df_tmp['Returnedasm1'] = df_tmp['Returned_asm'] == 1
df[col+'_numReturnedasm1'] = df_tmp[[col, 'Returnedasm1']].groupby(by=col)['Returnedasm1'].cumsum().astype(int)
df[col+'_numReturnedasm1'].fillna(value=0, inplace=True)
del df_tmp
# Cumulative ratio of transactions that were assumed to be returned (0=mode).
df[col+'_fracReturnedasm1DivTransactions'] = df[col+'_numReturnedasm1']/df[col+'_numTransactions']
df[col+'_fracReturnedasm1DivTransactions'].fillna(value=0, inplace=True)
# Check that weighted average of assumed return rate equals overall assumed return rate.
assert np.isclose(
(df[[col, col+'_fracReturnedasm1DivTransactions', col+'_numTransactions']].groupby(by=col).last().product(axis=1).sum()/\
df[[col, col+'_numTransactions']].groupby(by=col).last().sum()).values[0],
sum(df['Returned_asm']==1)/sum(df['Returned_asm'] != -1),
equal_nan=True)
# Note:
# * Number of transactions that were DealShield-eligible and assumed to be returned ==
# number of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned
# (numReturned1)
return df
def plot_eda(
    df:pd.DataFrame,
    columns:list,
    path_plot_dir:str=None
    ) -> None:
    r"""Make plots for exploratory data analysis (EDA).

    For each column, plots two stacked histograms: the frequency distribution
    per transaction and the frequency distribution of the per-buyer mean,
    each split by whether the buyer's return rate exceeds the threshold.
    Then plots traces of every cumulative ``*_frac*`` feature vs the fraction
    of transactions completed for a weighted sample of categories.

    Args:
        df (pandas.DataFrame): Dataframe of formatted data.
        columns (list): List of strings of columns in `df` to plot.
        path_plot_dir (str, optional, None): Path to directory in which to save plots.
            If `None` (default), plots are displayed but not saved.
    Returns:
        None
    Raises:
        IOError: Raised if `path_plot_dir` is not `None` and does not exist.
    Notes:
        * Relies on module-level globals `buyer_retrate` (name of the buyer
          return-rate column) and `buyer_retrate_max` (threshold return rate)
          -- assumed defined at module scope; TODO confirm.
    """
    # Check inputs.
    # Bug fix: only validate the path when one was given. The previous check
    # passed the default `None` to os.path.exists, which raises TypeError
    # before the intended IOError could ever be raised.
    if path_plot_dir is not None and not os.path.exists(path_plot_dir):
        raise IOError(textwrap.dedent("""\
            Path does not exist: path_plot_dir =
            {path}""".format(path=path_plot_dir)))
    ################################################################################
    # Plot frequency distributions.
    print('#'*80)
    print('Plot frequency distributions (histograms) of columns.')
    for col in columns:
        print('#'*40)
        print('Feature: {col}'.format(col=col))
        print('Timestamp:', time.strftime(r'%Y-%m-%dT%H:%M:%S%Z', time.gmtime()))
        # Plot frequency distributions by transaction.
        # Avoid selecting the return-rate column twice when it is itself
        # the column being plotted.
        if col != buyer_retrate:
            df_plot = df[['BuyerID', col, buyer_retrate]].copy()
        else:
            df_plot = df[['BuyerID', buyer_retrate]].copy()
        buyer_retrate_omax = buyer_retrate+'_omax'
        # Flag transactions whose buyer return rate exceeds the threshold.
        df_plot[buyer_retrate_omax] = df_plot[buyer_retrate] > buyer_retrate_max
        itemized_counts = {
            is_omax: grp[col].values
            for (is_omax, grp) in df_plot.groupby(by=buyer_retrate_omax)}
        # Order groups so that False (within threshold) is plotted first.
        itemized_counts = collections.OrderedDict(
            sorted(itemized_counts.items(), key=lambda tup: tup[0], reverse=False))
        keys = itemized_counts.keys()
        bins = 50
        colors = sns.light_palette(sns.color_palette()[2], n_colors=len(keys))
        plt.hist(
            [itemized_counts[key] for key in itemized_counts.keys()],
            bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors)
        plt.title('{col}\nfrequency distribution'.format(col=col))
        plt.xlabel(col)
        plt.ylabel('Number of transactions with\n{col} = X'.format(col=col))
        plt.legend(
            title='Buyer return\nrate > {retrate:.0%}'.format(retrate=buyer_retrate_max),
            loc='upper left', bbox_to_anchor=(1.0, 1.0))
        # Leave room on the right for the legend placed outside the axes.
        rect = (0, 0, 0.85, 1)
        plt.tight_layout(rect=rect)
        if path_plot_dir is not None:
            plt.savefig(
                os.path.join(path_plot_dir, 'freq-dist-transaction_'+col+'.png'),
                dpi=300)
        plt.show()
        # Plot frequency distributions by buyer (mean of the column per BuyerID).
        itemized_counts = {
            is_omax: grp[['BuyerID', col]].groupby(by='BuyerID').mean().values.flatten()
            for (is_omax, grp) in df_plot.groupby(by=buyer_retrate_omax)}
        itemized_counts = collections.OrderedDict(
            sorted(itemized_counts.items(), key=lambda tup: tup[0], reverse=False))
        keys = itemized_counts.keys()
        plt.hist(
            [itemized_counts[key] for key in itemized_counts.keys()],
            bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors)
        plt.title('Mean {col} per buyer\nfrequency distribution'.format(col=col))
        plt.xlabel('Mean '+col)
        plt.ylabel('Number of buyers with\nmean {col} = X'.format(col=col))
        plt.legend(
            title='Buyer return\nrate > {retrate:.0%}'.format(retrate=buyer_retrate_max),
            loc='upper left', bbox_to_anchor=(1.0, 1.0))
        plt.tight_layout(rect=rect)
        if path_plot_dir is not None:
            plt.savefig(
                os.path.join(path_plot_dir, 'freq-dist-buyer_'+col+'.png'),
                dpi=300)
        plt.show()
    ################################################################################
    # Plot (timeseries) traces for fractional quantities vs fraction of completed transactions.
    # Columns to plot: catgory (cat), <category>_numTransactions (trans), <category>_frac* (col)
    print('#'*80)
    print('Plot traces (timeseries) for fractional quantities vs fraction of completed transactions.')
    plot_cols = list()
    for col in df.columns:
        if '_frac' in col:
            # Recover the category and transaction-count column names from
            # the fraction column's naming convention (<cat>_frac...).
            cat = col.split('_frac')[0]
            trans = cat+'_numTransactions'
            plot_cols.append([cat, trans, col])
    for (col_cat, col_trans, col_frac) in plot_cols:
        print('#'*40)
        print('Category column: {col}'.format(col=col_cat))
        print('Transaction column: {col}'.format(col=col_trans))
        print('Fraction column: {col}'.format(col=col_frac))
        print('Timestamp:', time.strftime(r'%Y-%m-%dT%H:%M:%S%Z', time.gmtime()))
        # Weight categorical values by number of transactions.
        # The last cumulative count per category is its total count, so the
        # totals must sum to the number of rows.
        assert (df[[col_cat, col_trans]].groupby(by=col_cat).last().sum() == len(df)).all()
        cat_wts = df[[col_cat, col_trans]].groupby(by=col_cat).last()/len(df)
        cat_wts.columns = [col_cat+'_wts']
        # Sample (with replacement) 30 categories, favoring frequent ones.
        cats = cat_wts.sample(n=30, replace=True, weights=col_cat+'_wts').index.values
        # Make plot.
        for idx in range(len(cats)):
            cat = cats[idx]
            tfmask = df[col_cat] == cat
            xvals = (df.loc[tfmask, col_trans]/sum(tfmask)).values
            yvals = df.loc[tfmask, col_frac].values
            xvals_omax = (df.loc[np.logical_and(tfmask, df[buyer_retrate] > buyer_retrate_max), col_trans]/sum(tfmask)).values
            yvals_omax = df.loc[np.logical_and(tfmask, df[buyer_retrate] > buyer_retrate_max), col_frac].values
            if len(xvals) > 51: # downsample for speed
                step = 1/50
                xvals_resampled = np.arange(start=0, stop=1+step, step=step)
                yvals_resampled = np.interp(x=xvals_resampled, xp=xvals, fp=yvals)
                (xvals, yvals) = (xvals_resampled, yvals_resampled)
            if len(xvals_omax) > 51: # downsample for speed
                idxs_omax = np.random.choice(range(len(xvals_omax)), size=51, replace=False)
                xvals_omax_resampled = xvals_omax[idxs_omax]
                yvals_omax_resampled = yvals_omax[idxs_omax]
                (xvals_omax, yvals_omax) = (xvals_omax_resampled, yvals_omax_resampled)
            plt.plot(
                xvals, yvals,
                marker='.', alpha=0.1, color=sns.color_palette()[0])
            # Only label the first over-threshold trace so the legend shows
            # a single entry instead of one per sampled category.
            if idx == 0:
                label = 'Buyer return\nrate > {retrate:.0%}'.format(retrate=buyer_retrate_max)
            else:
                label = None
            plt.plot(
                xvals_omax, yvals_omax,
                marker='o', alpha=0.2, linestyle='',
                color=sns.color_palette()[2], label=label)
        plt.title('{col_frac} vs\nfraction of transactions completed'.format(col_frac=col_frac))
        plt.xlabel("Fraction of transactions completed")
        plt.ylabel(col_frac)
        plt.legend(loc='upper left', bbox_to_anchor=(1.0, 1.0))
        rect = (0, 0, 0.80, 1)
        plt.tight_layout(rect=rect)
        if path_plot_dir is not None:
            plt.savefig(
                os.path.join(path_plot_dir, 'trace_'+col_frac+'.png'),
                dpi=300)
        plt.show()
    return None
def plot_heuristic(
    df:pd.DataFrame,
    path_plot_dir:str=None
    ) -> None:
    r"""Plot heuristic to predict bad dealers.

    Plots a sequence of stacked timeseries histograms of Returned status vs
    SaleDate, then of Returned=1 transactions split by buyer return rate, and
    finally frequency distributions of return rates per transaction and per
    buyer. Weekly bins are used for the timeseries histograms.

    Args:
        df (pandas.DataFrame): DataFrame of formatted data.
        path_plot_dir (str, optional, None): Path to directory in which to save plots.
            If `None` (default), plots are displayed but not saved.
    Returns:
        None
    Raises:
        IOError: Raised if `path_plot_dir` is not `None` and does not exist.
    Notes:
        * Relies on module-level globals `buyer_retrate` (name of the buyer
          return-rate column) and `buyer_retrate_max` (threshold return rate)
          -- assumed defined at module scope; TODO confirm.
    TODO:
        * Use stacked area chart instead of histogram, but requires aggregating by date.
        * Format xaxis with dates.
            2013.0 = 2013-01-01
            2013.2 = 2013-03-14
            2013.4 = 2013-05-26
            2013.6 = 2013-08-07
            2013.8 = 2013-10-19
    """
    # Check inputs.
    # Bug fix: only validate the path when one was given. The previous check
    # passed the default `None` to os.path.exists, which raises TypeError
    # before the intended IOError could ever be raised.
    if path_plot_dir is not None and not os.path.exists(path_plot_dir):
        raise IOError(textwrap.dedent("""\
            Path does not exist: path_plot_dir =
            {path}""".format(path=path_plot_dir)))
    # Plot timeseries histogram of Returned vs SalesDate.
    # Bins represent weeks.
    df_plot = df[['SaleDate_decyear', 'Returned']].copy()
    itemized_counts = {
        ret: collections.Counter(grp['SaleDate_decyear'])
        for (ret, grp) in df_plot.groupby(by='Returned')}
    # Order statuses descending (1, 0, -1) so stacking order is consistent.
    itemized_counts = collections.OrderedDict(
        sorted(itemized_counts.items(), key=lambda tup: tup[0], reverse=True))
    keys = itemized_counts.keys()
    # One bin per week: span of decimal years times 52.
    bins = int(np.ceil((df_plot['SaleDate_decyear'].max() - df_plot['SaleDate_decyear'].min())*52))
    colors = sns.color_palette(n_colors=len(keys))[::-1]
    plt.hist(
        [list(itemized_counts[key].elements()) for key in itemized_counts.keys()],
        bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors)
    plt.xlim(xmin=int(df_plot['SaleDate_decyear'].min()))
    # Remember the x-limits so subsequent plots share the same time axis.
    xlim = plt.xlim()
    plt.title('Returned vs SaleDate\nby Returned status')
    plt.xlabel('SaleDate (decimal year)')
    plt.ylabel('Number of transactions with\nReturned = <status>')
    plt.legend(title='Returned\nstatus', loc='upper left', bbox_to_anchor=(1.0, 1.0))
    # Leave room on the right for the legend placed outside the axes.
    rect = (0, 0, 0.85, 1)
    plt.tight_layout(rect=rect)
    if path_plot_dir is not None:
        plt.savefig(
            os.path.join(path_plot_dir, 'heuristic0_returned101_vs_saledate_by_status.png'),
            dpi=300)
    plt.show()
    # Plot timeseries histogram of Returned (0,1) vs SalesDate.
    # Bins represent weeks.
    df_plot = df.loc[df['Returned']!=-1, ['SaleDate_decyear', 'Returned']].copy()
    itemized_counts = {
        ret: collections.Counter(grp['SaleDate_decyear'])
        for (ret, grp) in df_plot.groupby(by='Returned')}
    itemized_counts = collections.OrderedDict(
        sorted(itemized_counts.items(), key=lambda tup: tup[0], reverse=True))
    keys = itemized_counts.keys()
    bins = int(np.ceil((df_plot['SaleDate_decyear'].max() - df_plot['SaleDate_decyear'].min())*52))
    plt.hist(
        [list(itemized_counts[key].elements()) for key in itemized_counts.keys()],
        bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors[:2])
    plt.xlim(xlim)
    plt.title('Returned vs SaleDate\nby Returned status')
    plt.xlabel('SaleDate (decimal year)')
    plt.ylabel('Number of transactions with\nReturned = <status>')
    plt.legend(title='Returned\nstatus', loc='upper left', bbox_to_anchor=(1.0, 1.0))
    plt.tight_layout(rect=rect)
    if path_plot_dir is not None:
        plt.savefig(
            os.path.join(path_plot_dir, 'heuristic1_returned01_vs_saledate_by_status.png'),
            dpi=300)
    plt.show()
    # # ARCHIVED: Use return rate as heuristic rather than return count.
    # # Plot timeseries histogram of Returned (1) vs SalesDate by BuyerID.
    # df_plot = df.loc[df['Returned']==1, ['SaleDate_decyear', 'BuyerID']].copy()
    # top = [tup[0] for tup in collections.Counter(df_plot['BuyerID']).most_common(n=20)]
    # itemized_counts_all = {
    #     buy: collections.Counter(grp['SaleDate_decyear'])
    #     for (buy, grp) in df_plot.groupby(by='BuyerID')}
    # itemized_counts_top = {'other': collections.Counter()}
    # for (buyerid, counts) in itemized_counts_all.items():
    #     if buyerid in top:
    #         itemized_counts_top[buyerid] = counts
    #     else:
    #         itemized_counts_top['other'].update(counts)
    # itemized_counts = collections.OrderedDict(
    #     sorted(itemized_counts_top.items(), key=lambda tup: sum(tup[1].values()), reverse=True))
    # itemized_counts.move_to_end('other')
    # keys = itemized_counts.keys()
    # bins = int(np.ceil((df_plot['SaleDate_decyear'].max() - df_plot['SaleDate_decyear'].min())*52))
    # colors = sns.light_palette(sns.color_palette()[2], n_colors=len(keys))
    # plt.hist(
    #     [list(itemized_counts[key].elements()) for key in itemized_counts.keys()],
    #     bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors)
    # plt.title('Returned vs SaleDate by BuyerID')
    # plt.xlabel('SaleDate (decimal year)')
    # plt.ylabel('Returned (status=1)')
    # plt.legend(title='BuyerID', loc='upper left', bbox_to_anchor=(1.0, 1.0))
    # plt.show()
    # Plot timeseries histogram of Returned (1) vs SalesDate
    # by BuyerID for BuyerIDs with return rate > buyer_retrate_max (buyer_retrate_max=0.1).
    # buyer_retrate = 'BuyerID_fracReturned1DivReturnedNotNull'
    # Bins represent weeks.
    df_plot = df.loc[df['Returned']==1, ['SaleDate_decyear', 'BuyerID', buyer_retrate]].copy()
    buyer_retrate_omax = buyer_retrate+'_omax'
    # Flag transactions whose buyer return rate exceeds the threshold.
    df_plot[buyer_retrate_omax] = df_plot[buyer_retrate] > buyer_retrate_max
    itemized_counts = {
        is_omax: collections.Counter(grp['SaleDate_decyear'])
        for (is_omax, grp) in df_plot.groupby(by=buyer_retrate_omax)}
    itemized_counts = collections.OrderedDict(
        sorted(itemized_counts.items(), key=lambda tup: tup[0], reverse=False))
    keys = itemized_counts.keys()
    bins = int(np.ceil((df_plot['SaleDate_decyear'].max() - df_plot['SaleDate_decyear'].min())*52))
    colors = sns.light_palette(sns.color_palette()[2], n_colors=len(keys))
    plt.hist(
        [list(itemized_counts[key].elements()) for key in itemized_counts.keys()],
        bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors)
    plt.xlim(xlim)
    plt.title('Returned vs SaleDate\nby buyer return rate')
    plt.xlabel('SaleDate (decimal year)')
    plt.ylabel('Number of transactions with Returned = 1\nand buyer return rate = <rate>')
    plt.legend(
        title='Buyer return\nrate > {retrate:.0%}'.format(retrate=buyer_retrate_max),
        loc='upper left', bbox_to_anchor=(1.0, 1.0))
    plt.tight_layout(rect=rect)
    if path_plot_dir is not None:
        plt.savefig(
            os.path.join(path_plot_dir, 'heuristic2_returned1_vs_saledate_by_returnrate.png'),
            dpi=300)
    plt.show()
    # Plot frequency distribution of return rates per BuyerID
    df_plot = df[['BuyerID', buyer_retrate]].copy()
    df_plot[buyer_retrate_omax] = df_plot[buyer_retrate] > buyer_retrate_max
    itemized_counts = {
        is_omax: grp[buyer_retrate].values
        for (is_omax, grp) in df_plot.groupby(by=buyer_retrate_omax)}
    itemized_counts = collections.OrderedDict(
        sorted(itemized_counts.items(), key=lambda tup: tup[0], reverse=False))
    keys = itemized_counts.keys()
    bins = 20
    colors = sns.light_palette(sns.color_palette()[2], n_colors=len(keys))
    plt.hist(
        [itemized_counts[key] for key in itemized_counts.keys()],
        bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors)
    plt.title('Return rate per transaction\nfrequency distribution')
    plt.xlabel('Return rate')
    plt.ylabel('Number of transactions with\nbuyer return rate = X')
    plt.legend(
        title='Buyer return\nrate > {retrate:.0%}'.format(retrate=buyer_retrate_max),
        loc='upper left', bbox_to_anchor=(1.0, 1.0))
    plt.tight_layout(rect=rect)
    if path_plot_dir is not None:
        plt.savefig(
            os.path.join(path_plot_dir, 'heuristic3_returnrate_freq-dist-transaction_by_returnrate.png'),
            dpi=300)
    plt.show()
    # Plot frequency distribution of return rates per BuyerID
    # Note: Buyers can be counted twice in the histogram if they cross the
    #   buyer_retrate_max = 0.1 threshold.
    df_plot = df[['BuyerID', buyer_retrate]].copy()
    df_plot[buyer_retrate_omax] = df_plot[buyer_retrate] > buyer_retrate_max
    itemized_counts = {
        is_omax: grp[['BuyerID', buyer_retrate]].groupby(by='BuyerID').mean().values.flatten()
        for (is_omax, grp) in df_plot.groupby(by=buyer_retrate_omax)}
    itemized_counts = collections.OrderedDict(
        sorted(itemized_counts.items(), key=lambda tup: tup[0], reverse=False))
    keys = itemized_counts.keys()
    bins = 20
    colors = sns.light_palette(sns.color_palette()[2], n_colors=len(keys))
    plt.hist(
        [itemized_counts[key] for key in itemized_counts.keys()],
        bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors)
    plt.title('Return rates per buyer\nfrequency distribution')
    plt.xlabel('Return rate')
    plt.ylabel('Number of buyers with\nreturn rate = X')
    plt.legend(
        title='Buyer return\nrate > {retrate:.0%}'.format(retrate=buyer_retrate_max),
        loc='upper left', bbox_to_anchor=(1.0, 1.0))
    plt.tight_layout(rect=rect)
    if path_plot_dir is not None:
        plt.savefig(
            os.path.join(path_plot_dir, 'heuristic4_returnrate_freq-dist-buyer_by_returnrate.png'),
            dpi=300)
    plt.show()
    return None
def update_features(
    df:pd.DataFrame
    ) -> pd.DataFrame:
    r"""Update features for timeseries training.

    Recomputes the assumed-returned flag ('Returned_asm') and the cumulative
    informative priors (*_num*, *_frac*) for the string-valued categorical
    features. Unlike `create_features`, this function requires `df` to
    already be sorted by 'SaleDate' ascending (asserted below) rather than
    sorting it itself.

    Args:
        df (pandas.DataFrame): Dataframe of featurized data.
    Returns:
        df (pandas.DataFrame): Dataframe of updated featurized data.
    Raises:
        AssertionError: Raised if `df` is not sorted by 'SaleDate' ascending,
            or if the consistency relationships between 'DSEligible',
            'Returned', and 'Returned_asm' do not hold, or if the weighted
            averages of the per-category rates do not match the overall rates.
    See Also:
        create_features
    Notes:
        * BuyerID_fracReturned1DivReturnedNotNull is the return rate for a buyer.
        * 'Returned' encoding (from the checks below): -1 = null/not purchased,
          0 = purchased and not returned, 1 = purchased and returned.
    TODO:
        * Modularize script into separate helper functions.
        * Modify dataframe in place
    """
    # Check input.
    # Copy dataframe to avoid in place modification.
    df = df.copy()
    ########################################
    # Returned_asm
    # Interpretation of assumptions:
    # If DSEligible=0, then the vehicle is not eligible for a guarantee.
    # * And Returned=-1 (null) since we don't know whether or not it would have been returned,
    #   but given that it wasn't eligible, it may have been likely to have Returned=1.
    # If DSEligible=1, then the vehicle is eligible for a guarantee.
    # * And if Returned=0 then the guarantee was purchased and the vehicle was not returned.
    # * And if Returned=1 then the guarantee was purchased and the vehicle was returned.
    # * And if Returned=-1 (null) then the guarantee was not purchased.
    #   We don't know whether or not it would have been returned,
    #   but given that the dealer did not purchase, it may have been likely to have Returned=0.
    # Assume:
    # If Returned=-1 and DSEligible=0, then Returned_asm=1
    # If Returned=-1 and DSEligible=1, then Returned_asm=0
    logger.info(textwrap.dedent("""\
        Returned_asm: Assume returned status to fill nulls as new feature.
        If Returned=-1 and DSEligible=0, then Returned_asm=1 (assumes low P(resale|buyer, car))
        If Returned=-1 and DSEligible=1, then Returned_asm=0 (assumes high P(resale|buyer, car))"""))
    # Start from the observed status, then fill the nulls per the assumptions.
    df['Returned_asm'] = df['Returned']
    df.loc[
        np.logical_and(df['Returned'] == -1, df['DSEligible'] == 0),
        'Returned_asm'] = 1
    df.loc[
        np.logical_and(df['Returned'] == -1, df['DSEligible'] == 1),
        'Returned_asm'] = 0
    # Log contingency tables so the assumption's effect can be audited.
    logger.info("Relationship between DSEligible and Returned:\n{pt}".format(
        pt=pd.pivot_table(
            df[['DSEligible', 'Returned']].astype(str),
            index='DSEligible', columns='Returned',
            aggfunc=len, margins=True, dropna=False)))
    logger.info("Relationship between DSEligible and Returned_asm:\n{pt}".format(
        pt=pd.pivot_table(
            df[['DSEligible', 'Returned_asm']].astype(str),
            index='DSEligible', columns='Returned_asm',
            aggfunc=len, margins=True, dropna=False)))
    logger.info("Relationship between Returned and Returned_asm:\n{pt}".format(
        pt=pd.pivot_table(
            df[['Returned', 'Returned_asm']].astype(str),
            index='Returned', columns='Returned_asm',
            aggfunc=len, margins=True, dropna=False)))
    ########################################
    # BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:
    # Make cumulative informative priors (*_num*, *_frac*) for string features.
    logger.info(textwrap.dedent("""\
        BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:
        Make cumulative informative priors (*_num*, *_frac*) for string features."""))
    # Cumulative features require sorting by time.
    # Require the caller to have sorted by 'SaleDate' ascending already
    # (create_features sorts; this function only verifies).
    assert (df['SaleDate'].diff().iloc[1:] >= np.timedelta64(0, 'D')).all()
    for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:
        logger.info("Processing {col}".format(col=col))
        ####################
        # Cumulative count of transactions and DSEligible:
        # Cumulative count of transactions (yes including current).
        df[col+'_numTransactions'] = df[[col]].groupby(by=col).cumcount().astype(int) + 1
        df[col+'_numTransactions'].fillna(value=1, inplace=True)
        # Cumulative count of transactions that were DealShield-eligible (yes including current).
        df[col+'_numDSEligible1'] = df[[col, 'DSEligible']].groupby(by=col)['DSEligible'].cumsum().astype(int)
        df[col+'_numDSEligible1'].fillna(value=0, inplace=True)
        # Cumulative ratio of transactions that were DealShield-eligible (0=bad, 1=good).
        df[col+'_fracDSEligible1DivTransactions'] = (df[col+'_numDSEligible1']/df[col+'_numTransactions'])
        df[col+'_fracDSEligible1DivTransactions'].fillna(value=1, inplace=True)
        ####################
        # DSEligible and Returned
        # Note:
        # * DealShield-purchased ==> Returned != -1 (not null)
        # * below requires
        #   DSEligible == 0 ==> Returned == -1 (is null)
        #   Returned != -1 (not null) ==> DSEligible == 1
        assert (df.loc[df['DSEligible']==0, 'Returned'] == -1).all()
        assert (df.loc[df['Returned']!=-1, 'DSEligible'] == 1).all()
        # Cumulative count of transactions that were DealShield-eligible and DealShield-purchased.
        df_tmp = df[[col, 'Returned']].copy()
        df_tmp['ReturnedNotNull'] = df_tmp['Returned'] != -1
        df[col+'_numReturnedNotNull'] = df_tmp[[col, 'ReturnedNotNull']].groupby(by=col)['ReturnedNotNull'].cumsum().astype(int)
        df[col+'_numReturnedNotNull'].fillna(value=0, inplace=True)
        del df_tmp
        # Cumulative ratio of DealShield-eligible transactions that were DealShield-purchased (0=mode).
        df[col+'_fracReturnedNotNullDivDSEligible1'] = df[col+'_numReturnedNotNull']/df[col+'_numDSEligible1']
        df[col+'_fracReturnedNotNullDivDSEligible1'].fillna(value=0, inplace=True)
        # Cumulative count of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned.
        df_tmp = df[[col, 'Returned']].copy()
        df_tmp['Returned1'] = df_tmp['Returned'] == 1
        df[col+'_numReturned1'] = df_tmp[[col, 'Returned1']].groupby(by=col)['Returned1'].cumsum().astype(int)
        df[col+'_numReturned1'].fillna(value=0, inplace=True)
        del df_tmp
        # Cumulative ratio of DealShield-eligible, DealShield-purchased transactions that were DealShield-returned (0=good, 1=bad).
        # Note: BuyerID_fracReturned1DivReturnedNotNull is the cumulative return rate for a buyer.
        df[col+'_fracReturned1DivReturnedNotNull'] = df[col+'_numReturned1']/df[col+'_numReturnedNotNull']
        df[col+'_fracReturned1DivReturnedNotNull'].fillna(value=0, inplace=True)
        # Check that weighted average of return rate equals overall return rate.
        # Note: Requires groups sorted by date, ascending.
        # The last cumulative value per group is its final count/rate, so
        # sum(rate_g * n_g) / sum(n_g) over groups must equal the global rate.
        assert np.isclose(
            (df[[col, col+'_fracReturned1DivReturnedNotNull', col+'_numReturnedNotNull']].groupby(by=col).last().product(axis=1).sum()/\
            df[[col, col+'_numReturnedNotNull']].groupby(by=col).last().sum()).values[0],
            sum(df['Returned']==1)/sum(df['Returned'] != -1),
            equal_nan=True)
        ####################
        # DSEligible and Returned_asm
        # NOTE:
        # * Below requires
        #   DSEligible == 0 ==> Returned_asm == 1
        #   Returned_asm == 0 ==> DSEligible == 1
        assert (df.loc[df['DSEligible']==0, 'Returned_asm'] == 1).all()
        assert (df.loc[df['Returned_asm']==0, 'DSEligible'] == 1).all()
        # Cumulative number of transactions that were assumed to be returned.
        df_tmp = df[[col, 'Returned_asm']].copy()
        df_tmp['Returnedasm1'] = df_tmp['Returned_asm'] == 1
        df[col+'_numReturnedasm1'] = df_tmp[[col, 'Returnedasm1']].groupby(by=col)['Returnedasm1'].cumsum().astype(int)
        df[col+'_numReturnedasm1'].fillna(value=0, inplace=True)
        del df_tmp
        # Cumulative ratio of transactions that were assumed to be returned (0=mode).
        df[col+'_fracReturnedasm1DivTransactions'] = df[col+'_numReturnedasm1']/df[col+'_numTransactions']
        df[col+'_fracReturnedasm1DivTransactions'].fillna(value=0, inplace=True)
        # Check that weighted average of assumed return rate equals overall assumed return rate.
        assert np.isclose(
            (df[[col, col+'_fracReturnedasm1DivTransactions', col+'_numTransactions']].groupby(by=col).last().product(axis=1).sum()/\
            df[[col, col+'_numTransactions']].groupby(by=col).last().sum()).values[0],
            sum(df['Returned_asm']==1)/sum(df['Returned_asm'] != -1),
            equal_nan=True)
        # Note:
        # * Number of transactions that were DealShield-eligible and assumed to be returned ==
        #   number of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned
        #   (numReturned1)
    return df
def update_features_append(
df_prev:pd.DataFrame,
df_next:pd.DataFrame,
debug:bool=False
) -> pd.DataFrame:
r"""Update features and merge for timeseries training.
Args:
df_prev (pandas.DataFrame): Dataframe of old data.
df_next (pandas.DataFrame): Dataframe of new data to be updated
and appended to df_prev.
debug (bool, optional, False): Flag to enforce assertions.
True: Execute assertions. Slower runtime by 3x.
False (default): Do not execute assertions. Faster runtime.
Returns:
df (pandas.DataFrame): Dataframe of updated, appended data.
See Also:
create_features
Notes:
* Only df_next is updated.
* BuyerID_fracReturned1DivReturnedNotNull is the return rate for a buyer.
TODO:
* Modularize script into separate helper functions.
* Modify dataframe in place
"""
# Check input.
# Copy dataframe to avoid in place modification.
(df_prev, df_next) = (df_prev.copy(), df_next.copy())
########################################
# Returned_asm
# Interpretation of assumptions:
# If DSEligible=0, then the vehicle is not eligible for a guarantee.
# * And Returned=-1 (null) since we don't know whether or not it would have been returned,
# but given that it wasn't eligible, it may have been likely to have Returned=1.
# If DSEligible=1, then the vehicle is eligible for a guarantee.
# * And if Returned=0 then the guarantee was purchased and the vehicle was not returned.
# * And if Returned=1 then the guarantee was purchased and the vehicle was returned.
# * And if Returned=-1 (null) then the guarantee was not purchased.
# We don't know whether or not it would have been returned,
# but given that the dealer did not purchase, it may have been likely to have Returned=0.
# Assume:
# If Returned=-1 and DSEligible=0, then Returned_asm=1
# If Returned=-1 and DSEligible=1, then Returned_asm=0
logger.info(textwrap.dedent("""\
Returned_asm: Assume returned status to fill nulls as new feature.
If Returned=-1 and DSEligible=0, then Returned_asm=1 (assumes low P(resale|buyer, car))
If Returned=-1 and DSEligible=1, then Returned_asm=0 (assumes high P(resale|buyer, car))"""))
df_next['Returned_asm'] = df_next['Returned']
df_next.loc[
np.logical_and(df_next['Returned'] == -1, df_next['DSEligible'] == 0),
'Returned_asm'] = 1
df_next.loc[
np.logical_and(df_next['Returned'] == -1, df_next['DSEligible'] == 1),
'Returned_asm'] = 0
logger.info("Relationship between DSEligible and Returned:\n{pt}".format(
pt=pd.pivot_table(
df_next[['DSEligible', 'Returned']].astype(str),
index='DSEligible', columns='Returned',
aggfunc=len, margins=True, dropna=False)))
logger.info("Relationship between DSEligible and Returned_asm:\n{pt}".format(
pt=pd.pivot_table(
df_next[['DSEligible', 'Returned_asm']].astype(str),
index='DSEligible', columns='Returned_asm',
aggfunc=len, margins=True, dropna=False)))
logger.info("Relationship between Returned and Returned_asm:\n{pt}".format(
pt=pd.pivot_table(
df_next[['Returned', 'Returned_asm']].astype(str),
index='Returned', columns='Returned_asm',
aggfunc=len, margins=True, dropna=False)))
########################################
# BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:
# Make cumulative informative priors (*_num*, *_frac*) for string features.
logger.info(textwrap.dedent("""\
BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:
Make cumulative informative priors (*_num*, *_frac*) for string features."""))
# Cumulative features require sorting by time.
if debug:
assert (df_prev['SaleDate'].diff().iloc[1:] >= np.timedelta64(0, 'D')).all()
assert (df_next['SaleDate'].diff().iloc[1:] >= np.timedelta64(0, 'D')).all()
for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:
logger.info("Processing {col}".format(col=col))
prev_nums = df_prev.groupby(by=col).last()
####################
# Cumulative count of transactions and DSEligible:
# Cumulative count of transactions (yes including current).
df_next[col+'_numTransactions'] = df_next[[col]].groupby(by=col).cumcount().astype(int) + 1
df_next[col+'_numTransactions'].fillna(value=1, inplace=True)
df_next[col+'_numTransactions'] += df_next[col].map(prev_nums[col+'_numTransactions']).fillna(value=0)
# Cumulative count of transations that were DealShield-eligible (yes including current).
df_next[col+'_numDSEligible1'] = df_next[[col, 'DSEligible']].groupby(by=col)['DSEligible'].cumsum().astype(int)
df_next[col+'_numDSEligible1'].fillna(value=0, inplace=True)
df_next[col+'_numDSEligible1'] += df_next[col].map(prev_nums[col+'_numDSEligible1']).fillna(value=0)
# Cumulative ratio of transactions that were DealShield-eligible (0=bad, 1=good).
df_next[col+'_fracDSEligible1DivTransactions'] = df_next[col+'_numDSEligible1']/df_next[col+'_numTransactions']
df_next[col+'_fracDSEligible1DivTransactions'].fillna(value=1, inplace=True)
####################
# DSEligible and Returned
# Note:
# * DealShield-purchased ==> Returned != -1 (not null)
# * below requires
# DSEligible == 0 ==> Returned == -1 (is null)
# Returned != -1 (not null) ==> DSEligible == 1
if debug:
assert (df_prev.loc[df_prev['DSEligible']==0, 'Returned'] == -1).all()
assert (df_prev.loc[df_prev['Returned']!=-1, 'DSEligible'] == 1).all()
assert (df_next.loc[df_next['DSEligible']==0, 'Returned'] == -1).all()
assert (df_next.loc[df_next['Returned']!=-1, 'DSEligible'] == 1).all()
# Cumulative count of transactions that were DealShield-eligible and DealShield-purchased.
df_tmp = df_next[[col, 'Returned']].copy()
df_tmp['ReturnedNotNull'] = df_tmp['Returned'] != -1
df_next[col+'_numReturnedNotNull'] = df_tmp[[col, 'ReturnedNotNull']].groupby(by=col)['ReturnedNotNull'].cumsum().astype(int)
df_next[col+'_numReturnedNotNull'].fillna(value=0, inplace=True)
df_next[col+'_numReturnedNotNull'] += df_next[col].map(prev_nums[col+'_numReturnedNotNull']).fillna(value=0)
del df_tmp
# Cumulative ratio of DealShield-eligible transactions that were DealShield-purchased (0=mode).
df_next[col+'_fracReturnedNotNullDivDSEligible1'] = df_next[col+'_numReturnedNotNull']/df_next[col+'_numDSEligible1']
df_next[col+'_fracReturnedNotNullDivDSEligible1'].fillna(value=0, inplace=True)
# Cumulative count of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned.
df_tmp = df_next[[col, 'Returned']].copy()
df_tmp['Returned1'] = df_tmp['Returned'] == 1
df_next[col+'_numReturned1'] = df_tmp[[col, 'Returned1']].groupby(by=col)['Returned1'].cumsum().astype(int)
df_next[col+'_numReturned1'].fillna(value=0, inplace=True)
df_next[col+'_numReturned1'] += df_next[col].map(prev_nums[col+'_numReturned1']).fillna(value=0)
del df_tmp
# Cumulative ratio of DealShield-eligible, DealShield-purchased transactions that were DealShield-returned (0=good, 1=bad).
# Note: BuyerID_fracReturned1DivReturnedNotNull is the cumulative return rate for a buyer.
df_next[col+'_fracReturned1DivReturnedNotNull'] = df_next[col+'_numReturned1']/df_next[col+'_numReturnedNotNull']
df_next[col+'_fracReturned1DivReturnedNotNull'].fillna(value=0, inplace=True)
# Check that weighted average of return rate equals overall return rate.
# Note: Requires groups sorted by date, ascending.
if debug:
df_tmp = df_prev.append(df_next)
assert np.isclose(
(df_tmp[[col, col+'_fracReturned1DivReturnedNotNull', col+'_numReturnedNotNull']].groupby(by=col).last().product(axis=1).sum()/\
df_tmp[[col, col+'_numReturnedNotNull']].groupby(by=col).last().sum()).values[0],
sum(df_tmp['Returned']==1)/sum(df_tmp['Returned'] != -1),
equal_nan=True)
del df_tmp
####################
# DSEligible and Returned_asm
# NOTE:
# * Below requires
# DSEligible == 0 ==> Returned_asm == 1
# Returned_asm == 0 ==> DSEligible == 1
if debug:
assert (df_prev.loc[df_prev['DSEligible']==0, 'Returned_asm'] == 1).all()
assert (df_prev.loc[df_prev['Returned_asm']==0, 'DSEligible'] == 1).all()
assert (df_next.loc[df_next['DSEligible']==0, 'Returned_asm'] == 1).all()
assert (df_next.loc[df_next['Returned_asm']==0, 'DSEligible'] == 1).all()
# Cumulative number of transactions that were assumed to be returned.
df_tmp = df_next[[col, 'Returned_asm']].copy()
df_tmp['Returnedasm1'] = df_tmp['Returned_asm'] == 1
df_next[col+'_numReturnedasm1'] = df_tmp[[col, 'Returnedasm1']].groupby(by=col)['Returnedasm1'].cumsum().astype(int)
df_next[col+'_numReturnedasm1'].fillna(value=0, inplace=True)
df_next[col+'_numReturnedasm1'] += df_next[col].map(prev_nums[col+'_numReturnedasm1']).fillna(value=0)
del df_tmp
# Cumulative ratio of transactions that were assumed to be returned (0=mode).
df_next[col+'_fracReturnedasm1DivTransactions'] = df_next[col+'_numReturnedasm1']/df_next[col+'_numTransactions']
df_next[col+'_fracReturnedasm1DivTransactions'].fillna(value=0, inplace=True)
# Check that weighted average of assumed return rate equals overall assumed return rate.
if debug:
df_tmp = df_prev.append(df_next)
assert np.isclose(
(df_tmp[[col, col+'_fracReturnedasm1DivTransactions', col+'_numTransactions']].groupby(by=col).last().product(axis=1).sum()/\
df_tmp[[col, col+'_numTransactions']].groupby(by=col).last().sum()).values[0],
sum(df_tmp['Returned_asm']==1)/sum(df_tmp['Returned_asm'] != -1),
equal_nan=True)
del df_tmp
# Note:
# * Number of transactions that were DealShield-eligible and assumed to be returned ==
# number of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned
# (numReturned1)
# Return updated, appended dataframe.
return df_prev.append(df_next)
def create_features_new_data(
    df_prev:pd.DataFrame,
    df_next:pd.DataFrame,
    path_data_dir:str,
    debug:bool=False
    ) -> pd.DataFrame:
    r"""Create features for post-ETL data.
    Args:
        df_prev (pandas.DataFrame): Dataframe of old data.
        df_next (pandas.DataFrame): Dataframe of new data with missing target column
            ('Returned') for which features are extracted.
        path_data_dir (str): Path to data directory for caching geocode shelf file.
        debug (bool, optional, False): Flag to enforce assertions.
            True: Execute assertions. Slower runtime by 3x.
            False (default): Do not execute assertions. Faster runtime.
    Returns:
        df (pandas.DataFrame): Dataframe of extracted data.
    Raises:
        IOError: Raised if `path_data_dir` does not exist.
    See Also:
        etl
    Notes:
        * BuyerID_fracReturned1DivReturnedNotNull is the return rate for a buyer.
        * df_prev and df_next have overlapping indexes.
        * Failed geocode lookups are cached as `None` in the shelf and are not
          retried on later runs; delete the shelf file to force a re-query.
    TODO:
        * Modularize script into separate helper functions.
        * Modify dataframe in place
    """
    # Check input.
    # Copy dataframe to avoid in place modification.
    (df_prev, df_next) = (df_prev.copy(), df_next.copy())
    # Check file path.
    if not os.path.exists(path_data_dir):
        raise IOError(textwrap.dedent("""\
            Path does not exist:
            path_data_dir = {path}""".format(
                path=path_data_dir)))
    ########################################
    # Returned_asm
    # Interpretation of assumptions:
    # If DSEligible=0, then the vehicle is not eligible for a guarantee.
    #   * And Returned=-1 (null) since we don't know whether or not it would have been returned,
    #     but given that it wasn't eligible, it may have been likely to have Returned=1.
    # If DSEligible=1, then the vehicle is eligible for a guarantee.
    #   * And if Returned=0 then the guarantee was purchased and the vehicle was not returned.
    #   * And if Returned=1 then the guarantee was purchased and the vehicle was returned.
    #   * And if Returned=-1 (null) then the guarantee was not purchased.
    #     We don't know whether or not it would have been returned,
    #     but given that the dealer did not purchase, it may have been likely to have Returned=0.
    # Assume:
    # If Returned=-1 and DSEligible=0, then Returned_asm=1
    # If Returned=-1 and DSEligible=1, then Returned_asm=0
    # For new data:
    # If DSEligible=0, then Returned=-1, then Returned_asm=1
    # If DSEligible=1, then Returned_asm is the average of the buyer's Returned_asm, or if new buyer, then 0.
    logger.info(textwrap.dedent("""\
        Returned_asm: Assume returned status to fill nulls as new feature.
        If Returned=-1 and DSEligible=0, then Returned_asm=1 (assumes low P(resale|buyer, car))
        If Returned=-1 and DSEligible=1, then Returned_asm=0 (assumes high P(resale|buyer, car))"""))
    logger.info(textwrap.dedent("""\
        For new data:
        If DSEligible=0, then Returned=-1, then Returned_asm=1
        If DSEligible=1, then Returned_asm is the average of the buyer's Returned_asm, or if new buyer, then 0."""))
    df_next.loc[df_next['DSEligible']==0, 'Returned_asm'] = 1
    # Informative prior from historical data: mean Returned_asm per buyer.
    prev_nums = df_prev.loc[df_prev['DSEligible']==1, ['BuyerID', 'Returned_asm']].groupby(by='BuyerID').mean()
    df_next.loc[df_next['DSEligible']==1, 'Returned_asm'] = \
        df_next.loc[df_next['DSEligible']==1, 'BuyerID'].map(prev_nums['Returned_asm']).fillna(value=0)
    ########################################
    # SellingLocation_lat, SellingLocation_lon
    # Cell takes ~1 min to execute if shelf does not exist.
    # Google API limit: https://developers.google.com/maps/documentation/geocoding/usage-limits
    logger.info(textwrap.dedent("""\
        SellingLocation: Geocode.
        Scraping webpages for addresses and looking up latitude, longitude coordinates."""))
    path_shelf = os.path.join(path_data_dir, 'sellloc_geoloc.shelf')
    seconds_per_query = 1.0/50.0 # Google API limit
    sellloc_geoloc = dict()
    with shelve.open(filename=path_shelf, flag='c') as shelf:
        for loc in df_next['SellingLocation'].unique():
            if loc in shelf:
                # Cache hit: rebuild the geopy Location from the stored raw dict.
                # NOTE(review): a cached `None` (previous failure) is reused and never retried.
                raw = shelf[loc]
                if raw is None:
                    location = raw
                else:
                    address = raw['formatted_address']
                    latitude = raw['geometry']['location']['lat']
                    longitude = raw['geometry']['location']['lng']
                    location = geopy.location.Location(
                        address=address, point=(latitude, longitude), raw=raw)
            else:
                # Cache miss: scrape the auction location page for a street address,
                # then geocode it (rate-limited to respect the Google API quota).
                url = r'https://www.manheim.com/locations/{loc}/events'.format(loc=loc)
                page = requests.get(url)
                tree = bs4.BeautifulSoup(page.text, 'lxml')
                address = tree.find(name='p', class_='loc_address').get_text().strip()
                try:
                    components = {
                        'country': 'United States',
                        'postal_code': address.split()[-1]}
                    location = geopy.geocoders.GoogleV3().geocode(
                        query=address,
                        exactly_one=True,
                        components=components)
                # Catch Exception (not bare `except:`) so that KeyboardInterrupt and
                # SystemExit still propagate; best-effort geocoding falls back to None.
                except Exception:
                    logger.warning(textwrap.dedent("""\
                        Exception raised. Setting {loc} geo location to `None`
                        sys.exc_info() =
                        {exc}""".format(loc=loc, exc=sys.exc_info())))
                    location = None
                finally:
                    time.sleep(seconds_per_query)
                if location is None:
                    shelf[loc] = location
                else:
                    shelf[loc] = location.raw
            sellloc_geoloc[loc] = location
    logger.info("Mapping SellingLocation to latitude, longitude coordinates.")
    # Unknown locations default to (0.0, 0.0).
    sellloc_lat = {
        sellloc: (geoloc.latitude if geoloc is not None else 0.0)
        for (sellloc, geoloc) in sellloc_geoloc.items()}
    sellloc_lon = {
        sellloc: (geoloc.longitude if geoloc is not None else 0.0)
        for (sellloc, geoloc) in sellloc_geoloc.items()}
    df_next['SellingLocation_lat'] = df_next['SellingLocation'].map(sellloc_lat)
    df_next['SellingLocation_lon'] = df_next['SellingLocation'].map(sellloc_lon)
    # # TODO: experiment with one-hot encoding (problems is that it doesn't scale)
    # df_next = pd.merge(
    #     left=df_next,
    #     right=pd.get_dummies(df_next['SellingLocation'], prefix='SellingLocation'),
    #     how='inner',
    #     left_index=True,
    #     right_index=True)
    ########################################
    # JDPowersCat: One-hot encoding
    # TODO: Estimate sizes from Wikipedia, e.g. https://en.wikipedia.org/wiki/Vehicle_size_class.
    logger.info("JDPowersCat: One-hot encoding.")
    # Cast to string, replacing 'nan' with 'UNKNOWN'.
    df_next['JDPowersCat'] = (df_next['JDPowersCat'].astype(str)).str.replace(' ', '').apply(
        lambda cat: 'UNKNOWN' if cat == 'nan' else cat)
    # One-hot encoding.
    df_next = pd.merge(
        left=df_next,
        right=pd.get_dummies(df_next['JDPowersCat'], prefix='JDPowersCat'),
        left_index=True,
        right_index=True)
    ########################################
    # LIGHT_N0G1Y2R3
    # Rank lights by warning level.
    logger.info("LIGHT_N0G1Y2R3: Rank lights by warning level (null=0, green=1, yellow=2, red=3).")
    df_next['LIGHT_N0G1Y2R3'] = df_next['LIGHTG']*1 + df_next['LIGHTY']*2 + df_next['LIGHTR']*3
    ########################################
    # SaleDate_*: Extract timeseries features.
    logger.info("SaleDate: Extract timeseries features.")
    df_next['SaleDate_dow'] = df_next['SaleDate'].dt.dayofweek
    df_next['SaleDate_doy'] = df_next['SaleDate'].dt.dayofyear
    df_next['SaleDate_day'] = df_next['SaleDate'].dt.day
    df_next['SaleDate_decyear'] = df_next['SaleDate'].dt.year + (df_next['SaleDate'].dt.dayofyear-1)/366
    ########################################
    # BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:
    # Make cumulative informative priors (*_num*, *_frac*) for string features.
    logger.info(textwrap.dedent("""\
        BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:
        Make cumulative informative priors (*_num*, *_frac*) for string features."""))
    # Cumulative features require sorting by time.
    # Note: df_prev and df_next have overlapping indexes after `reset_index`.
    df_next.sort_values(by=['SaleDate'], inplace=True)
    df_next.reset_index(drop=True, inplace=True)
    if debug:
        assert (df_prev['SaleDate'].diff().iloc[1:] >= np.timedelta64(0, 'D')).all()
        assert (df_next['SaleDate'].diff().iloc[1:] >= np.timedelta64(0, 'D')).all()
    for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:
        logger.info("Processing {col}".format(col=col))
        # Last (most recent) cumulative counts per group from the historical data;
        # used as the starting offset for the new data's cumulative features.
        prev_nums = df_prev.groupby(by=col).last()
        ####################
        # Cumulative count of transactions and DSEligible:
        # Cumulative count of transactions (yes including current).
        df_next[col+'_numTransactions'] = df_next[[col]].groupby(by=col).cumcount().astype(int) + 1
        df_next[col+'_numTransactions'].fillna(value=1, inplace=True)
        df_next[col+'_numTransactions'] += df_next[col].map(prev_nums[col+'_numTransactions']).fillna(value=0)
        # Cumulative count of transactions that were DealShield-eligible (yes including current).
        df_next[col+'_numDSEligible1'] = df_next[[col, 'DSEligible']].groupby(by=col)['DSEligible'].cumsum().astype(int)
        df_next[col+'_numDSEligible1'].fillna(value=0, inplace=True)
        df_next[col+'_numDSEligible1'] += df_next[col].map(prev_nums[col+'_numDSEligible1']).fillna(value=0)
        # Cumulative ratio of transactions that were DealShield-eligible (0=bad, 1=good).
        df_next[col+'_fracDSEligible1DivTransactions'] = df_next[col+'_numDSEligible1']/df_next[col+'_numTransactions']
        df_next[col+'_fracDSEligible1DivTransactions'].fillna(value=1, inplace=True)
        ####################
        # DSEligible and Returned
        # Note:
        # * DealShield-purchased ==> Returned != -1 (not null)
        # * below requires
        #   DSEligible == 0 ==> Returned == -1 (is null)
        #   Returned != -1 (not null) ==> DSEligible == 1
        if debug:
            assert (df_prev.loc[df_prev['DSEligible']==0, 'Returned'] == -1).all()
            assert (df_prev.loc[df_prev['Returned']!=-1, 'DSEligible'] == 1).all()
        # Cumulative count of transactions that were DealShield-eligible and DealShield-purchased.
        # Note: For new data, 'Returned' is unknown, so only the historical counts carry over.
        df_next[col+'_numReturnedNotNull'] = df_next[col].map(prev_nums[col+'_numReturnedNotNull']).fillna(value=0)
        # Cumulative ratio of DealShield-eligible transactions that were DealShield-purchased (0=mode).
        df_next[col+'_fracReturnedNotNullDivDSEligible1'] = df_next[col+'_numReturnedNotNull']/df_next[col+'_numDSEligible1']
        df_next[col+'_fracReturnedNotNullDivDSEligible1'].fillna(value=0, inplace=True)
        # Cumulative count of transactions that were DealShield-eligible and DealShield-purchased and DealShield-returned.
        df_next[col+'_numReturned1'] = df_next[col].map(prev_nums[col+'_numReturned1']).fillna(value=0)
        # Cumulative ratio of DealShield-eligible, DealShield-purchased transactions that were DealShield-returned (0=good, 1=bad).
        # Note: BuyerID_fracReturned1DivReturnedNotNull is the cumulative return rate for a buyer.
        df_next[col+'_fracReturned1DivReturnedNotNull'] = df_next[col+'_numReturned1']/df_next[col+'_numReturnedNotNull']
        df_next[col+'_fracReturned1DivReturnedNotNull'].fillna(value=0, inplace=True)
        # Check that weighted average of return rate equals overall return rate.
        # Note: Requires groups sorted by date, ascending.
        if debug:
            assert np.isclose(
                (df_prev[[col, col+'_fracReturned1DivReturnedNotNull', col+'_numReturnedNotNull']].groupby(by=col).last().product(axis=1).sum()/\
                 df_prev[[col, col+'_numReturnedNotNull']].groupby(by=col).last().sum()).values[0],
                sum(df_prev['Returned']==1)/sum(df_prev['Returned'] != -1),
                equal_nan=True)
        ####################
        # DSEligible and Returned_asm
        # NOTE:
        # * Below requires
        #   DSEligible == 0 ==> Returned_asm == 1
        #   Returned_asm == 0 ==> DSEligible == 1
        if debug:
            assert (df_prev.loc[df_prev['DSEligible']==0, 'Returned_asm'] == 1).all()
            assert (df_prev.loc[df_prev['Returned_asm']==0, 'DSEligible'] == 1).all()
            assert (df_next.loc[df_next['DSEligible']==0, 'Returned_asm'] == 1).all()
            assert (df_next.loc[df_next['Returned_asm']==0, 'DSEligible'] == 1).all()
        # Cumulative number of transactions that were assumed to be returned.
        # Note: For new data, 'Returned_asm' may be a float.
        df_tmp = df_next[[col, 'Returned_asm']].copy()
        df_tmp['Returnedasm1'] = df_tmp['Returned_asm']
        df_next[col+'_numReturnedasm1'] = df_tmp[[col, 'Returnedasm1']].groupby(by=col)['Returnedasm1'].cumsum()
        df_next[col+'_numReturnedasm1'].fillna(value=0, inplace=True)
        df_next[col+'_numReturnedasm1'] += df_next[col].map(prev_nums[col+'_numReturnedasm1']).fillna(value=0)
        del df_tmp
        # Cumulative ratio of transactions that were assumed to be returned (0=mode).
        df_next[col+'_fracReturnedasm1DivTransactions'] = df_next[col+'_numReturnedasm1']/df_next[col+'_numTransactions']
        df_next[col+'_fracReturnedasm1DivTransactions'].fillna(value=0, inplace=True)
        # Check that weighted average of assumed return rate equals overall assumed return rate.
        if debug:
            assert np.isclose(
                (df_prev[[col, col+'_fracReturnedasm1DivTransactions', col+'_numTransactions']].groupby(by=col).last().product(axis=1).sum()/\
                 df_prev[[col, col+'_numTransactions']].groupby(by=col).last().sum()).values[0],
                sum(df_prev['Returned_asm']==1)/sum(df_prev['Returned_asm'] != -1),
                equal_nan=True)
        # Note:
        # * Number of transactions that were DealShield-eligible and assumed to be returned ==
        #   number of transactions that were DealShield-eligible and DealShield-purchased and DealShield-returned
        #   (numReturned1)
    return df_next
def create_pipeline_model(
df:pd.DataFrame,
path_data_dir:str,
show_plots:bool=False):
r"""Create pipeline model.
"""
# Check arguments.
path_plot_dir = os.path.join(path_data_dir, 'plot_model')
########################################
# print('#'*80)
# Define target and features
target = 'Returned'
features = set(df.columns[np.logical_or(df.dtypes=='int64', df.dtypes=='float64')])
features.difference_update([target])
features = sorted(features)
# print('Features:')
# print(features)
# print()
########################################
# print('#'*80)
# print(textwrap.dedent("""\
# `Container`: Create an empty container class and
# dynamically allocate attributes to hold variables for specific steps
# of the pipeline. """))
Container = utils.utils.Container
step = Container()
# print(textwrap.dedent("""\
# `step.s0.[df,ds]_[features,target]`: Save initial state of features, target."""))
step.s0 = Container()
step.s0.dfs = Container()
step.s0.dfs.df_features = df[features].copy()
step.s0.dfs.ds_target = df[target].copy()
# TODO: REDO after this point with step.sN.dfs.[df_features,ds_target]
# rather than redefining [df_features,ds_target]
df_features = step.s0.dfs.df_features
ds_target = step.s0.dfs.ds_target
# print()
########################################
# print('#'*80)
# print(textwrap.dedent("""\
# `transformer_scaler`, `transformer_pca`: Scale data
# then make groups of similar records with k-means clustering,
# both with and without PCA. Use the silhouette score to determine
# the number of clusters.
# """))
# time_start = time.perf_counter()
# Scale data prior to comparing clusters with/without PCA.
# Note: Using sklearn.preprocessing.RobustScaler with
# sklearn.decomposition.IncrementalPCA(whiten=False)
# is often the most stable (slowly varying scores)
# with highest scores. Centroid agreement can still be
# off due to outliers.
transformer_scaler = sk_pre.RobustScaler()
features_scaled = transformer_scaler.fit_transform(X=df_features)
transformer_pca = sk_dc.IncrementalPCA(whiten=False)
features_scaled_pca = transformer_pca.fit_transform(X=features_scaled)
print("`columns.pkl`, `transformer_scaler.pkl`, `transformer_pca.pkl`: Save column order and transformers.")
path_data = path_data_dir
path_cols = os.path.join(path_data, 'columns.pkl')
with open(path_cols, mode='wb') as fobj:
pickle.dump(obj=df_features.columns, file=fobj)
path_tform_scl = os.path.join(path_data, 'transformer_scaler.pkl')
with open(path_tform_scl, mode='wb') as fobj:
pickle.dump(obj=transformer_scaler, file=fobj)
path_tform_pca = os.path.join(path_data, 'transformer_pca.pkl')
with open(path_tform_pca, mode='wb') as fobj:
pickle.dump(obj=transformer_pca, file=fobj)
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# print("Plot scores for scaled features:")
# utils.utils.calc_silhouette_scores(
# df_features=features_scaled, n_clusters_min=2, n_clusters_max=10,
# size_sub=None, n_scores=10, show_progress=True, show_plot=True)
# print("Plot scores for scaled PCA features:")
# utils.utils.calc_silhouette_scores(
# df_features=features_scaled_pca, n_clusters_min=2, n_clusters_max=10,
# size_sub=None, n_scores=10, show_progress=True, show_plot=True)
# time_stop = time.perf_counter()
# print("Time elapsed (sec) = {diff:.1f}".format(diff=time_stop-time_start))
# print()
########################################
# print('#'*80)
# print(textwrap.dedent("""\
# `transformer_kmeans`, `transformer_kmeans_pca`:
# Fit k-means to the data with/without PCA and
# compare the centroids for the clusters."""))
# TODO: Fix plot. Assign clusters IDs in a deterministic way so that
# cluster 0 raw matches cluster 0 transformed.
# time_start = time.perf_counter()
n_clusters = 2 # from silhouette scores
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Cluster scaled features with/without PCA using minibatch k-means
transformer_kmeans = sk_cl.MiniBatchKMeans(n_clusters=n_clusters)
transformer_kmeans.fit(X=features_scaled)
transformer_kmeans_pca = sk_cl.MiniBatchKMeans(n_clusters=n_clusters)
transformer_kmeans_pca.fit(X=features_scaled_pca)
print("`transformer_kmeans.pkl`, `transformer_kmeans_pca.pkl`: Save transformers.")
path_tform_km = os.path.join(path_data, 'transformer_kmeans.pkl')
with open(path_tform_km, mode='wb') as fobj:
pickle.dump(obj=transformer_kmeans, file=fobj)
path_tform_km_pca = os.path.join(path_data, 'transformer_kmeans_pca.pkl')
with open(path_tform_km_pca, mode='wb') as fobj:
pickle.dump(obj=transformer_kmeans_pca, file=fobj)
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # Plot clusters in scaled feature space.
# centroids = transformer_kmeans.cluster_centers_
# transformed_centroids = transformer_pca.inverse_transform(transformer_kmeans_pca.cluster_centers_)
# (col_1, col_0) = np.argsort(np.var(features_scaled, axis=0))[-2:]
# (name_1, name_0) = (df_features.columns.values[col_1], df_features.columns.values[col_0])
# plt.title("Data and centroids within scaled feature space")
# tfmask_gt01 = df_features[buyer_retrate] > buyer_retrate_max
# plt.plot(features_scaled[tfmask_gt01, col_0], features_scaled[tfmask_gt01, col_1],
# marker='o', linestyle='', color=sns.color_palette()[2], alpha=0.5,
# label='data, buyer_retrate_gt01')
# tfmask_lt01 = np.logical_not(tfmask_gt01)
# plt.plot(features_scaled[tfmask_lt01, col_0], features_scaled[tfmask_lt01, col_1],
# marker='.', linestyle='', color=sns.color_palette()[1], alpha=0.5,
# label='data, buyer_retrate_lt01')
# plt.plot(centroids[:, col_0], centroids[:, col_1],
# marker='+', linestyle='', markeredgewidth=2, markersize=12,
# color=sns.color_palette()[0], label='centroids')
# for (idx, centroid) in enumerate(centroids):
# plt.annotate(
# str(idx), xy=(centroid[col_0], centroid[col_1]),
# xycoords='data', xytext=(0, 0), textcoords='offset points', color='black',
# fontsize=18, rotation=0)
# plt.plot(transformed_centroids[:, col_0], transformed_centroids[:, col_1],
# marker='x', linestyle='', markeredgewidth=2, markersize=10,
# color=sns.color_palette()[1], label='transformed centroids')
# for (idx, transformed_centroid) in enumerate(transformed_centroids):
# plt.annotate(
# str(idx), xy=(transformed_centroid[col_0], transformed_centroid[col_1]),
# xycoords='data', xytext=(0, 0), textcoords='offset points', color='black',
# fontsize=18, rotation=0)
# plt.xlabel("Scaled '{name}', highest variance".format(name=name_0))
# plt.ylabel("Scaled '{name}', next highest variance".format(name=name_1))
# plt.legend(loc='upper left')
# if show_plots:
# plt.show()
# plt.gcf().clear()
# plt.clf()
# plt.cla()
# plt.close()
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # Plot clusters in scaled feature PCA space.
# transformed_centroids = transformer_pca.transform(transformer_kmeans.cluster_centers_)
# centroids = transformer_kmeans_pca.cluster_centers_
# plt.title("Data and centroids within scaled feature PCA space")
# plt.plot(features_scaled_pca[tfmask_gt01, 0], features_scaled_pca[tfmask_gt01, 1],
# marker='o', linestyle='', color=sns.color_palette()[2], alpha=0.5,
# label='transformed data, buyer_retrate_gt01')
# plt.plot(features_scaled_pca[tfmask_lt01, 0], features_scaled_pca[tfmask_lt01, 1],
# marker='.', linestyle='', color=sns.color_palette()[1], alpha=0.5,
# label='transformed data, buyer_retrate_lt01')
# plt.plot(transformed_centroids[:, 0], transformed_centroids[:, 1],
# marker='+', linestyle='', markeredgewidth=2, markersize=12,
# color=sns.color_palette()[0], label='transformed centroids')
# for (idx, transformed_centroid) in enumerate(transformed_centroids):
# plt.annotate(
# str(idx), xy=(transformed_centroid[0], transformed_centroid[1]),
# xycoords='data', xytext=(0, 0), textcoords='offset points', color='black',
# fontsize=18, rotation=0)
# plt.plot(centroids[:, 0], centroids[:, 1],
# marker='x', linestyle='', markeredgewidth=2, markersize=10,
# color=sns.color_palette()[1], label='centroids')
# for (idx, centroid) in enumerate(centroids):
# plt.annotate(
# str(idx), xy=(centroid[0], centroid[1]),
# xycoords='data', xytext=(0, 0), textcoords='offset points', color='black',
# fontsize=18, rotation=0)
# plt.xlabel('Principal component 0')
# plt.ylabel('Principal component 1')
# plt.legend(loc='upper left')
# if show_plots:
# plt.show()
# plt.gcf().clear()
# plt.clf()
# plt.cla()
# plt.close()
# time_stop = time.perf_counter()
# print("Time elapsed (sec) = {diff:.1f}".format(diff=time_stop-time_start))
# print()
########################################
# print('#'*80)
# print(textwrap.dedent("""\
# `df_features2`: Combine `df_features` with
# cluster labels, cluster distances, PCA components, PCA cluster labels,
# and PCA cluster distances into `df_features`."""))
# time_start = time.perf_counter()
# Cluster labels and distances in feature space.
ds_clusters = pd.Series(
transformer_kmeans.predict(X=features_scaled),
index=df_features.index, name='cluster')
n_digits = len(str(len(transformer_kmeans.cluster_centers_)))
columns = [
'cluster_{num}_dist'.format(num=str(num).rjust(n_digits, '0'))
for num in range(len(transformer_kmeans.cluster_centers_))]
df_cluster_dists = pd.DataFrame(
transformer_kmeans.transform(X=features_scaled),
index=df_features.index, columns=columns)
if not np.all(ds_clusters.values == np.argmin(df_cluster_dists.values, axis=1)):
raise AssertionError(
("Program error. Not all cluster labels match cluster label\n" +
"with minimum distance to record.\n" +
"Required: np.all(ds_clusters.values == np.argmin(df_cluster_dists.values, axis=1))"))
# PCA features.
n_digits = len(str(transformer_pca.n_components_))
columns = [
'pca_comp_{num}'.format(num=str(num).rjust(n_digits, '0'))
for num in range(transformer_pca.n_components_)]
df_features_pca = pd.DataFrame(
features_scaled_pca, index=df_features.index, columns=columns)
# Cluster labels and distances in PCA feature space.
ds_clusters_pca = pd.Series(
transformer_kmeans_pca.predict(X=features_scaled_pca),
index=df_features.index, name='pca_cluster')
n_digits = len(str(len(transformer_kmeans_pca.cluster_centers_)))
columns = [
'pca_cluster_{num}_dist'.format(num=str(num).rjust(n_digits, '0'))
for num in range(len(transformer_kmeans_pca.cluster_centers_))]
df_cluster_dists_pca = pd.DataFrame(
transformer_kmeans_pca.transform(X=features_scaled_pca),
index=df_features.index, columns=columns)
if not np.all(ds_clusters_pca.values == np.argmin(df_cluster_dists_pca.values, axis=1)):
raise AssertionError(
("Program error. Not all PCA cluster labels match PCA cluster label\n" +
"with minimum distance to record.\n" +
"Required: np.all(ds_clusters_pca.values == np.argmin(df_cluster_dists_pca.values, axis=1))"))
# Combine with original `df_features` into new `df_features2`.
df_features2 = pd.concat(
[df_features, ds_clusters, df_cluster_dists,
df_features_pca, ds_clusters_pca, df_cluster_dists_pca],
axis=1, copy=True)
# time_stop = time.perf_counter()
# print("Time elapsed (sec) = {diff:.1f}".format(diff=time_stop-time_start))
# print()
########################################
# print('#'*80)
# print(textwrap.dedent("""\
# `df_importances` , `important_features`, `df_features3`:
# `df_features3` is a view into (not a copy) of `df_features2` with only
# `important_features`. Feature importance is the normalized reduction
# in the loss score. A feature is selected as 'important' if its average
# importance is greater than the average importance of the random feature."""))
# time_start = time.perf_counter()
# Calculate feature importances.
# Note:
# * `n_estimators` impact the feature importances but only have a small
# effect on the relative importances.
# * `n_estimators` impact the scores but only have a small effect on the relative scores.
# * Use replace=False for maximum data variety.
# TODO: Use a significance test for feature importance.
estimator = sk_ens.ExtraTreesRegressor(n_estimators=10, n_jobs=-1)
df_importances = utils.utils.calc_feature_importances(
estimator=estimator, df_features=df_features2, ds_target=ds_target,
replace=False, show_progress=False, show_plot=False)
important_features = df_importances.columns[
df_importances.mean() > df_importances['random'].mean()]
important_features = list(
df_importances[important_features].mean().sort_values(ascending=False).index)
df_features3 = df_features2[important_features]
print("`important_features` =")
print(important_features)
# print()
# time_stop = time.perf_counter()
# print("Time elapsed (sec) = {diff:.1f}".format(diff=time_stop-time_start))
# print("`df_features`: Most significant projections of PCA component 78:")
# print(sorted(list(zip(df_features, transformer_pca.components_[78])), key=lambda tup: tup[1])[:3])
# print('...')
# print(sorted(list(zip(df_features, transformer_pca.components_[78])), key=lambda tup: tup[1])[-3:])
# print()
########################################
# print('#'*80)
# print(textwrap.dedent("""\
# Tune feature space by optimizing the model score
# with cross validation. Model scores are R^2,
# the coefficient of determination."""))
# time_start = time.perf_counter()
# print("Progress:", end=' ')
# size_data = len(df_features3)
# size_sub = 1000
# frac_test = 0.2
# replace = False
# n_scores = 10
# estimator = sk_ens.ExtraTreesRegressor(n_estimators=10, n_jobs=-1)
# nftrs_scores = list()
# idxs = itertools.chain(range(0, 10), range(10, 30, 3), range(30, len(important_features), 10))
# idxs = range(10)
# for idx in idxs:
# n_ftrs = idx+1
# ftrs = important_features[:n_ftrs]
# scores = list()
# for _ in range(0, n_scores):
# idxs_sub = np.random.choice(a=size_data, size=size_sub, replace=replace)
# (ftrs_train, ftrs_test,
# trg_train, trg_test) = sk_cv.train_test_split(
# df_features3[ftrs].values[idxs_sub], ds_target.values[idxs_sub],
# test_size=frac_test)
# estimator.fit(X=ftrs_train, y=trg_train)
# scores.append(estimator.score(X=ftrs_test, y=trg_test))
# nftrs_scores.append([n_ftrs, scores])
# if idx % 10 == 0:
# print("{frac:.0%}".format(frac=(idx+1)/len(important_features)), end=' ')
# print('\n')
# nftrs_pctls = np.asarray(
# [np.append(tup[0], np.percentile(tup[1], q=[5,50,95]))
# for tup in nftrs_scores])
# plt.plot(
# nftrs_pctls[:, 0], nftrs_pctls[:, 2],
# marker='.', color=sns.color_palette()[0],
# label='50th pctl score')
# plt.fill_between(
# nftrs_pctls[:, 0],
# y1=nftrs_pctls[:, 1],
# y2=nftrs_pctls[:, 3],
# alpha=0.5, color=sns.color_palette()[0],
# label='5-95th pctls of scores')
# plt.title("Model score vs number of features")
# plt.xlabel("Number of features")
# plt.ylabel("Model score")
# plt.legend(loc='upper left')
# plt.savefig(
# os.path.join(path_plot_dir, 'model_tune_nfeatures.png'),
# bbox_inches='tight', dpi=300)
# if show_plots:
# plt.show()
# plt.gcf().clear()
# plt.clf()
# plt.cla()
# plt.close()
# time_stop = time.perf_counter()
# print("Time elapsed (sec) = {diff:.1f}".format(diff=time_stop-time_start))
# print()
########################################
# print('#'*80)
# print("""`important_features2`, `df_features4`:
# `df_features4` is a view into (not a copy) of `df_features3` with only
# `important_features2`. Feature importance is the normalized reduction
# in the loss score. A feature is selected as 'important' from the
# model score vs features plot.
# """)
# time_start = time.perf_counter()
# Keep top 10 features from score vs features plot.
important_features2 = important_features[:10]
df_features4 = df_features3[important_features2]
# print("`important_features2` =")
# print(important_features2)
# print()
# print("""Cluster map of important feature correlations with hierarchical relationships.
# The deeper the dendrogram node, the more (anti)correlated the features are.
# The Spearman rank correlation accommodates non-linear features.
# The pair plot is a scatter matrix plot of columns vs each other.
# """)
# # Notes:
# # * `size_sub` for computing correlations should be <= 1e3 else runtime is long.
# # * Use replace=False to show most data variety.
# # * For pairplot, only plot the target variable with the top 5 important
# # features for legibility.
# # * For clustermap, `nlabels` shows every `nlabels`th label, so 20 labels total.
# size_sub = min(int(1e3), len(df_features4.index))
# idxs_sub = np.random.choice(a=df_features4.index, size=size_sub, replace=False)
# df_plot_sub = df_features4.loc[idxs_sub].copy()
# df_plot_sub[target] = ds_target.loc[idxs_sub].copy()
# df_plot_sub['buyer_retrate_gt01'] = df_features3.loc[idxs_sub, buyer_retrate] > buyer_retrate_max
# print(("Clustermap of target, '{target}', top 10 important features, buyer_retrate_gt01:").format(
# target=target))
# sns.clustermap(df_plot_sub[[target]+important_features2[:10]+['buyer_retrate_gt01']].corr(method='spearman'))
# plt.savefig(
# os.path.join(path_plot_dir, 'model_clustermap.png'),
# bbox_inches='tight', dpi=300)
# if show_plots:
# plt.show()
# plt.gcf().clear()
# plt.clf()
# plt.cla()
# plt.close()
# print(("Pairplot of target, '{target}', top 5 important features, buyer_retrate_gt01:").format(
# target=target))
# df_pairplot = df_plot_sub[[target]+important_features2[:5]+['buyer_retrate_gt01']]
# print(df_pairplot.columns)
# ds_columns = pd.Series(df_pairplot.columns, name='column')
# ds_columns.to_csv(
# os.path.join(path_plot_dir, 'model_pairplot_index_column_map.csv'),
# header=True, index_label='index')
# df_pairplot.columns = ds_columns.index
# df_pairplot.loc[:, target] = df_pairplot[np.where(ds_columns.values == target)[0][0]]
# df_pairplot.loc[:, 'buyer_retrate_gt01'] = df_pairplot[np.where(ds_columns.values == 'buyer_retrate_gt01')[0][0]]
# df_pairplot.drop([np.where(ds_columns.values == target)[0][0]], axis=1, inplace=True)
# df_pairplot.drop([np.where(ds_columns.values == 'buyer_retrate_gt01')[0][0]], axis=1, inplace=True)
# sns.pairplot(
# df_pairplot,
# hue='buyer_retrate_gt01', diag_kind='hist', markers=['.', 'o'],
# palette=[sns.color_palette()[1], sns.color_palette()[2]],
# plot_kws={'alpha':1.0})
# plt.savefig(
# os.path.join(path_plot_dir, 'model_pairplot.png'),
# bbox_inches='tight', dpi=300)
# if show_plots:
# plt.show()
# plt.gcf().clear()
# plt.clf()
# plt.cla()
# plt.close()
print("Summarize top 5 important features:")
if len(important_features2) > 0:
print(df_features4[important_features2[:5]].describe(include='all'))
else:
print('important_features2 is empty')
# print()
# print("First 5 records for top 5 important features:")
# print(df_features4[important_features2[:5]].head())
# print()
# print("""Describe top 5 important features. Format:
# Feature: importance score.
# Histogram of feature values.""")
# cols_scores = df_importances[important_features2[:5]].mean().items()
# for (col, score) in cols_scores:
# # Describe feature variables.
# print(
# ("{col}:\n" +
# " importance: {score:.3f}").format(col=col, score=score))
# # Plot histogram of feature variables.
# tfmask_gt01 = df_features3[buyer_retrate] > buyer_retrate_max
# sns.distplot(
# df_features4.loc[np.logical_not(tfmask_gt01), col], hist=True, kde=False, norm_hist=False,
# label='buyer_retrate_lt01', color=sns.color_palette()[1])
# sns.distplot(
# df_features4.loc[tfmask_gt01, col], hist=True, kde=False, norm_hist=False,
# label='buyer_retrate_gt01', color=sns.color_palette()[2])
# plt.title('Feature value histogram')
# plt.xlabel("Feature value, '{ftr}'".format(ftr=col))
# plt.ylabel('Number of feature values')
# plt.legend(loc='upper left')
# if show_plots:
# plt.show()
# plt.gcf().clear()
# plt.clf()
# plt.cla()
# plt.close()
# time_stop = time.perf_counter()
# print("Time elapsed (sec) = {diff:.1f}".format(diff=time_stop-time_start))
# print()
########################################
# print('#'*80)
# print("""Tune model hyperparameters by optimizing the model score
# with cross validation. Model scores are R^2,
# the coefficient of determination.
# """)
# time_start = time.perf_counter()
# print("Progress:", end=' ')
# size_data = len(df_features4)
# size_sub = min(len(df_features4), int(2e3))
# frac_test = 0.2
# replace = False
# nest_list = [10, 30, 100, 300]
# n_scores = 10
# nest_scores = list()
# for (inum, n_est) in enumerate(nest_list):
# estimator = sk_ens.ExtraTreesRegressor(n_estimators=n_est, n_jobs=-1)
# scores = list()
# for _ in range(0, n_scores):
# idxs_sub = np.random.choice(a=size_data, size=size_sub, replace=replace)
# (ftrs_train, ftrs_test,
# trg_train, trg_test) = sk_cv.train_test_split(
# df_features4.values[idxs_sub], ds_target.values[idxs_sub],
# test_size=frac_test)
# estimator.fit(X=ftrs_train, y=trg_train)
# scores.append(estimator.score(
# X=ftrs_test, y=trg_test))
# nest_scores.append([n_est, scores])
# print("{frac:.0%}".format(frac=(inum+1)/len(nest_list)), end=' ')
# print('\n')
# nest_pctls = np.asarray(
# [np.append(tup[0], np.percentile(tup[1], q=[5,50,95]))
# for tup in nest_scores])
# plt.plot(
# nest_pctls[:, 0], nest_pctls[:, 2],
# marker='.', color=sns.color_palette()[0],
# label='50th pctl score')
# plt.fill_between(
# nest_pctls[:, 0],
# y1=nest_pctls[:, 1],
# y2=nest_pctls[:, 3],
# alpha=0.5, color=sns.color_palette()[0],
# label='5-95th pctls of scores')
# plt.title("Model score vs number of estimators")
# plt.xlabel("Number of estimators")
# plt.ylabel("Model score")
# plt.legend(loc='lower left')
# plt.savefig(
# os.path.join(path_plot_dir, 'model_tune_nestimators.png'),
# bbox_inches='tight', dpi=300)
# if show_plots:
# plt.show()
# plt.gcf().clear()
# plt.clf()
# plt.cla()
# plt.close()
# time_stop = time.perf_counter()
# print("Time elapsed (sec) = {diff:.1f}".format(diff=time_stop-time_start))
# print()
########################################
# print('#'*80)
# print("""Test significance of predictions by shuffling the target values.
# Model scores are r^2, the coefficient of determination.
# """)
# n_estimators = 50 # from tuning curve
# time_start = time.perf_counter()
# # Calculate significance of score.
# estimator = sk_ens.ExtraTreesRegressor(n_estimators=n_estimators, n_jobs=-1)
# utils.utils.calc_score_pvalue(
# estimator=estimator, df_features=df_features4, ds_target=ds_target,
# n_iter=20, size_sub=None, frac_test=0.2,
# replace=False, show_progress=True, show_plot=True)
# print()
# time_stop = time.perf_counter()
# print("Time elapsed (sec) = {diff:.1f}".format(diff=time_stop-time_start))
# print()
########################################
# print('#'*80)
# print("""Predict target values with cross-validation,
# plot actual vs predicted and score.
# """)
n_estimators = 50 # from tuning curve
# time_start = time.perf_counter()
if len(df_features4.columns) > 0:
print("Progress:", end=' ')
n_folds = 5
estimator = sk_ens.ExtraTreesRegressor(n_estimators=n_estimators, n_jobs=-1)
kfolds = sk_cv.KFold(n=len(df_features4), n_folds=n_folds, shuffle=True)
ds_predicted = pd.Series(index=ds_target.index, name=target+'_pred')
idxs_pred = set()
for (inum, (idxs_train, idxs_test)) in enumerate(kfolds):
if not idxs_pred.isdisjoint(idxs_test):
raise AssertionError(
("Program error. Each record must be predicted only once.\n" +
"Required: idxs_pred.isdisjoint(idxs_test)"))
idxs_pred.update(idxs_test)
ftrs_train = df_features4.values[idxs_train]
ftrs_test = df_features4.values[idxs_test]
trg_train = ds_target.values[idxs_train]
trg_test = ds_target.values[idxs_test]
estimator.fit(X=ftrs_train, y=trg_train)
ds_predicted.iloc[idxs_test] = estimator.predict(X=ftrs_test)
print("{frac:.0%}".format(frac=(inum+1)/n_folds), end=' ')
print('\n')
score = sk_met.r2_score(
y_true=ds_target, y_pred=ds_predicted)
print("Model score = {score:.3f}".format(score=score))
# utils.utils.plot_actual_vs_predicted(
# y_true=ds_target.values, y_pred=ds_predicted.values,
# loglog=False, xylims=(-1.1, 1.1),
# path=None)
# TODO: fix matplotlib error.
# path=os.path.join(path_plot_dir, 'model_actual_vs_predicted.jpg'))
else:
print("Important features list is empty.")
print("""`features.pkl`, `estimator.pkl`: Save features and estimator.""")
path_ftr = os.path.join(path_data, 'features.pkl')
with open(path_ftr, mode='wb') as fobj:
pickle.dump(obj=df_features4.columns, file=fobj)
path_est = os.path.join(path_data, 'estimator.pkl')
with open(path_est, mode='wb') as fobj:
pickle.dump(obj=estimator, file=fobj)
# time_stop = time.perf_counter()
# print("Time elapsed (sec) = {diff:.1f}".format(diff=time_stop-time_start))
# print()
########################################
return None
def create_pipeline_model_new_data(
    df:pd.DataFrame,
    path_data_dir:str,
    show_plots:bool=False):
    r"""Score new data with a previously fitted pipeline model.

    Loads the artifacts pickled by the training pipeline from
    `path_data_dir`, rebuilds the engineered feature set (cluster labels,
    cluster distances, PCA components) for `df`, and predicts the target.

    Args:
        df (pandas.DataFrame): Formatted data containing a 'Returned'
            column and the numeric feature columns used at training time.
        path_data_dir (str): Directory with the pickled artifacts:
            'columns.pkl', 'transformer_scaler.pkl', 'transformer_pca.pkl',
            'transformer_kmeans.pkl', 'transformer_kmeans_pca.pkl',
            'features.pkl', 'estimator.pkl'.
        show_plots (bool, optional, False): Currently unused; kept for
            interface parity with the training pipeline.

    Returns:
        ds_predicted (pandas.Series): Predicted target values indexed like
            `df`. Filled with -1 if the saved feature list is empty.

    Raises:
        AssertionError: If predicted cluster labels disagree with the
            minimum-distance cluster (internal consistency check).
    """
    # Check arguments.
    path_data = path_data_dir
    ########################################
    # Define target and features: every int64/float64 column except the target.
    target = 'Returned'
    features = set(df.columns[np.logical_or(df.dtypes=='int64', df.dtypes=='float64')])
    features.difference_update([target])
    features = sorted(features)
    ########################################
    # `Container`: empty class used to hold per-step pipeline state.
    Container = utils.utils.Container
    step = Container()
    # `step.s0.dfs`: save the initial state of features and target.
    step.s0 = Container()
    step.s0.dfs = Container()
    step.s0.dfs.df_features = df[features].copy()
    step.s0.dfs.ds_target = df[target].copy()
    # TODO: REDO after this point with step.sN.dfs.[df_features,ds_target]
    # rather than redefining [df_features,ds_target]
    df_features = step.s0.dfs.df_features
    ds_target = step.s0.dfs.ds_target
    ########################################
    print(textwrap.dedent("`transformer_scaler`, `transformer_pca`: Load existing transformers."))
    # Scale data prior to comparing clusters with/without PCA.
    # Note: Using sklearn.preprocessing.RobustScaler with
    #   sklearn.decomposition.IncrementalPCA(whiten=False)
    #   is often the most stable (slowly varying scores)
    #   with highest scores. Centroid agreement can still be
    #   off due to outliers.
    # Restrict to (and order by) the exact columns seen at training time.
    path_cols = os.path.join(path_data, 'columns.pkl')
    with open(path_cols, mode='rb') as fobj:
        columns = pickle.load(file=fobj)
    df_features = df_features[columns]
    path_tform_scl = os.path.join(path_data, 'transformer_scaler.pkl')
    with open(path_tform_scl, mode='rb') as fobj:
        transformer_scaler = pickle.load(file=fobj)
    features_scaled = transformer_scaler.transform(X=df_features)
    path_tform_pca = os.path.join(path_data, 'transformer_pca.pkl')
    with open(path_tform_pca, mode='rb') as fobj:
        transformer_pca = pickle.load(file=fobj)
    features_scaled_pca = transformer_pca.transform(X=features_scaled)
    ########################################
    # `transformer_kmeans`, `transformer_kmeans_pca`: predict centroid clusters.
    # TODO: Fix plot. Assign clusters IDs in a deterministic way so that
    # cluster 0 raw matches cluster 0 transformed.
    print("""`transformer_kmeans.pkl`, `transformer_kmeans_pca.pkl`: Load transformers.""")
    path_tform_km = os.path.join(path_data, 'transformer_kmeans.pkl')
    with open(path_tform_km, mode='rb') as fobj:
        transformer_kmeans = pickle.load(file=fobj)
    path_tform_km_pca = os.path.join(path_data, 'transformer_kmeans_pca.pkl')
    with open(path_tform_km_pca, mode='rb') as fobj:
        transformer_kmeans_pca = pickle.load(file=fobj)
    ########################################
    # `df_features2`: combine `df_features` with cluster labels, cluster
    # distances, PCA components, PCA cluster labels, and PCA cluster distances.
    # Cluster labels and distances in feature space.
    ds_clusters = pd.Series(
        transformer_kmeans.predict(X=features_scaled),
        index=df_features.index, name='cluster')
    # Zero-pad the cluster number so distance columns sort lexicographically.
    n_digits = len(str(len(transformer_kmeans.cluster_centers_)))
    columns = [
        'cluster_{num}_dist'.format(num=str(num).rjust(n_digits, '0'))
        for num in range(len(transformer_kmeans.cluster_centers_))]
    df_cluster_dists = pd.DataFrame(
        transformer_kmeans.transform(X=features_scaled),
        index=df_features.index, columns=columns)
    if not np.all(ds_clusters.values == np.argmin(df_cluster_dists.values, axis=1)):
        raise AssertionError(
            ("Program error. Not all cluster labels match cluster label\n" +
             "with minimum distance to record.\n" +
             "Required: np.all(ds_clusters.values == np.argmin(df_cluster_dists.values, axis=1))"))
    # PCA features.
    n_digits = len(str(transformer_pca.n_components_))
    columns = [
        'pca_comp_{num}'.format(num=str(num).rjust(n_digits, '0'))
        for num in range(transformer_pca.n_components_)]
    df_features_pca = pd.DataFrame(
        features_scaled_pca, index=df_features.index, columns=columns)
    # Cluster labels and distances in PCA feature space.
    ds_clusters_pca = pd.Series(
        transformer_kmeans_pca.predict(X=features_scaled_pca),
        index=df_features.index, name='pca_cluster')
    n_digits = len(str(len(transformer_kmeans_pca.cluster_centers_)))
    columns = [
        'pca_cluster_{num}_dist'.format(num=str(num).rjust(n_digits, '0'))
        for num in range(len(transformer_kmeans_pca.cluster_centers_))]
    df_cluster_dists_pca = pd.DataFrame(
        transformer_kmeans_pca.transform(X=features_scaled_pca),
        index=df_features.index, columns=columns)
    if not np.all(ds_clusters_pca.values == np.argmin(df_cluster_dists_pca.values, axis=1)):
        raise AssertionError(
            ("Program error. Not all PCA cluster labels match PCA cluster label\n" +
             "with minimum distance to record.\n" +
             "Required: np.all(ds_clusters_pca.values == np.argmin(df_cluster_dists_pca.values, axis=1))"))
    # Combine with original `df_features` into new `df_features2`.
    df_features2 = pd.concat(
        [df_features, ds_clusters, df_cluster_dists,
         df_features_pca, ds_clusters_pca, df_cluster_dists_pca],
        axis=1, copy=True)
    ########################################
    # Predict target values with the saved estimator.
    # BUGFIX: this step *loads* the saved artifacts (mode='rb', pickle.load);
    # the old message incorrectly said "Save".
    print("`features.pkl`, `estimator.pkl`: Load features and estimator.")
    path_ftr = os.path.join(path_data, 'features.pkl')
    with open(path_ftr, mode='rb') as fobj:
        important_features = pickle.load(file=fobj)
    df_features4 = df_features2[important_features]
    path_est = os.path.join(path_data, 'estimator.pkl')
    with open(path_est, mode='rb') as fobj:
        estimator = pickle.load(file=fobj)
    if len(df_features4.columns) > 0:
        ds_predicted = pd.Series(data=estimator.predict(X=df_features4), index=ds_target.index, name=target+'_pred')
    else:
        # No important features were saved at training time: emit sentinel
        # predictions rather than failing.
        ds_predicted = pd.Series(data=[-1]*len(df_features4), index=ds_target.index, name=target+'_pred')
    score = sk_met.r2_score(y_true=ds_target, y_pred=ds_predicted)
    print("Model score = {score:.3f}".format(score=score))
    ########################################
    return ds_predicted
def plot_model(
    df:pd.DataFrame,
    path_plot_dir:str=None
    ) -> None:
    r"""Plot model to predict bad dealers.

    Plots the frequency distribution of 'RiskyDealerScore' per buyer,
    stacked by whether the buyer's return rate exceeds `buyer_retrate_max`.

    Args:
        df (pandas.DataFrame): DataFrame of formatted data with 'BuyerID',
            'RiskyDealerScore', and the `buyer_retrate` column.
        path_plot_dir (str, optional, None): Path to directory in which to
            save plots. If None, plots are shown but not saved.

    Returns:
        None

    Raises:
        IOError: If `path_plot_dir` is given but does not exist.

    Notes:
        * Target = 'RiskyDealerScore'
    """
    # Check inputs. BUGFIX: `path_plot_dir` is optional with default None,
    # so only validate the path when one was actually given
    # (os.path.exists(None) raises TypeError).
    if path_plot_dir is not None and not os.path.exists(path_plot_dir):
        raise IOError(textwrap.dedent("""\
            Path does not exist: path_plot_dir =
            {path}""".format(path=path_plot_dir)))
    target = 'RiskyDealerScore'
    buyer_retrate_omax = buyer_retrate+'_omax'
    # Axes rectangle leaving room on the right for the legend.
    rect = (0, 0, 0.85, 1)
    # Plot frequency distribution of RiskyDealerScores per BuyerID.
    # Note: Buyers can be counted twice in the histogram if they cross the
    # buyer_retrate_max = 0.1 threshold.
    df_plot = df[['BuyerID', target]].copy()
    df_plot[buyer_retrate_omax] = df[buyer_retrate] > buyer_retrate_max
    # Mean score per buyer, split by whether the return rate is over the max.
    itemized_counts = {
        is_omax: grp[['BuyerID', target]].groupby(by='BuyerID').mean().values.flatten()
        for (is_omax, grp) in df_plot.groupby(by=buyer_retrate_omax)}
    # Order groups (False, True) so stacking/legend order is deterministic.
    itemized_counts = collections.OrderedDict(
        sorted(itemized_counts.items(), key=lambda tup: tup[0], reverse=False))
    keys = itemized_counts.keys()
    bins = 20
    colors = sns.light_palette(sns.color_palette()[2], n_colors=len(keys))
    plt.hist(
        [itemized_counts[key] for key in itemized_counts.keys()],
        bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors)
    plt.title('RiskyDealerScores per buyer\nfrequency distribution')
    plt.xlabel('RiskyDealerScore')
    plt.ylabel('Number of buyers with\nRiskyDealerScore = X')
    plt.legend(
        title='Buyer return\nrate > {retrate:.0%}'.format(retrate=buyer_retrate_max),
        loc='upper left', bbox_to_anchor=(1.0, 1.0))
    plt.tight_layout(rect=rect)
    if path_plot_dir is not None:
        plt.savefig(
            os.path.join(path_plot_dir, 'model_riskydealerscore_freq-dist-buyer_by_returnrate.png'),
            dpi=300)
    plt.show()
    return None
| mit |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/blocking_input.py | 8 | 11792 | """
This provides several classes used for blocking interaction with figure
windows:
:class:`BlockingInput`
creates a callable object to retrieve events in a blocking way for
interactive sessions
:class:`BlockingKeyMouseInput`
creates a callable object to retrieve key or mouse clicks in a blocking
way for interactive sessions.
Note: Subclass of BlockingInput. Used by waitforbuttonpress
:class:`BlockingMouseInput`
creates a callable object to retrieve mouse clicks in a blocking way for
interactive sessions.
Note: Subclass of BlockingInput. Used by ginput
:class:`BlockingContourLabeler`
creates a callable object to retrieve mouse clicks in a blocking way that
will then be used to place labels on a ContourSet
Note: Subclass of BlockingMouseInput. Used by clabel
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib import verbose
from matplotlib.cbook import is_sequence_of_strings
import matplotlib.lines as mlines
import warnings
class BlockingInput(object):
    """Callable helper that blocks until events arrive from a figure.

    An instance is bound to a figure and a sequence of event names; calling
    it runs the canvas event loop until the requested number of events has
    been collected (or the timeout expires) and returns the events.
    """

    def __init__(self, fig, eventslist=()):
        self.fig = fig
        if not is_sequence_of_strings(eventslist):
            raise ValueError("Requires a sequence of event name strings")
        self.eventslist = eventslist

    def on_event(self, event):
        """Canvas callback: record *event*, then stop once enough arrived."""
        # Delegating to add_event is overkill for the base class, but keeps
        # the hook point consistent with subclasses.
        self.add_event(event)
        verbose.report("Event %i" % len(self.events))
        # Let subclasses extract information from the new event.
        self.post_event()
        # A non-positive target count means "collect until stopped".
        if 0 < self.n <= len(self.events):
            self.fig.canvas.stop_event_loop()

    def post_event(self):
        """Hook for subclasses; the base class only collects events."""
        pass

    def cleanup(self):
        """Disconnect all registered canvas callbacks."""
        for cid in self.callbacks:
            self.fig.canvas.mpl_disconnect(cid)
        self.callbacks = []

    def add_event(self, event):
        """Append *event* to the list of collected events."""
        self.events.append(event)

    def pop_event(self, index=-1):
        """Remove the event at *index* (last by default).

        Like list.pop, this raises if there is no such event; no emptiness
        check is performed here.
        """
        self.events.pop(index)

    def pop(self, index=-1):
        self.pop_event(index)
    pop.__doc__ = pop_event.__doc__

    def __call__(self, n=1, timeout=30):
        """Block until *n* events are retrieved, then return them."""
        if not isinstance(n, int):
            raise ValueError("Requires an integer argument")
        self.n = n
        self.events = []
        self.fig.show()  # make sure there is something to interact with
        # Route every requested event type through on_event.
        self.callbacks = [
            self.fig.canvas.mpl_connect(name, self.on_event)
            for name in self.eventslist]
        try:
            self.fig.canvas.start_event_loop(timeout=timeout)
        finally:
            # Run even on exception (e.g. ctrl-c): drop the callbacks.
            self.cleanup()
        return self.events
class BlockingMouseInput(BlockingInput):
    """
    Class that creates a callable object to retrieve mouse clicks in a
    blocking way.

    This class will also retrieve keyboard clicks and treat them like
    appropriate mouse clicks (delete and backspace are like mouse button 3,
    enter is like mouse button 2 and all others are like mouse button 1).
    """
    # Class-level defaults for the button mapping; overwritten per instance
    # in __init__ from the mouse_add/mouse_pop/mouse_stop arguments.
    button_add = 1
    button_pop = 3
    button_stop = 2

    def __init__(self, fig, mouse_add=1, mouse_pop=3, mouse_stop=2):
        # Listen for both mouse-button and key presses; key presses are
        # remapped onto the button semantics in key_event().
        BlockingInput.__init__(self, fig=fig,
                               eventslist=('button_press_event',
                                           'key_press_event'))
        self.button_add = mouse_add
        self.button_pop = mouse_pop
        self.button_stop = mouse_stop

    def post_event(self):
        """
        This will be called to process events
        """
        if len(self.events) == 0:
            warnings.warn("No events yet")
        elif self.events[-1].name == 'key_press_event':
            self.key_event()
        else:
            self.mouse_event()

    def mouse_event(self):
        '''Process a mouse click event'''
        event = self.events[-1]
        button = event.button
        # Dispatch on the configured button mapping (pop/stop/add).
        if button == self.button_pop:
            self.mouse_event_pop(event)
        elif button == self.button_stop:
            self.mouse_event_stop(event)
        else:
            self.mouse_event_add(event)

    def key_event(self):
        '''
        Process a key click event.  This maps certain keys to appropriate
        mouse click events.
        '''
        event = self.events[-1]
        if event.key is None:
            # at least in mac os X gtk backend some key returns None.
            return
        key = event.key.lower()
        if key in ['backspace', 'delete']:
            self.mouse_event_pop(event)
        elif key in ['escape', 'enter']:
            # on windows XP and wxAgg, the enter key doesn't seem to register
            self.mouse_event_stop(event)
        else:
            self.mouse_event_add(event)

    def mouse_event_add(self, event):
        """
        Will be called for any event involving a button other than
        button 2 or 3.  This will add a click if it is inside axes.
        """
        if event.inaxes:
            self.add_click(event)
        else:  # If not a valid click, remove from event list
            BlockingInput.pop(self, -1)

    def mouse_event_stop(self, event):
        """
        Will be called for any event involving button 2.
        Button 2 ends blocking input.
        """
        # Remove last event just for cleanliness (the stop click itself is
        # not part of the data the caller asked for).
        BlockingInput.pop(self, -1)
        # This will exit even if not in infinite mode. This is
        # consistent with MATLAB and sometimes quite useful, but will
        # require the user to test how many points were actually
        # returned before using data.
        self.fig.canvas.stop_event_loop()

    def mouse_event_pop(self, event):
        """
        Will be called for any event involving button 3.
        Button 3 removes the last click.
        """
        # Remove this last event (the button-3 press itself).
        BlockingInput.pop(self, -1)
        # Now remove any existing clicks if possible
        if len(self.events) > 0:
            self.pop(event, -1)

    def add_click(self, event):
        """
        This add the coordinates of an event to the list of clicks
        """
        self.clicks.append((event.xdata, event.ydata))
        verbose.report("input %i: %f,%f" %
                       (len(self.clicks), event.xdata, event.ydata))
        # If desired, draw a '+' marker on the axes where the click landed.
        if self.show_clicks:
            line = mlines.Line2D([event.xdata], [event.ydata],
                                 marker='+', color='r')
            event.inaxes.add_line(line)
            self.marks.append(line)
            self.fig.canvas.draw()

    def pop_click(self, event, index=-1):
        """
        This removes a click from the list of clicks.  Defaults to
        removing the last click.
        """
        self.clicks.pop(index)
        if self.show_clicks:
            # Remove the corresponding on-screen marker as well.
            mark = self.marks.pop(index)
            mark.remove()
            self.fig.canvas.draw()
            # NOTE: I do NOT understand why the above 3 lines does not work
            # for the keyboard backspace event on windows XP wxAgg.
            # maybe event.inaxes here is a COPY of the actual axes?

    def pop(self, event, index=-1):
        """
        This removes a click and the associated event from the object.
        Defaults to removing the last click, but any index can be
        supplied.
        """
        self.pop_click(event, index)
        BlockingInput.pop(self, index)

    def cleanup(self, event=None):
        # Clean the figure: remove any click markers that were drawn.
        if self.show_clicks:
            for mark in self.marks:
                mark.remove()
            self.marks = []
            self.fig.canvas.draw()
        # Call base class to remove callbacks
        BlockingInput.cleanup(self)

    def __call__(self, n=1, timeout=30, show_clicks=True):
        """
        Blocking call to retrieve n coordinate pairs through mouse
        clicks.
        """
        self.show_clicks = show_clicks
        self.clicks = []
        self.marks = []
        BlockingInput.__call__(self, n=n, timeout=timeout)
        return self.clicks
class BlockingContourLabeler(BlockingMouseInput):
    """Place contour labels from blocking mouse or key clicks.

    Reuses the BlockingMouseInput machinery, but an "add click" places a
    label on the ContourSet and a "pop click" removes one.
    """

    def __init__(self, cs):
        self.cs = cs
        BlockingMouseInput.__init__(self, fig=cs.ax.figure)

    def add_click(self, event):
        # Adding a point means adding a contour label here.
        self.button1(event)

    def pop_click(self, event, index=-1):
        # Removing a point means removing the last contour label.
        self.button3(event)

    def button1(self, event):
        """
        Handle any event involving a button other than 2 or 3: add a
        label to the contour set near the event position.
        """
        if event.inaxes != self.cs.ax:
            # Click landed outside the contour axes; discard the event.
            BlockingInput.pop(self)
            return
        self.cs.add_label_near(event.x, event.y, self.inline,
                               inline_spacing=self.inline_spacing,
                               transform=False)
        self.fig.canvas.draw()

    def button3(self, event):
        """
        Handle a button-3 click: remove the last label when not in inline
        mode.  Inline labels cannot be undone — once the contour is broken
        it cannot be put back together — so this is a no-op in inline mode.
        """
        if not self.inline:
            self.cs.pop_label()
            self.cs.ax.figure.canvas.draw()

    def __call__(self, inline, inline_spacing=5, n=-1, timeout=-1):
        """Collect clicks (no markers shown) and label contours from them."""
        self.inline = inline
        self.inline_spacing = inline_spacing
        BlockingMouseInput.__call__(self, n=n, timeout=timeout,
                                    show_clicks=False)
class BlockingKeyMouseInput(BlockingInput):
    """Blocking retrieval of a single mouse or keyboard click."""

    def __init__(self, fig):
        events = ('button_press_event', 'key_press_event')
        BlockingInput.__init__(self, fig=fig, eventslist=events)

    def post_event(self):
        """Record whether the most recent event came from the keyboard."""
        if not self.events:
            warnings.warn("No events yet")
            return
        self.keyormouse = self.events[-1].name == 'key_press_event'

    def __call__(self, timeout=30):
        """
        Block for a single mouse or key click.

        Returns True for a key click, False for a mouse click, or None if
        the timeout expired before any click arrived.
        """
        self.keyormouse = None
        BlockingInput.__call__(self, n=1, timeout=timeout)
        return self.keyormouse
| apache-2.0 |
NYU-CS6313-Projects/Charts-for-CompStat | data/crash_cleaner.py | 1 | 3650 | #!/user/bin/python
# this python script cleans raw crash data and subsets the last n days of observations
# if n=-1 all rows of the raw dataset are kept
# WEEK and YEAR attributes are derived
import pandas as pd
import numpy as np
import datetime as dt
import re
import os
import logging
dpath = './'
def date_parser(ds):
if type(ds) == str:
return dt.datetime.date(dt.datetime.strptime(ds, "%m/%d/%Y"))
else:
return np.nan
def time_parser(ts):
if type(ts) == str:
return dt.datetime.time(dt.datetime.strptime(ts, "%H:%M"))
else:
return np.nan
#zip-s war by brigitte.jellinek@nyu.edu
def zip_cleaner(s):
if type(s) != str:
return np.nan
elif re.match('^\d\d\d\d\d$', s):
return s
elif re.match('^\d\d\d\d\d-\d*$', s):
return re.sub('-\d*$', '', s)
else:
return np.nan
def test_zip_cleaner():
assert '12345' == zip_cleaner('12345')
assert '12345' == zip_cleaner('12345-1234')
assert np.isnan( zip_cleaner(np.nan) )
assert np.isnan( zip_cleaner('1234') )
assert np.isnan( zip_cleaner('0') )
assert np.isnan( zip_cleaner('UNKNOWN'))
# reads the raw crash data
def read_crash_csv(data):
df = pd.read_csv(data,
dtype={
'DATE' : str,
'TIME' : str,
'BOROUGH': str,
'ZIP CODE': str,
'LATITUDE': np.floating,
'LONGITUDE': np.floating,
'LOCATION' : str, # derived type
'ON STREET NAME' : str,
'CROSS STREET NAME': str,
'OFF STREET NAME' : str,
'NUMBER OF PERSONS INJURED' : np.integer,
'NUMBER OF PERSONS KILLED' : np.integer,
'NUMBER OF PEDESTRIANS INJURED' : np.integer,
'NUMBER OF PEDESTRIANS KILLED' : np.integer,
'NUMBER OF CYCLIST INJURED' : np.integer,
'NUMBER OF CYCLIST KILLED' : np.integer,
'NUMBER OF MOTORIST INJURED' : np.integer,
'NUMBER OF MOTORIST KILLED' : np.integer,
'CONTRIBUTING FACTOR VEHICLE 1' : str,
'CONTRIBUTING FACTOR VEHICLE 2' : str,
'CONTRIBUTING FACTOR VEHICLE 3' : str,
'CONTRIBUTING FACTOR VEHICLE 4' : str,
'CONTRIBUTING FACTOR VEHICLE 5' : str,
'UNIQUE KEY' : np.integer,
'VEHICLE TYPE CODE 1' : str,
'VEHICLE TYPE CODE 2' : str,
'VEHICLE TYPE CODE 3' : str,
'VEHICLE TYPE CODE 4' : str,
'VEHICLE TYPE CODE 5' : str})
df['DATE'] = map(date_parser, df['DATE'])
df['TIME'] = map(time_parser, df['TIME'])
df['LOCATION'] = zip(df.LATITUDE,df.LONGITUDE)
df['ZIP CODE'] = map(zip_cleaner,df['ZIP CODE'])
df['WEEK'] = df['DATE'].apply(lambda x: pd.to_datetime(x).week)
df['YEAR'] = df['DATE'].apply(lambda x: pd.to_datetime(x).year)
df.columns = [field.replace(" ","_") for field in df.columns]
return(df)
# subsets the last n days of the crash data and logs a number of records in the dataset
# no subseting if n=-1
def sample_crash_data(n,path,folder):
df = read_crash_csv(os.path.join(path,'crashdata.csv'))
logging.basicConfig(filename=os.path.join(path,'sample.log'),level=logging.DEBUG)
df_new = df
if n!=-1:
start = dt.date.today()
logging.info('As for %s raw data set contains %s records ...' % (dt.datetime.strftime(start,"%m/%d/%Y %H:%M:%S")
,df.shape[0]))
end = dt.date.today()-dt.timedelta(days=n)
df_new = df[(df.DATE >= end) & (df.DATE <= start)]
df_new.to_csv(os.path.join(path,'%sdays_crashdata.csv' %(n)), index=False)
logging.info('Raw data set for the last %s days contains %s records' % (n, df_new.shape[0]))
else:
df_new.to_csv(os.path.join(path,'%srows_crashdata.csv' %(df_new.shape[0])), index=False)
# n = 150; n =-1
if __name__ == "__main__":
sample_crash_data(150,dpath,'data')
sample_crash_data(-1,dpath,'data')
| mit |
hyperspy/hyperspy | hyperspy/drawing/_widgets/rectangles.py | 2 | 19757 | # -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
import logging
from hyperspy.drawing.widgets import Widget2DBase, ResizersMixin
_logger = logging.getLogger(__name__)
# Track if we have already warned when the widget is out of range
already_warn_out_of_range = False
class SquareWidget(Widget2DBase):
"""SquareWidget is a symmetric, Rectangle-patch based widget, which can be
dragged, and resized by keystrokes/code. As the widget is normally only
meant to indicate position, the sizing is deemed purely visual, but there
is nothing that forces this use. However, it should be noted that the outer
bounds only correspond to pure indices for odd sizes.
"""
def __init__(self, axes_manager, **kwargs):
super(SquareWidget, self).__init__(axes_manager, **kwargs)
def _set_patch(self):
"""Sets the patch to a matplotlib Rectangle with the correct geometry.
The geometry is defined by _get_patch_xy, and get_size_in_axes.
"""
xy = self._get_patch_xy()
xs, ys = self.size
self.patch = [plt.Rectangle(
xy, xs, ys,
fill=False,
lw=self.border_thickness,
ec=self.color,
alpha=self.alpha,
picker=True,)]
super(SquareWidget, self)._set_patch()
def _onmousemove(self, event):
"""on mouse motion move the patch if picked"""
if self.picked is True and event.inaxes:
self.position = (event.xdata, event.ydata)
class RectangleWidget(SquareWidget, ResizersMixin):
"""RectangleWidget is a asymmetric, Rectangle-patch based widget, which can
be dragged and resized by mouse/keys. For resizing by mouse, it adds a
small Rectangle patch on the outer border of the main patch, to serve as
resize handles. This feature can be enabled/disabled by the 'resizers'
property, and the size/color of the handles are set by
'resize_color'/'resize_pixel_size'.
For optimized changes of geometry, the class implements two methods
'set_bounds' and 'set_ibounds', to set the geomtry of the rectangle by
value and index space coordinates, respectivly. It also adds the 'width'
and 'height' properties for verbosity.
For keyboard resizing, 'x'/'c' and 'y'/'u' will increase/decrease the size
of the rectangle along the first and the second axis, respectively.
Implements the internal method _validate_geometry to make sure the patch
will always stay within bounds.
"""
# --------- External interface ---------
def _parse_bounds_args(self, args, kwargs):
"""Internal utility function to parse args/kwargs passed to set_bounds
and set_ibounds.
"""
if len(args) == 1:
return args[0]
elif len(args) == 4:
return args
elif len(kwargs) == 1 and 'bounds' in kwargs:
return kwargs.values()[0]
else:
x = kwargs.pop('x', kwargs.pop('left', self._pos[0]))
y = kwargs.pop('y', kwargs.pop('top', self._pos[1]))
if 'right' in kwargs:
w = kwargs.pop('right') - x
else:
w = kwargs.pop('w', kwargs.pop('width', self._size[0]))
if 'bottom' in kwargs:
h = kwargs.pop('bottom') - y
else:
h = kwargs.pop('h', kwargs.pop('height', self._size[1]))
return x, y, w, h
def set_ibounds(self, *args, **kwargs):
"""
Set bounds by indices. Bounds can either be specified in order left,
bottom, width, height; or by keywords:
* 'bounds': tuple (left, top, width, height)
OR
* 'x'/'left'
* 'y'/'top'
* 'w'/'width', alternatively 'right'
* 'h'/'height', alternatively 'bottom'
If specifying with keywords, any unspecified dimensions will be kept
constant (note: width/height will be kept, not right/bottom).
"""
ix, iy, iw, ih = self._parse_bounds_args(args, kwargs)
x = self.axes[0].index2value(ix)
y = self.axes[1].index2value(iy)
w = self._i2v(self.axes[0], ix + iw) - x
h = self._i2v(self.axes[1], iy + ih) - y
old_position, old_size = self.position, self.size
self._pos = np.array([x, y])
self._size = np.array([w, h])
self._apply_changes(old_size=old_size, old_position=old_position)
def set_bounds(self, *args, **kwargs):
"""
Set bounds by values. Bounds can either be specified in order left,
bottom, width, height; or by keywords:
* 'bounds': tuple (left, top, width, height)
OR
* 'x'/'left'
* 'y'/'top'
* 'w'/'width', alternatively 'right' (x+w)
* 'h'/'height', alternatively 'bottom' (y+h)
If specifying with keywords, any unspecified dimensions will be kept
constant (note: width/height will be kept, not right/bottom).
"""
global already_warn_out_of_range
x, y, w, h = self._parse_bounds_args(args, kwargs)
def warn(obj, parameter, value):
global already_warn_out_of_range
if not already_warn_out_of_range:
_logger.info('{}: {} is out of range. It is therefore set '
'to the value of {}'.format(obj, parameter, value))
already_warn_out_of_range = True
scale = [axis.scale for axis in self.axes]
l0, h0 = self.axes[0].low_value, self.axes[0].high_value
l1, h1 = self.axes[1].low_value, self.axes[1].high_value
in_range = 0
if x < l0:
x = l0
warn(self, '`x`', x)
elif h0 <= x:
x = h0 - scale[0]
warn(self, '`x`', x)
else:
in_range += 1
if y < l1:
y = l1
warn(self, '`y`', y)
elif h1 <= y:
warn(self, '`y`', y)
y = h1 - scale[1]
else:
in_range += 1
if w < scale[0]:
w = scale[0]
warn(self, '`width` or `right`', w)
elif not (l0 + scale[0] <= x + w <= h0 + scale[0]):
if self.size[0] != w: # resize
w = h0 + scale[0] - self.position[0]
warn(self, '`width` or `right`', w)
if self.position[0] != x: # moved
x = h0 + scale[0] - self.size[0]
warn(self, '`x`', x)
else:
in_range += 1
if h < scale[1]:
h = scale[1]
warn(self, '`height` or `bottom`', h)
elif not (l1 + scale[1] <= y + h <= h1 + scale[1]):
if self.size[1] != h: # resize
h = h1 + scale[1] - self.position[1]
warn(self, '`height` or `bottom`', h)
if self.position[1] != y: # moved
y = h1 + scale[1] - self.size[1]
warn(self, '`y`', y)
else:
in_range += 1
# if we are in range again, reset `already_warn_out_of_range` to False
if in_range == 4 and already_warn_out_of_range:
_logger.info('{} back in range.'.format(self.__class__.__name__))
already_warn_out_of_range = False
old_position, old_size = self.position, self.size
self._pos = np.array([x, y])
self._size = np.array([w, h])
self._apply_changes(old_size=old_size, old_position=old_position)
def _validate_pos(self, value):
"""Constrict the position within bounds.
"""
value = (min(value[0], self.axes[0].high_value - self._size[0] +
self.axes[0].scale),
min(value[1], self.axes[1].high_value - self._size[1] +
self.axes[1].scale))
return super(RectangleWidget, self)._validate_pos(value)
@property
def width(self):
return self.get_size_in_indices()[0]
@width.setter
def width(self, value):
if value == self.width:
return
ix = self.indices[0] + value
il0, ih0 = self.axes[0].low_index, self.axes[0].high_index
if value <= 0 or not (il0 < ix <= ih0):
raise ValueError('`width` value is not in range. The '
'`width` is {} and should be in range '
'{}-{}.'.format(ix, il0 + 1, ih0))
self._set_a_size(0, value)
@property
def height(self):
return self.get_size_in_indices()[1]
@height.setter
def height(self, value):
if value == self.height:
return
iy = self.indices[1] + value
il1, ih1 = self.axes[1].low_index, self.axes[1].high_index
if value <= 0 or not (il1 < iy <= ih1):
raise ValueError('`height` value is not in range. The '
'`height` is {} and should be in range '
'{}-{}.'.format(iy, il1 + 1, ih1))
self._set_a_size(1, value)
# --------- Internal functions ---------
# --- Internals that trigger events ---
def _set_size(self, value):
value = np.minimum(value, [ax.size * ax.scale for ax in self.axes])
value = np.maximum(value, [ax.scale for ax in self.axes])
if np.any(self._size != value):
old = self._size
self._size = value
self._validate_geometry()
if np.any(self._size != old):
self._size_changed()
def _set_a_size(self, idx, value):
if self._size[idx] == value or value <= 0:
return
# If we are pushed "past" an edge, size towards it
if self._navigating and self.axes[idx].value > self._pos[idx]:
if value < self._size[idx]:
self._pos[idx] += self._size[idx] - value
self._size[idx] = value
self._validate_geometry()
self._size_changed()
def _increase_xsize(self):
self._set_a_size(0, self._size[0] +
self.axes[0].scale * self.size_step)
def _decrease_xsize(self):
new_s = self._size[0] - self.axes[0].scale * self.size_step
new_s = max(new_s, self.axes[0].scale)
self._set_a_size(0, new_s)
def _increase_ysize(self):
self._set_a_size(1, self._size[1] +
self.axes[1].scale * self.size_step)
def _decrease_ysize(self):
new_s = self._size[1] - self.axes[1].scale * self.size_step
new_s = max(new_s, self.axes[1].scale)
self._set_a_size(1, new_s)
def on_key_press(self, event):
if self.selected:
if event.key == "x":
self._increase_xsize()
elif event.key == "c":
self._decrease_xsize()
elif event.key == "y":
self._increase_ysize()
elif event.key == "u":
self._decrease_ysize()
else:
super(RectangleWidget, self).on_key_press(event)
# --- End internals that trigger events ---
def _get_patch_xy(self):
"""Get xy value for Rectangle with position being top left. This value
deviates from the 'position', as 'position' correspond to the center
value of the pixel. Here, xy corresponds to the top left of the pixel.
"""
offset = [a.scale for a in self.axes]
return self._pos - 0.5 * np.array(offset)
def _update_patch_position(self):
# Override to include resizer positioning
if self.is_on and self.patch:
self.patch[0].set_xy(self._get_patch_xy())
self._update_resizers()
self.draw_patch()
def _update_patch_geometry(self):
# Override to include resizer positioning
if self.is_on and self.patch:
self.patch[0].set_bounds(*self._get_patch_bounds())
self._update_resizers()
self.draw_patch()
def _validate_geometry(self, x1=None, y1=None):
"""Make sure the entire patch always stays within bounds. First the
position (either from position property or from x1/y1 arguments), is
limited within the bounds. Then, if the bottom/right edges are out of
bounds, the position is changed so that they will be at the limit.
The modified geometry is stored, but no change checks are performed.
Call _apply_changes after this in order to process any changes (the
size might change if it is set larger than the bounds size).
"""
xaxis = self.axes[0]
yaxis = self.axes[1]
# Make sure widget size is not larger than axes
self._size[0] = min(self._size[0], xaxis.size * xaxis.scale)
self._size[1] = min(self._size[1], yaxis.size * yaxis.scale)
# Make sure x1/y1 is within bounds
if x1 is None:
x1 = self._pos[0] # Get it if not supplied
elif x1 < xaxis.low_value:
x1 = xaxis.low_value
elif x1 > xaxis.high_value:
x1 = xaxis.high_value
if y1 is None:
y1 = self._pos[1]
elif y1 < yaxis.low_value:
y1 = yaxis.low_value
elif y1 > yaxis.high_value:
y1 = yaxis.high_value
# Make sure x2/y2 is with upper bound.
# If not, keep dims, and change x1/y1!
x2 = x1 + self._size[0]
y2 = y1 + self._size[1]
if x2 > xaxis.high_value + xaxis.scale:
x2 = xaxis.high_value + xaxis.scale
x1 = x2 - self._size[0]
if y2 > yaxis.high_value + yaxis.scale:
y2 = yaxis.high_value + yaxis.scale
y1 = y2 - self._size[1]
self._pos = np.array([x1, y1])
# Apply snaps if appropriate
if self.snap_position:
self._do_snap_position()
if self.snap_size:
self._do_snap_size()
def _onmousemove(self, event):
"""on mouse motion draw the cursor if picked"""
# Simple checks to make sure we are dragging our patch:
if self.picked is True and event.inaxes:
# Setup reused parameters
xaxis = self.axes[0]
yaxis = self.axes[1]
# Mouse position
x = event.xdata
y = event.ydata
p = self._get_patch_xy()
# Old bounds
bounds = [p[0], p[1],
p[0] + self._size[0],
p[1] + self._size[1]]
# Store geometry for _apply_changes at end
old_position, old_size = self.position, self.size
if self.resizer_picked is False:
# Simply dragging main patch. Offset mouse position by
# pick_offset to get new position, then validate it.
x -= self.pick_offset[0]
y -= self.pick_offset[1]
self._validate_geometry(x, y)
else:
posx = None # New x pos. If None, the old pos will be used
posy = None # Same for y
corner = self.resizer_picked
# Adjust for resizer position:
offset = self._get_resizer_offset()
x += offset[0] * (0.5 - 1 * (corner % 2))
y += offset[1] * (0.5 - 1 * (corner // 2))
if corner % 2 == 0: # Left side start
if x > bounds[2]: # flipped to right
posx = bounds[2] # New left is old right
# New size is mouse position - new left
self._size[0] = x - posx
self.resizer_picked += 1 # Switch pick to right
elif bounds[2] - x < xaxis.scale: # Width too small
posx = bounds[2] - xaxis.scale # So move pos left
self._size[0] = bounds[2] - posx # Should be scale
else: # Moving left edge
posx = x # Set left to mouse position
# Keep right still by changing size:
self._size[0] = bounds[2] - x
else: # Right side start
if x < bounds[0]: # Flipped to left
if bounds[0] - x < xaxis.scale:
posx = bounds[0] - xaxis.scale
else:
posx = x # Set left to mouse
# Set size to old left - new left
self._size[0] = bounds[0] - posx
self.resizer_picked -= 1 # Switch pick to left
else: # Moving right edge
# Left should be left as it is, only size updates:
self._size[0] = x - bounds[0] # mouse - old left
if corner // 2 == 0: # Top side start
if y > bounds[3]: # flipped to botton
posy = bounds[3] # New top is old bottom
# New size is mouse position - new top
self._size[1] = y - posy
self.resizer_picked += 2 # Switch pick to bottom
elif bounds[3] - y < yaxis.scale: # Height too small
posy = bounds[3] - yaxis.scale # So move pos up
self._size[1] = bounds[3] - posy # Should be scale
else: # Moving top edge
posy = y # Set top to mouse index
# Keep bottom still by changing size:
self._size[1] = bounds[3] - y # old bottom - new top
else: # Bottom side start
if y < bounds[1]: # Flipped to top
if bounds[1] - y < yaxis.scale:
posy = bounds[1] - yaxis.scale
else:
posy = y # Set top to mouse
# Set size to old top - new top
self._size[1] = bounds[1] - posy
self.resizer_picked -= 2 # Switch pick to top
else: # Moving bottom edge
self._size[1] = y - bounds[1] # mouse - old top
# Bound size to scale:
if self._size[0] < xaxis.scale:
self._size[0] = xaxis.scale
if self._size[1] < yaxis.scale:
self._size[1] = yaxis.scale
if posx is not None:
posx += 0.5 * xaxis.scale
if posy is not None:
posy += 0.5 * yaxis.scale
# Validate the geometry
self._validate_geometry(posx, posy)
# Finally, apply any changes and trigger events/redraw:
self._apply_changes(old_size=old_size, old_position=old_position)
| gpl-3.0 |
anjalisood/spark-tk | regression-tests/sparktkregtests/testcases/dicom/dicom_covariance_matrix_test.py | 11 | 2402 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests covariance of dicom images"""
import unittest
from sparktk import dtypes
from sparktkregtests.lib import sparktk_test
from numpy import cov
from numpy.linalg import svd
from numpy.testing import assert_almost_equal
class DicomCovarianceMatrixTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(DicomCovarianceMatrixTest, self).setUp()
dataset = self.get_file("dicom_uncompressed")
dicom = self.context.dicom.import_dcm(dataset)
self.frame = dicom.pixeldata
def test_covariance_matrix(self):
"""Test the output of dicom_covariance_matrix"""
self.frame.matrix_covariance_matrix("imagematrix")
results = self.frame.to_pandas(self.frame.count())
#compare result
for i, row in results.iterrows():
actual_cov = row['CovarianceMatrix_imagematrix']
#expected ouput using numpy's covariance method
expected_cov = cov(row['imagematrix'])
assert_almost_equal(actual_cov, expected_cov,
decimal=4, err_msg="cov incorrect")
def test_invalid_column_name(self):
"""Test behavior for invalid column name"""
with self.assertRaisesRegexp(
Exception, "column ERR was not found"):
self.frame.matrix_covariance_matrix("ERR")
def test_invalid_param(self):
"""Test behavior for invalid parameter"""
with self.assertRaisesRegexp(
Exception, "takes exactly 2 arguments"):
self.frame.matrix_covariance_matrix("imagematrix", True)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
davidgbe/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
dmigo/incubator-superset | tests/core_tests.py | 1 | 26075 | # -*- coding: utf-8 -*-
"""Unit tests for Superset"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import csv
import datetime
import doctest
import io
import json
import logging
import os
import random
import re
import string
import unittest
import pandas as pd
import psycopg2
from six import text_type
import sqlalchemy as sqla
from superset import dataframe, db, jinja_context, security_manager, sql_lab, utils
from superset.connectors.sqla.models import SqlaTable
from superset.models import core as models
from superset.models.sql_lab import Query
from superset.views.core import DatabaseView
from .base_tests import SupersetTestCase
class CoreTests(SupersetTestCase):
requires_examples = True
def __init__(self, *args, **kwargs):
super(CoreTests, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
cls.table_ids = {tbl.table_name: tbl.id for tbl in (
db.session
.query(SqlaTable)
.all()
)}
def setUp(self):
db.session.query(Query).delete()
db.session.query(models.DatasourceAccessRequest).delete()
db.session.query(models.Log).delete()
def tearDown(self):
db.session.query(Query).delete()
def test_login(self):
resp = self.get_resp(
'/login/',
data=dict(username='admin', password='general'))
self.assertNotIn('User confirmation needed', resp)
resp = self.get_resp('/logout/', follow_redirects=True)
self.assertIn('User confirmation needed', resp)
resp = self.get_resp(
'/login/',
data=dict(username='admin', password='wrongPassword'))
self.assertIn('User confirmation needed', resp)
def test_slice_endpoint(self):
self.login(username='admin')
slc = self.get_slice('Girls', db.session)
resp = self.get_resp('/superset/slice/{}/'.format(slc.id))
assert 'Time Column' in resp
assert 'List Roles' in resp
# Testing overrides
resp = self.get_resp(
'/superset/slice/{}/?standalone=true'.format(slc.id))
assert 'List Roles' not in resp
def test_cache_key(self):
self.login(username='admin')
slc = self.get_slice('Girls', db.session)
viz = slc.viz
qobj = viz.query_obj()
cache_key = viz.cache_key(qobj)
self.assertEqual(cache_key, viz.cache_key(qobj))
qobj['groupby'] = []
self.assertNotEqual(cache_key, viz.cache_key(qobj))
def test_old_slice_json_endpoint(self):
self.login(username='admin')
slc = self.get_slice('Girls', db.session)
json_endpoint = (
'/superset/explore_json/{}/{}/'
.format(slc.datasource_type, slc.datasource_id)
)
resp = self.get_resp(json_endpoint, {'form_data': json.dumps(slc.viz.form_data)})
assert '"Jennifer"' in resp
def test_slice_json_endpoint(self):
self.login(username='admin')
slc = self.get_slice('Girls', db.session)
resp = self.get_resp(slc.explore_json_url)
assert '"Jennifer"' in resp
def test_old_slice_csv_endpoint(self):
self.login(username='admin')
slc = self.get_slice('Girls', db.session)
csv_endpoint = (
'/superset/explore_json/{}/{}/?csv=true'
.format(slc.datasource_type, slc.datasource_id)
)
resp = self.get_resp(csv_endpoint, {'form_data': json.dumps(slc.viz.form_data)})
assert 'Jennifer,' in resp
def test_slice_csv_endpoint(self):
self.login(username='admin')
slc = self.get_slice('Girls', db.session)
csv_endpoint = '/superset/explore_json/?csv=true'
resp = self.get_resp(
csv_endpoint, {'form_data': json.dumps({'slice_id': slc.id})})
assert 'Jennifer,' in resp
def test_admin_only_permissions(self):
def assert_admin_permission_in(role_name, assert_func):
role = security_manager.find_role(role_name)
permissions = [p.permission.name for p in role.permissions]
assert_func('can_sync_druid_source', permissions)
assert_func('can_approve', permissions)
assert_admin_permission_in('Admin', self.assertIn)
assert_admin_permission_in('Alpha', self.assertNotIn)
assert_admin_permission_in('Gamma', self.assertNotIn)
def test_admin_only_menu_views(self):
def assert_admin_view_menus_in(role_name, assert_func):
role = security_manager.find_role(role_name)
view_menus = [p.view_menu.name for p in role.permissions]
assert_func('ResetPasswordView', view_menus)
assert_func('RoleModelView', view_menus)
assert_func('Security', view_menus)
assert_func('UserDBModelView', view_menus)
assert_func('SQL Lab',
view_menus)
assert_admin_view_menus_in('Admin', self.assertIn)
assert_admin_view_menus_in('Alpha', self.assertNotIn)
assert_admin_view_menus_in('Gamma', self.assertNotIn)
def test_save_slice(self):
self.login(username='admin')
slice_name = 'Energy Sankey'
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
copy_name = 'Test Sankey Save'
tbl_id = self.table_ids.get('energy_usage')
new_slice_name = 'Test Sankey Overwirte'
url = (
'/superset/explore/table/{}/?slice_name={}&'
'action={}&datasource_name=energy_usage')
form_data = {
'viz_type': 'sankey',
'groupby': 'target',
'metric': 'sum__value',
'row_limit': 5000,
'slice_id': slice_id,
}
# Changing name and save as a new slice
self.get_resp(
url.format(
tbl_id,
copy_name,
'saveas',
),
{'form_data': json.dumps(form_data)},
)
slices = db.session.query(models.Slice) \
.filter_by(slice_name=copy_name).all()
assert len(slices) == 1
new_slice_id = slices[0].id
form_data = {
'viz_type': 'sankey',
'groupby': 'target',
'metric': 'sum__value',
'row_limit': 5000,
'slice_id': new_slice_id,
}
# Setting the name back to its original name by overwriting new slice
self.get_resp(
url.format(
tbl_id,
new_slice_name,
'overwrite',
),
{'form_data': json.dumps(form_data)},
)
slc = db.session.query(models.Slice).filter_by(id=new_slice_id).first()
assert slc.slice_name == new_slice_name
db.session.delete(slc)
def test_filter_endpoint(self):
    """Hit /superset/filter and verify it returns values for a column.

    Enables ``filter_select_enabled`` on the target table first, since
    the endpoint only serves filter values when that flag is set.
    """
    self.login(username='admin')
    slice_name = 'Energy Sankey'
    slice_id = self.get_slice(slice_name, db.session).id
    db.session.commit()
    tbl_id = self.table_ids.get('energy_usage')
    # BUG FIX: the original assigned `filter_select_enabled` on the
    # Query object returned by `.filter(...)` instead of on the
    # SqlaTable row, so the flag was never actually set on the model.
    # Fetch the row itself before mutating it.
    table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id).one()
    table.filter_select_enabled = True
    url = (
        '/superset/filter/table/{}/target/?viz_type=sankey&groupby=source'
        '&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&'
        'slice_id={}&datasource_name=energy_usage&'
        'datasource_id=1&datasource_type=table')
    resp = self.get_resp(url.format(tbl_id, slice_id))
    assert len(resp) > 0
    assert 'Carbon Dioxide' in resp
def test_slice_data(self):
# slice data should have some required attributes
self.login(username='admin')
slc = self.get_slice('Girls', db.session)
slc_data_attributes = slc.data.keys()
assert('changed_on' in slc_data_attributes)
assert('modified' in slc_data_attributes)
def test_slices(self):
# Testing by hitting the two supported end points for all slices
self.login(username='admin')
Slc = models.Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [
(slc.slice_name, 'explore', slc.slice_url),
(slc.slice_name, 'explore_json', slc.explore_json_url),
]
for name, method, url in urls:
logging.info('[{name}]/[{method}]: {url}'.format(**locals()))
self.client.get(url)
def test_tablemodelview_list(self):
self.login(username='admin')
url = '/tablemodelview/list/'
resp = self.get_resp(url)
# assert that a table is listed
table = db.session.query(SqlaTable).first()
assert table.name in resp
assert '/superset/explore/table/{}'.format(table.id) in resp
def test_add_slice(self):
    """The slice "add" form should be reachable by an admin user."""
    self.login(username='admin')
    response = self.client.get('/slicemodelview/add')
    self.assertEqual(response.status_code, 200)
def test_get_user_slices(self):
    """The slice-add API should list slices created by a given user."""
    self.login(username='admin')
    admin_id = security_manager.find_user('admin').id
    endpoint = '/sliceaddview/api/read?_flt_0_created_by={}'.format(admin_id)
    response = self.client.get(endpoint)
    self.assertEqual(response.status_code, 200)
def test_slices_V2(self):
# Add explore-v2-beta role to admin user
# Test all slice urls as user with with explore-v2-beta role
security_manager.add_role('explore-v2-beta')
security_manager.add_user(
'explore_beta', 'explore_beta', ' user', 'explore_beta@airbnb.com',
security_manager.find_role('explore-v2-beta'),
password='general')
self.login(username='explore_beta', password='general')
Slc = models.Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [
(slc.slice_name, 'slice_url', slc.slice_url),
]
for name, method, url in urls:
print('[{name}]/[{method}]: {url}'.format(**locals()))
response = self.client.get(url)
def test_doctests(self):
    """Run the doctests embedded in a few core Superset modules."""
    for module in (utils, models, sql_lab):
        failure_count, _ = doctest.testmod(module)
        if failure_count:
            raise Exception('Failed a doctest')
def test_misc(self):
    """All health-check endpoints should respond with a plain 'OK'."""
    for endpoint in ('/health', '/healthcheck', '/ping'):
        assert self.get_resp(endpoint) == 'OK'
def test_testconn(self, username='admin'):
self.login(username=username)
database = self.get_main_database(db.session)
# validate that the endpoint works with the password-masked sqlalchemy uri
data = json.dumps({
'uri': database.safe_sqlalchemy_uri(),
'name': 'main',
'impersonate_user': False,
})
response = self.client.post(
'/superset/testconn',
data=data,
content_type='application/json')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
# validate that the endpoint works with the decrypted sqlalchemy uri
data = json.dumps({
'uri': database.sqlalchemy_uri_decrypted,
'name': 'main',
'impersonate_user': False,
})
response = self.client.post(
'/superset/testconn',
data=data,
content_type='application/json')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
def test_custom_password_store(self):
database = self.get_main_database(db.session)
conn_pre = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
def custom_password_store(uri):
return 'password_store_test'
models.custom_password_store = custom_password_store
conn = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
if conn_pre.password:
assert conn.password == 'password_store_test'
assert conn.password != conn_pre.password
# Disable for password store for later tests
models.custom_password_store = None
def test_databaseview_edit(self, username='admin'):
# validate that sending a password-masked uri does not over-write the decrypted
# uri
self.login(username=username)
database = self.get_main_database(db.session)
sqlalchemy_uri_decrypted = database.sqlalchemy_uri_decrypted
url = 'databaseview/edit/{}'.format(database.id)
data = {k: database.__getattribute__(k) for k in DatabaseView.add_columns}
data['sqlalchemy_uri'] = database.safe_sqlalchemy_uri()
self.client.post(url, data=data)
database = self.get_main_database(db.session)
self.assertEqual(sqlalchemy_uri_decrypted, database.sqlalchemy_uri_decrypted)
def test_warm_up_cache(self):
slc = self.get_slice('Girls', db.session)
data = self.get_json_resp(
'/superset/warm_up_cache?slice_id={}'.format(slc.id))
assert data == [{'slice_id': slc.id, 'slice_name': slc.slice_name}]
data = self.get_json_resp(
'/superset/warm_up_cache?table_name=energy_usage&db_name=main')
assert len(data) == 4
def test_shortner(self):
    """POST an explore URL to the link shortener and expect a /r/<id> link.

    NOTE: 'shortner' (sic) matches the application's route spelling;
    renaming the test alone would not fix it.
    """
    self.login(username='admin')
    # A representative (long) explore URL to be shortened.
    data = (
        '//superset/explore/table/1/?viz_type=sankey&groupby=source&'
        'groupby=target&metric=sum__value&row_limit=5000&where=&having=&'
        'flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name='
        'Energy+Sankey&collapsed_fieldsets=&action=&datasource_name='
        'energy_usage&datasource_id=1&datasource_type=table&'
        'previous_viz_type=sankey'
    )
    resp = self.client.post('/r/shortner/', data=dict(data=data))
    # The response body should contain the shortened numeric link.
    assert re.search(r'\/r\/[0-9]+', resp.data.decode('utf-8'))
def test_kv(self):
    """Exercise the /kv/ key-value store endpoints (store and fetch)."""
    self.logout()
    self.login(username='admin')
    try:
        # Posting an empty payload is expected to error out server-side.
        resp = self.client.post('/kv/store/', data=dict())
    except Exception:
        # NOTE(review): `assertRaises(TypeError)` called as a bare
        # statement (no callable, no `with` block) is a no-op — this
        # branch can never fail the test. Consider
        # `with self.assertRaises(...)` instead; left unchanged here.
        self.assertRaises(TypeError)
    value = json.dumps({'data': 'this is a test'})
    # Storing a valid JSON value should succeed ...
    resp = self.client.post('/kv/store/', data=dict(data=value))
    self.assertEqual(resp.status_code, 200)
    # ... and persist it verbatim in the KeyValue table.
    kv = db.session.query(models.KeyValue).first()
    kv_value = kv.value
    self.assertEqual(json.loads(value), json.loads(kv_value))
    # Fetching by id should round-trip the stored value.
    resp = self.client.get('/kv/{}/'.format(kv.id))
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(
        json.loads(value),
        json.loads(resp.data.decode('utf-8')))
    try:
        # Unknown key: server is expected to error.
        resp = self.client.get('/kv/10001/')
    except Exception:
        # NOTE(review): same no-op assertRaises pattern as above.
        self.assertRaises(TypeError)
def test_gamma(self):
self.login(username='gamma')
assert 'List Charts' in self.get_resp('/slicemodelview/list/')
assert 'List Dashboard' in self.get_resp('/dashboardmodelview/list/')
def test_csv_endpoint(self):
self.login('admin')
sql = """
SELECT first_name, last_name
FROM ab_user
WHERE first_name='admin'
"""
client_id = '{}'.format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp('/superset/csv/{}'.format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(
io.StringIO('first_name,last_name\nadmin, user\n'))
self.assertEqual(list(expected_data), list(data))
self.logout()
def test_extra_table_metadata(self):
self.login('admin')
dbid = self.get_main_database(db.session).id
self.get_json_resp(
'/superset/extra_table_metadata/{dbid}/'
'ab_permission_view/panoramix/'.format(**locals()))
def test_process_template(self):
maindb = self.get_main_database(db.session)
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
tp = jinja_context.get_template_processor(database=maindb)
rendered = tp.process_template(sql)
self.assertEqual("SELECT '2017-01-01T00:00:00'", rendered)
def test_get_template_kwarg(self):
maindb = self.get_main_database(db.session)
s = '{{ foo }}'
tp = jinja_context.get_template_processor(database=maindb, foo='bar')
rendered = tp.process_template(s)
self.assertEqual('bar', rendered)
def test_template_kwarg(self):
maindb = self.get_main_database(db.session)
s = '{{ foo }}'
tp = jinja_context.get_template_processor(database=maindb)
rendered = tp.process_template(s, foo='bar')
self.assertEqual('bar', rendered)
def test_templated_sql_json(self):
self.login('admin')
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}' as test"
data = self.run_sql(sql, 'fdaklj3ws')
self.assertEqual(data['data'][0]['test'], '2017-01-01T00:00:00')
def test_table_metadata(self):
maindb = self.get_main_database(db.session)
backend = maindb.backend
data = self.get_json_resp(
'/superset/table/{}/ab_user/null/'.format(maindb.id))
self.assertEqual(data['name'], 'ab_user')
assert len(data['columns']) > 5
assert data.get('selectStar').startswith('SELECT')
# Engine specific tests
if backend in ('mysql', 'postgresql'):
self.assertEqual(data.get('primaryKey').get('type'), 'pk')
self.assertEqual(
data.get('primaryKey').get('column_names')[0], 'id')
self.assertEqual(len(data.get('foreignKeys')), 2)
if backend == 'mysql':
self.assertEqual(len(data.get('indexes')), 7)
elif backend == 'postgresql':
self.assertEqual(len(data.get('indexes')), 5)
def test_fetch_datasource_metadata(self):
self.login(username='admin')
url = (
'/superset/fetch_datasource_metadata?' +
'datasourceKey=1__table'
)
resp = self.get_json_resp(url)
keys = [
'name', 'filterable_cols', 'gb_cols', 'type', 'all_cols',
'order_by_choices', 'metrics_combo', 'granularity_sqla',
'time_grain_sqla', 'id',
]
for k in keys:
self.assertIn(k, resp.keys())
def test_user_profile(self, username='admin'):
self.login(username=username)
slc = self.get_slice('Girls', db.session)
# Setting some faves
url = '/superset/favstar/Slice/{}/select/'.format(slc.id)
resp = self.get_json_resp(url)
self.assertEqual(resp['count'], 1)
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
url = '/superset/favstar/Dashboard/{}/select/'.format(dash.id)
resp = self.get_json_resp(url)
self.assertEqual(resp['count'], 1)
userid = security_manager.find_user('admin').id
resp = self.get_resp('/superset/profile/admin/')
self.assertIn('"app"', resp)
data = self.get_json_resp('/superset/recent_activity/{}/'.format(userid))
self.assertNotIn('message', data)
data = self.get_json_resp('/superset/created_slices/{}/'.format(userid))
self.assertNotIn('message', data)
data = self.get_json_resp('/superset/created_dashboards/{}/'.format(userid))
self.assertNotIn('message', data)
data = self.get_json_resp('/superset/fave_slices/{}/'.format(userid))
self.assertNotIn('message', data)
data = self.get_json_resp('/superset/fave_dashboards/{}/'.format(userid))
self.assertNotIn('message', data)
data = self.get_json_resp(
'/superset/fave_dashboards_by_username/{}/'.format(username))
self.assertNotIn('message', data)
def test_slice_id_is_always_logged_correctly_on_web_request(self):
# superset/explore case
slc = db.session.query(models.Slice).filter_by(slice_name='Girls').one()
qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
self.get_resp(slc.slice_url, {'form_data': json.dumps(slc.viz.form_data)})
self.assertEqual(1, qry.count())
def test_slice_id_is_always_logged_correctly_on_ajax_request(self):
# superset/explore_json case
self.login(username='admin')
slc = db.session.query(models.Slice).filter_by(slice_name='Girls').one()
qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
slc_url = slc.slice_url.replace('explore', 'explore_json')
self.get_json_resp(slc_url, {'form_data': json.dumps(slc.viz.form_data)})
self.assertEqual(1, qry.count())
def test_slice_query_endpoint(self):
# API endpoint for query string
self.login(username='admin')
slc = self.get_slice('Girls', db.session)
resp = self.get_resp('/superset/slice_query/{}/'.format(slc.id))
assert 'query' in resp
assert 'language' in resp
self.logout()
def test_viz_get_fillna_for_columns(self):
slc = self.get_slice('Girls', db.session)
q = slc.viz.query_obj()
results = slc.viz.datasource.query(q)
fillna_columns = slc.viz.get_fillna_for_columns(results.df.columns)
self.assertDictEqual(
fillna_columns,
{'name': ' NULL', 'sum__num': 0},
)
def test_import_csv(self):
    """Upload a small CSV through /csvtodatabaseview and verify success.

    Writes a temporary CSV fixture, posts it through the CSV-to-database
    form, and checks the success message; the fixture file is removed
    even if an assertion fails.
    """
    self.login(username='admin')
    filename = 'testCSV.csv'
    # Random 5-letter table name to avoid collisions across test runs.
    table_name = ''.join(
        random.choice(string.ascii_uppercase) for _ in range(5))
    # FIX: the original opened this file (and the 'rb' handle below)
    # without ever closing them on failure paths; `with` guarantees
    # the handles are released.
    with open(filename, 'w+') as test_file:
        test_file.write('a,b\n')
        test_file.write('john,1\n')
        test_file.write('paul,2\n')
    main_db_uri = (
        db.session.query(models.Database)
        .filter_by(database_name='main')
        .all()
    )
    try:
        with open(filename, 'rb') as test_file:
            form_data = {
                'csv_file': test_file,
                'sep': ',',
                'name': table_name,
                'con': main_db_uri[0].id,
                'if_exists': 'append',
                'index_label': 'test_label',
                'mangle_dupe_cols': False,
            }
            url = '/databaseview/list/'
            add_datasource_page = self.get_resp(url)
            assert 'Upload a CSV' in add_datasource_page
            url = '/csvtodatabaseview/form'
            form_get = self.get_resp(url)
            assert 'CSV to Database configuration' in form_get
            # Ensure the upload itself reports success.
            form_post = self.get_resp(url, data=form_data)
            assert 'CSV file \"testCSV.csv\" uploaded to table' in form_post
    finally:
        os.remove(filename)
def test_dataframe_timezone(self):
tz = psycopg2.tz.FixedOffsetTimezone(offset=60, name=None)
data = [
(datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz),),
(datetime.datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=tz),),
]
df = dataframe.SupersetDataFrame(pd.DataFrame(data=list(data),
columns=['data']))
data = df.data
self.assertDictEqual(
data[0],
{'data': pd.Timestamp('2017-11-18 21:53:00.219225+0100', tz=tz)},
)
self.assertDictEqual(
data[1],
{'data': pd.Timestamp('2017-11-18 22:06:30.061810+0100', tz=tz)},
)
def test_comments_in_sqlatable_query(self):
clean_query = "SELECT '/* val 1 */' as c1, '-- val 2' as c2 FROM tbl"
commented_query = '/* comment 1 */' + clean_query + '-- comment 2'
table = SqlaTable(sql=commented_query)
rendered_query = text_type(table.get_from_clause())
self.assertEqual(clean_query, rendered_query)
def test_slice_url_overrides(self):
# No override
self.login(username='admin')
slice_name = 'Girls'
slc = self.get_slice(slice_name, db.session)
resp = self.get_resp(slc.explore_json_url)
assert '"Jennifer"' in resp
# Overriding groupby
url = slc.get_explore_url(
base_url='/superset/explore_json',
overrides={'groupby': ['state']})
resp = self.get_resp(url)
assert '"CA"' in resp
def test_slice_payload_no_data(self):
self.login(username='admin')
slc = self.get_slice('Girls', db.session)
url = slc.get_explore_url(
base_url='/superset/explore_json',
overrides={
'filters': [{'col': 'state', 'op': 'in', 'val': ['N/A']}],
},
)
data = self.get_json_resp(url)
self.assertEqual(data['status'], utils.QueryStatus.SUCCESS)
self.assertEqual(data['error'], 'No data')
def test_slice_payload_invalid_query(self):
self.login(username='admin')
slc = self.get_slice('Girls', db.session)
url = slc.get_explore_url(
base_url='/superset/explore_json',
overrides={'groupby': ['N/A']},
)
data = self.get_json_resp(url)
self.assertEqual(data['status'], utils.QueryStatus.FAILED)
assert 'KeyError' in data['stacktrace']
def test_slice_payload_viz_markdown(self):
self.login(username='admin')
slc = self.get_slice('Title', db.session)
url = slc.get_explore_url(base_url='/superset/explore_json')
data = self.get_json_resp(url)
self.assertEqual(data['status'], None)
self.assertEqual(data['error'], None)
# Allow running this test module directly (python core_tests.py).
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
WillArmentrout/galSims | simulate/Simulate_Draft_ModifyDensityAlongArm_7.20.18.py | 1 | 23177 | #!/usr/bin/python
from scipy.stats import cauchy
import random
import math
import csv
import numpy as np
#import netCDF4 as nc
import argparse
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#import plotly.plotly as py
#import plotly.graph_objs as go
#####################################################
## TAKE IN NUMBER OF HII REGIONS FROM COMMAND LINE ##
#####################################################
# Build the CLI: a single positional integer sets how many HII regions
# the simulation will populate.
parser = argparse.ArgumentParser()
parser.add_argument("numberRegions", type=int,
    help="Number of HII Regions to Populate in Model")
args = parser.parse_args()
numRegions = args.numberRegions # Prompt User for number of Hii regions
############
## SETUP ##
############
useTremblin = False # Use the Tremblin 2014 model to determine HII region sizes
plot3D = False # Use Plotly to create interactive 3D plots of the HII region distribution
if useTremblin == True :
    # netCDF4 is only needed (and only imported) for the Tremblin model.
    import netCDF4 as nc
    ff=nc.Dataset('/Users/Marvin/Research/Projects/GalSims/3D/larson_radius_hypercube.ncdf') # Import data cube from Tremblin et. al. 2014
region = 1 # Start count of regions from 1 to NumRegions
HiiList = [] # Initialize list to store Hii data
# Per-region physical quantities, overwritten as each region is generated.
(galRad,xRot,yRot,z,mass,lum,age,radius)=(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0)
# Running luminosity totals per galactic component (diffuse, bar, 3 kpc arm, spiral).
(diffLum,barLum,ThreekpcLum,sprLum,totLum)=(0.0,0.0,0.0,0.0,0.0)
# Running region counts per galactic component.
(diffCount,barCount,ThreekpcCount,sprCount,totCount)=(0,0,0,0,0)
###############################################
## TURN ON / OFF VARIOUS GALACTIC STRUCTURES ##
###############################################
# The following definitions determine which structures will
# be present in the galaxy and what their relative proportion is.
# See Hughes et al. ApJ April 2013 for relative proportion in M51
diffuse = True
bar = True
ThreekpcArm = True
spiral = True
diffusePercent = 20
barPercent = 5
ThreekpcArmPercent = 10
spiralPercent = 100 - (diffusePercent + barPercent +
ThreekpcArmPercent)
###########################
## STRUCTURAL PARAMETERS ##
###########################
extentOfBar = 4.4 # Length of bar in kiloparsecs.
# See Benjamin et al. ApJ Sept 2005.
cutoff = 3.87#3.41#4.1 # Looking to (cutoff)x the bar length.
# Max value ~6.86 due to model limitation (Tremblin, below)
galRange = extentOfBar*cutoff
sunPos = 8.4 # Distance of Sun from GC (Reid 2009)
sunHeight = 0.02 # Distance of Sun above galactic plane (kpc) (Humphreys 1995)
circRot = 240 # Solar circular rotation speed. Reid (2014)
v0 = 0 # Initial velocity of source. Only relevant to 3kpc arm.
galRot = 44.0*math.pi/180.0 # Rotates entire galaxy by (x) degrees.
# See Benjamin et al. ApJ Sept 2005.
random.seed( 1 ) # Seed random number generator. (ARBITRARY)
numSpirals = 4 # Determines Number of Spiral arms
pitchAngle = 12.*math.pi/180. # Determines curvature of arms
# 7.3 deg --> See Wu et al. A&A April 2014 for pitch angle estimate in Sagitarrius arm
# Vallee 2014 gives pitch angle of 12 deg.
warpParam = math.pi/2 # Determines degree of Galactic warp
# DEFINE/CONVERT TO AS ANGLE?
warpHeight = 0.08 # BY INSPECTION
maxSpiralRevolutions = 1.0 # Range for number of spiral revs. (ARBITRARY)
maxCluster = 2 # Maximum number of regions in a given cluster (ARBITRARY)
avgCluster = 1 # Most commonly found number of regions in cluster (ARBITRARY)
clusterRange = 20/1000 # Sets clustered regions to be within (x) pc of each other
# See Motte et al. ApJ 2002
sigma = 0.8/2.35 # Sets FWHM of spiral arms to (x) kpc
# 200 pc See Wu et al. A&A April 2014 for deviation
# from spiral estimate in Sagitarrius arm.
# Vallee 2014 gives width of 400 pc "from mid arm to dust lane"
# Therefore, FWHM would be 800 pc and sigma = .800/2.35
zmax = .15/5 # Sets max height in z as +/- (x) kpc
gamma =0# 0.01365 # Sets spread of Cauchy-Lorentz Z-distribution of regions
alpha = 2 # Sets HII region drop off as r^-alpha(after bar)
# Mass Limits, In Units of Stellar Mass. Sets lower bound for ionizing star
#(lowerMass, upperMass) = (10, 90)
(lowerMass, upperMass) = (9, 90)
(log_lowerMass, log_upperMass) = (math.log(lowerMass), math.log(upperMass))
while region <= numRegions :
########################
## RESET INDICES, ETC ##
########################
v0 = 0
i = 1
# Reset i each time to force a region to be populated
# if all requirements are met.
selectionParam = random.random()
# Determines if Hii region is kept or thrown away.
# Forces population of regions to follow linear trend
# to end of bar and power law drop-off after bar.
numCluster = 1
numClusterTot = random.randrange(1,maxCluster,1)
whereIsRegion = random.randrange(1, diffusePercent + barPercent
+ ThreekpcArmPercent
+ spiralPercent, 1)
# Determines location of one individual region.
##################
## DIFFUSE HALO ##
##################
# HII Region will be randomly populated in Galaxy, but will not be
# be placed in central region (within bar radius).
if (whereIsRegion <= diffusePercent) and (diffuse == True) :
while i != 0 : # This loop forces an Hii region to be populated diffusely
x = random.gauss(0,galRange/2) # Sets diffuse population to have
# FWHM of galRange/2
y = random.gauss(0,galRange/2)
theta = math.atan(x/y)
galRad = pow(pow(x,2)+pow(y,2),.5)# Region's distance from center
if galRad > 11 :
galWarp = ((galRad-11)/6)*math.sin(theta)+0.3*(((galRad-11)/6)**2)*(1-math.cos(2*theta))
else :
galWarp = 0
zpos = cauchy.rvs(loc=0,scale=zmax,size=1,random_state=None)[0]
z = zpos + galWarp # Produces Cauchy-Lorentz z distribution
i += 1
if (abs(x) > extentOfBar + random.gauss(0,sigma)) \
and (galRad < galRange + random.gauss(0,sigma)) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)):
region += numClusterTot # Increase region count
i = 0 # Escape loop
elif (abs(x) < extentOfBar + random.gauss(0,sigma)) \
and (extentOfBar < galRad < galRange + random.gauss(0,sigma)) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)):
region += numClusterTot # Increase region count
i = 0 # Escape loop
##################
## GALACTIC BAR ##
##################
elif (whereIsRegion > diffusePercent) \
and (whereIsRegion <= (diffusePercent + barPercent)) \
and (bar == True) :
while i != 0 : # This loop forces an Hii region to be populated in bar
x = random.uniform(-extentOfBar,extentOfBar) + random.gauss(0,sigma) # Returns random number between (-extentOfBar,extentOfBar)
y = random.gauss(0,sigma) # Sets thickness of bar to (sigma) kpc
theta = math.atan(x/y)
galRad = pow(pow(x,2)+pow(y,2),.5)# Region's distance from center
galWarp = 0 # No warp assigned within R_Gal = 11 kpc
zPos = cauchy.rvs(loc=0,scale=zmax,size=1,random_state=None)[0]
z = galWarp + zPos
# Produces Cauchy-Lorentz z distribution
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (selectionParam < galRad/(extentOfBar)) \
and (galRad < galRange) :
region += numClusterTot # Increase region count
i = 0 # Escape loop
# Note: Distribution was slightly higher than observed. Dropped with 0.9 factor.
######################
## 3 KILOPARSEC ARM ##
######################
elif (whereIsRegion > (diffusePercent + barPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (ThreekpcArm == True) :
yInt = 3 #extentOfBar/2 Eccentricity = Sqrt(1-(3/4.4)^2)
ySign = random.randrange(-1,1)
while i != 0 : # This loop forces an Hii region to be populated in 3 kpc arm
xCart = random.uniform(-extentOfBar,extentOfBar)
yCart = math.copysign(yInt*pow(1-pow(xCart,2)/pow(extentOfBar,2),.5),ySign) # Produces 3 kpc arm structure
x = xCart + random.gauss(0, sigma) # Gaussian distribution around 3 kpc arm
y = yCart + random.gauss(0, sigma)
theta = math.atan(x/y)
zPos = cauchy.rvs(loc=0,scale=zmax,size=1,random_state=None)[0]
galWarp = 0 # No warp assigned within R_Gal = 11 kpc
z = galWarp + zPos # EDIT TO Produces Cauchy-Lorentz z distribution
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (selectionParam < galRad/extentOfBar) \
and (galRad < galRange) :
v0 = 53 # Expansion of 3kpc arm
region += numClusterTot # Increase region count
i = 0 # Escape loop
#################
## SPIRAL ARMS ##
#################
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + spiralPercent)) \
and (spiral == True):
while i != 0 : # This loop forces an Hii region to be populated in arms
whichArm = random.randint(0,numSpirals-1)
theta = random.uniform(0,2*np.pi*maxSpiralRevolutions)
if whichArm == 0:
phi0 = 223.*math.pi/180
elif whichArm == 1:
phi0 = 108.*math.pi/180
elif whichArm == 2:
phi0 = 43.*math.pi/180
elif whichArm == 3:
phi0 = 288.*math.pi/180
r = extentOfBar*math.exp(pitchAngle*theta)
xCart = r*math.cos(theta-phi0)
yCart = r*math.sin(theta-phi0)
x = xCart + random.gauss(0,sigma) # Gaussian distribution around spiral
y = yCart + random.gauss(0,sigma)
#theta = math.atan(x/y)
galRad = pow(pow(x,2)+pow(y,2),.5)# Region's distance from center
if galRad > 11 :
galWarp = ((galRad-11)/6)*math.sin(theta)+0.3*(((galRad-11)/6)**2)*(1-math.cos(2*theta))
else :
galWarp = 0
zPos = cauchy.rvs(loc=0,scale=zmax,size=1,random_state=None)[0]
z = galWarp + zPos
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center in kpc
i += 1
if (galRad < galRange) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)) :
region += numClusterTot # Increase region count
i = 0 # Escape Loop
############################################
## DETERMINE INDIVIDUAL REGION PARAMETERS ##
############################################
while (i == 0) and (numCluster <= numClusterTot) :
#######################################
## UPDATE REGION POSITION / DISTANCE ##
#######################################
# Rotate galaxy to match Milky Way's rotation
xRot = x*math.cos(galRot) - y*math.sin(galRot)
yRot = x*math.sin(galRot) + y*math.cos(galRot)
# Determine Distance and Galactic Coordinates
dist = pow(pow(xRot,2)+pow(yRot-sunPos,2),0.5)
l = math.copysign(math.acos((pow(dist,2)+pow(sunPos,2)-pow(galRad,2))/(2*sunPos*dist))*180/math.pi,xRot)
b = math.atan((z-sunHeight)/dist)
# Set velocity of source
omega = circRot/galRad # Assume flat rotation curve.
omega0 = circRot/sunPos
if (whereIsRegion > diffusePercent) \
and (whereIsRegion <= (diffusePercent + barPercent)) \
and (bar == True) :
vR = galRad/extentOfBar*((omega - omega0)*sunPos*math.sin(l*math.pi/180)+v0*math.cos(theta))
elif (whereIsRegion > (diffusePercent + barPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (ThreekpcArm == True) :
vR = galRad/extentOfBar*((omega - omega0)*sunPos*math.sin(l*math.pi/180)+v0*math.cos(theta))
else :
vR = (omega - omega0)*sunPos*math.sin(l*math.pi/180)+v0*math.cos(theta)
######################
## AGE DISTRIBUTION ##
######################
# Set Age Distribution
timeParam = random.randint(0,99)
age = timeParam*.127 # Age in Myr (12.7 Myr limit) in Tremblin model
##########################
## ELECTRON TEMPERATURE ##
##########################
# Set Electron Temperature Distribution
# Relationship taken from Balser et.al. 2015, put in range accepted by Tremblin model
# Tremblin model ranges from 5000 K to 15000 K in 1000 K increments
T_e = 4928 + 277*random.gauss(0,1) + galRad*(385 + 29*random.gauss(0,1))
# T_e = 6080 + galRad*378 # Averaged value suggested in Tremblin 2014
TeParam = int(round(T_e,-3)/1000 - 5)
###################################
## NEUTRAL HYDROGEN DISTRIBUTION ##
###################################
# Set Neutral Hydrogen Density Distribution
# Tremblin model ranges from 1700 cm-3 to 5100 cm-3 in 340 cm-3 increments
densityParam = random.randint(0,10)
n_H = 1700 + densityParam*340
#######################
## MASS DISTRIBUTION ##
#######################
# Set Host Star Mass Distribution
massParam = random.random() # Used in forcing powerlaw fit
while massParam > 0. :
log_mass = random.uniform(log_lowerMass,log_upperMass)
mass = math.exp(log_mass)
# Compute likelihood of candidate from Salpeter IMF
likelihood = math.pow(mass, 1.0 - 2.35)
maxLikelihood = math.pow(lowerMass, 1.0 - 2.35)
massParam = random.uniform(0,maxLikelihood)
IMF = pow(lowerMass,2.35-1)*pow(mass,1-2.35)
#lifetime = 10000.*pow(mass,-2.5) # 10 billion years for Sun, less for higher mass stars
# L~M^3.5. Lifetime ~ M/L ~ M^(1-3.5) ~ M^(-2.5)
#print str(mass) + " : " + str(massParam) + " <? " + str(IMF) + " : " + str(age) + " <? " + str(lifetime)
if (massParam < likelihood) :#and (age < lifetime) : # Makes power law fit
massParam = 0. # Escape loop
#########################
## IONIZING LUMINOSITY ##
#########################
'''
lumPowerLaw = 3.5 # Used 1.94 previously (WHY?)
lumMin = math.log10(pow(lowerMass,lumPowerLaw))
lumMax = math.log10(pow(upperMass,lumPowerLaw))
lumParam = int(round((math.log10(pow(mass,lumPowerLaw))-lumMin)/(lumMax-lumMin)*16,0)) # Use this line to access all values of Lum from 10^47 - 10^51
# fluxParam = int(round((math.log10(pow(mass,1.94))-fluxMin)/(fluxMax-fluxMin)*12,0)+4)
'''
# Set Host Star Ionizing Luminosity Distribution
# Tremblin model ranges from 10^47 to 10^51 in quarter-dec increments
# In practice these are given as 47 to 51 in steps of 0.25
# B-star mass ranges come from Silaj et. al 2014 and Armentrout et al. 2017
# O-star mass ranges comes from Loren Anderson's Thesis (Eq 6.1, Boston University 2009)
'''
if mass < 18:
N_ly = 43.4818+0.231166*mass
else :
N_ly = 46.95*math.pow(mass-16.27,7./500.) # Fit to Sternberg 2003 by Anderson 2010
'''
# Set Host Star Ionizing Luminosity Distribution
# Tremblin model ranges from 10^47 to 10^51 in quarter-dec increments
# In practice these are given as 47 to 51 in steps of 0.25
# B-star mass ranges come from Silaj et. al 2014 and Armentrout et al. 2017
# O-star mass ranges comes from Sternberg 2003 and Armentrout et al. 2017
if mass < 9.11 :
N_ly = 45.57 # B2
elif mass < 10.135 : # interpolated
N_ly = 45.835
elif mass < 11.16 : # (13.21+9.11)/2., interpolated
N_ly = 46.1 # B1.5
elif mass < 12.185: # interpolated
N_ly = 46.3
elif mass < 13.21 :
N_ly = 46.5 # B1
elif mass < 14.1575: # interpolated
N_ly = 46.75
elif mass < 15.105 : # (13.21+17.)/2., interpolated
N_ly = 47. # B0.5
elif mass < 16.0525: # interpolated
N_ly = 47.2
elif mass < 17. :
N_ly = 47.4 # B0
elif mass < 20.15: # interpolated
N_ly = 47.48
elif mass < 23.3 :
N_ly = 47.56 # O9.5
elif mass < 24.35: # interpolated
N_ly = 47.73
elif mass < 25.4:
N_ly = 47.9 # O9
elif mass < 26.7 : # interpolated
N_ly = 48
elif mass < 28 :
N_ly = 48.1 # O8.5
elif mass < 29.4 : # interpolated
N_ly = 48.195
elif mass < 30.8 :
N_ly = 48.29 # O8
elif mass < 32.45 : # interpolated
N_ly = 48.365
elif mass < 34.1:
N_ly = 48.44 # O7.5
elif mass < 35.9 : # interpolated
N_ly = 48.535
elif mass < 37.7 :
N_ly = 48.63 # O7
elif mass < 39.35 : # interpolated
N_ly = 48.715
elif mass < 41 :
N_ly = 48.80 # O6.5
elif mass < 43.1 : # interpolated
N_ly = 48.88
elif mass < 45.2 :
N_ly = 48.96 # O6
elif mass < 47.8 : # interpolated
N_ly = 49.035
elif mass < 50.4 :
N_ly = 49.11 # O5.5
elif mass < 53.5 : # interpolated
N_ly = 49.185
elif mass < 56.6 :
N_ly = 49.26 # O5
elif mass < 62.75 : # interpolated
N_ly = 49.365
elif mass < 68.9 :
N_ly = 49.47 # O4
elif mass < 78.25 : # interpolated
N_ly = 49.55
else :
N_ly = 49.63 # O3
# Conform ionizing luminosities to fit Tremblin model
# Round ionizing luminosities to the nearest quarter dec
if N_ly < 47 :
lumParam = 47
else :
lumParam = round(4.*N_ly)/4
freq_GHz = 10
regionLum = pow(10,N_ly)*pow(T_e,0.45)*pow(freq_GHz,-0.1)/(6.3*pow(10,52)) # Derived from Eq. 4 in Armentrout et al. 2017
regionFlux = regionLum/(4*math.pi*dist**2) # UNITS?
####################
## SIZE OF REGION ##
####################
# From Distributions, Determine HII Region Radius
if useTremblin == True :
# Using Pascal Tremblin's hypercube data
# TESTING. TAKE THESE OUT.
timeParam=2
lumParam = 47.25
TeParam = int(round(T_e,-3)/1000-5)
#TeParam = int(round((5756 + 303*random.uniform(-1,1)) + galRad*(299 + 31*random.uniform(-1,1)),-3)/1000 - 5)
densityParam = 2
radius = ff.variables['radius'][timeParam,lumParam,TeParam,densityParam]
else :
#alpha_h = 3.*pow(10.,-13.)
alpha_h = 1.17*pow(10.,-13.)*pow(T_e/10000,-0.942-0.031*math.log(T_e/10000)) #Equation 14.8 From Draine pg 142, Second Printing
# n_e = 10.**3. #removed 10.19.18
n_e = n_H
age_sec = age*10**6.*3.154*10**7.
soundSpeed = 20000 # in cm/s (0.2 km/s) Tremblin 14
rad_initial = pow(3.*pow(10.,N_ly)/(4.*math.pi*alpha_h*pow(n_e,2.)),(1./3.)) #radius in cm
radius= rad_initial*pow(1+7*age_sec*soundSpeed/(4*rad_initial),4./7.)*3.24*pow(10,-19.) #radius in pc, time evolution from Spitzer 1968
#############
## TESTING ##
#############
# This section allows the user to test various parameters for easy
# output to terminal (e.g. luminosity of various features, counts
# of regions in spiral versus bar, etc.)
if (whereIsRegion <= diffusePercent) \
and (diffuse == True) :
diffLum = diffLum + lum
diffCount += 1
regNum = 1
elif (whereIsRegion > diffusePercent) \
and (whereIsRegion <= (diffusePercent + barPercent)) \
and (bar == True) :
barLum = barLum + lum
barCount += 1
regNum = 2
elif (whereIsRegion > (diffusePercent + barPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (ThreekpcArm == True) :
ThreekpcLum = ThreekpcLum + lum
ThreekpcCount += 1
regNum = 3
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + spiralPercent)) \
and (spiral == True):
sprLum = sprLum + lum
sprCount += 1
if whichArm == 0 :
regNum = 5
elif whichArm == 1 :
regNum = 6
elif whichArm == 2 :
regNum = 7
elif whichArm == 3 :
regNum = 8
totLum = totLum + lum
#print region
#####################
## APPEND TO ARRAY ##
#####################
HiiList.append([galRad,xRot,yRot,z,mass,N_ly,age,radius,l,vR,regNum,b,regionFlux])
numCluster += 1
######################
## PLOT DATA POINTS ##
######################
if plot3D == True :
i = 0
(xlist,ylist,zlist)=(list(),list(),list())
while i < len(HiiList):
xlist.append(HiiList[i][1])
ylist.append(HiiList[i][2])
zlist.append(HiiList[i][3])
i+=1
trace = go.Scatter3d(x=xlist,y=ylist,z=zlist,mode='markers',marker=dict(size=5,line=dict(color='rgba(217,217,217,0.14)',width=0.5),opacity=0.8))
data=[trace]
layout = go.Layout(margin=dict(l=0,r=0,b=0,t=0))
fig = go.Figure(data=data,layout=layout)
py.iplot(fig,filename='3dscatter')
###################
## WRITE TO FILE ##
###################
with open("HIIregion_popSynthesis.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(HiiList)
'''
print "Diffuse Luminosity : " + str(diffLum*100/totLum) + "% (" + str(diffCount) + " Regions)"
print "Bar Luminosity : " + str(barLum*100/totLum) + "% (" + str(barCount) + " Regions)"
print "3 kpc Arm Luminosity : " + str(ThreekpcLum*100/totLum) + "% ("+ str(ThreekpcCount) + " Regions)"
print "Spiral Luminosity : " + str(sprLum*100/totLum) + "% (" + str(sprCount) + " Regions)"
print "Total Luminosity : " + str((barLum+ThreekpcLum+sprLum+diffLum)*100/totLum) + "% (" + str(barCount+ThreekpcCount+sprCount+diffCount) + " Regions)"
'''
| gpl-2.0 |
xavierwu/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the value number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """
    Colormap normalizer that pins ``midpoint`` to the middle (0.5) of the
    colormap, interpolating linearly on either side of it.

    Useful when the interesting range of values is concentrated near one
    end of [vmin, vmax].
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1.
        anchors = [self.vmin, self.midpoint, self.vmax]
        targets = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors, targets))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/classification/plot_classification_probability.py | 138 | 2871 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities=
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
tmthydvnprt/compfipy | compfipy/util.py | 1 | 4568 | """
util.py
General constants and functions that will be used throughout the package.
"""
import numpy as np
import pandas as pd
# Constants
# ------------------------------------------------------------------------------------------------------------------------------
# Display constants
# Width (in characters) of dashed separator lines in console output.
COL_DASH_WIDTH = 128
# Time constants
# Starting price used when synthesizing a price series from returns
# (see calc_price / rebase_price below).
DEFAULT_INITIAL_PRICE = 100.0
DAYS_IN_YEAR = 365.25
# Typical number of trading days per year.
DAYS_IN_TRADING_YEAR = 252.0
MONTHS_IN_YEAR = 12.0
# Percent Constants
# Annual risk-free rate (1%) — presumably used by downstream risk metrics.
RISK_FREE_RATE = 0.01
# Trading Signal Constants
# Fibonacci retracement levels expressed as decimal fractions of a move.
FIBONACCI_DECIMAL = np.array([0, 0.236, 0.382, 0.5, 0.618, 1])
FIBONACCI_SEQUENCE = [0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
# Lookback windows (trading days) used for ranking — TODO confirm pairing
# with RANK_PERCENTS below.
RANK_DAYS_IN_TRADING_YEAR = [200, 125, 50, 20, 3, 14]
# Per-window ranking weights. NOTE(review): these sum to 1.9, not 1.0 —
# confirm whether they are meant to be normalized.
RANK_PERCENTS = [0.3, 0.3, 0.15, 0.15, 0.5, 0.5]
# Number Formater Functions
# ------------------------------------------------------------------------------------------------------------------------------
def fmtp(x):
    """
    Format a fraction as a percent string ('-' for NaN), e.g. 0.5 -> '50.00%'.
    """
    if np.isnan(x):
        return '-'
    return format(x, '.2%')
def fmtpn(x):
    """
    Format a fraction as a percent without the '%' sign ('-' for NaN),
    e.g. 0.125 -> '12.50'.
    """
    if np.isnan(x):
        return '-'
    return format(100.0 * x, '.2f')
def fmtn(x):
    """
    Format a number as a two-decimal float string ('-' for NaN).
    """
    if np.isnan(x):
        return '-'
    return format(x, '.2f')
def fmttn(x):
    """
    Format as text-notation float (Thousand, Million, Billion, Trillion),
    e.g. 1500 -> '1.50 k', -2.5e6 -> '-2.50 M'; '-' for NaN.
    """
    if np.isnan(x):
        return '-'
    magnitude = abs(x)
    if magnitude < 1e3:
        return '{:0.2f}'.format(x)
    # Half-open ranges [1e3, 1e6) -> k, [1e6, 1e9) -> M, [1e9, 1e12) -> B.
    for divisor, suffix in ((1e3, 'k'), (1e6, 'M'), (1e9, 'B')):
        if magnitude < divisor * 1e3:
            return '{:0.2f} {}'.format(x / divisor, suffix)
    return '{:0.2f} T'.format(x / 1e12)
# Number Parser Functions
# ------------------------------------------------------------------------------------------------------------------------------
def prsp(x):
    """
    Parse a percent string to a fraction, e.g. '50%' -> 0.5.

    Returns np.nan for the '-' placeholder produced by fmtp().
    """
    # Fix: compare with ==, not `is`. Identity comparison against a string
    # literal only worked by CPython interning and is a SyntaxWarning on
    # Python >= 3.8.
    return np.nan if x == '-' else float(x.replace('%', '')) / 100.0
def prspn(x):
    """
    Parse a percent string without the sign to a fraction, e.g. '50' -> 0.5.

    Returns np.nan for the '-' placeholder produced by fmtpn().
    """
    # Fix: == instead of identity comparison against a string literal.
    return np.nan if x == '-' else float(x) / 100.0
def prsn(x):
    """
    Parse a string as a float; np.nan for the '-' placeholder.
    """
    # Fix: == instead of identity comparison against a string literal.
    return np.nan if x == '-' else float(x)
def prstn(x):
    """
    Parse a text-notation number string, e.g. '1.5M' -> 1.5e6.

    Suffixes T/B/M are matched case-sensitively, 'k' case-insensitively
    (mirroring fmttn); returns np.nan when the string does not parse.
    """
    try:
        stripped = x.strip()
        for suffix, multiplier in (('T', 1e12), ('B', 1e9), ('M', 1e6)):
            if stripped.endswith(suffix):
                return float(x[:-1]) * multiplier
        if stripped.lower().endswith('k'):
            return float(x[:-1]) * 1e3
        return float(x)
    except ValueError:
        return np.nan
# General Price Helper Functions
# ------------------------------------------------------------------------------------------------------------------------------
def sma(x, n=20):
    """
    Return the n-period simple moving average of pandas data x.

    Fix: pd.rolling_mean() was removed in pandas 0.23; the equivalent
    supported API is the .rolling() method (same default
    min_periods == window, so leading values are NaN).
    """
    return x.rolling(n).mean()
def ema(x, n=20):
    """
    Return the exponential moving average of pandas data x.

    Fix: pd.ewma() was removed from pandas. Its second positional argument
    was the center of mass, so .ewm(com=n).mean() reproduces the original
    behavior (adjust=True, min_periods defaults match).
    """
    return x.ewm(com=n).mean()
def calc_returns(x):
    """
    Calculate arithmetic (simple) returns of a price series:
    r_t = p_t / p_{t-1} - 1. The first element is NaN.
    """
    previous = x.shift(1)
    return x / previous - 1.0
def calc_log_returns(x):
    """
    Calculate continuously compounded (log) returns of a price series:
    r_t = ln(p_t / p_{t-1}). The first element is NaN.
    """
    ratio = x / x.shift(1)
    return np.log(ratio)
def calc_price(x, x0=DEFAULT_INITIAL_PRICE):
    """
    Build a price series from a returns series, starting at x0.

    NaN returns (e.g. the leading NaN from calc_returns) are treated as
    zero-return periods.
    """
    growth_factors = x.replace(to_replace=np.nan, value=0) + 1.0
    return growth_factors.cumprod() * x0
def calc_cagr(x):
    """
    Calculate the compound annual growth rate of a price series whose
    index is datetime-like (uses the elapsed days between first and last
    index entries).
    """
    start = x.index[0]
    end = x.index[-1]
    years = (end - start).days / DAYS_IN_YEAR
    # Fix: the .ix accessor was removed in pandas 1.0; use positional .iloc.
    return np.power(x.iloc[-1] / x.iloc[0], 1.0 / years) - 1.0
def rebase_price(x, x0=DEFAULT_INITIAL_PRICE):
    """
    Rescale a price series so that its first value equals x0.
    """
    # Fix: the .ix accessor was removed in pandas 1.0; use positional .iloc.
    return x0 * x / x.iloc[0]
# General Number Helper Functions
# ------------------------------------------------------------------------------------------------------------------------------
def scale(x, src_range, dst_range):
    """
    Linearly map x from src_range=(xmin, xmax) to dst_range=(ymin, ymax),
    clipping values outside the source range to the destination bounds.

    Fix: rewritten from the Python-2-only tuple-parameter signature
    ``def scale(x, (xmin, xmax), (ymin, ymax))``, which is a syntax error
    on Python 3. Call sites that pass two tuples are unaffected.
    """
    # Ensure everything is a float
    x = float(x)
    xmin, xmax = (float(v) for v in src_range)
    ymin, ymax = (float(v) for v in dst_range)
    # Scale input while handling bounds
    if x < xmin:
        return ymin
    elif x > xmax:
        return ymax
    else:
        return ((ymax - ymin) * (x - xmin) / (xmax - xmin)) + ymin
| mit |
Vimos/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 124 | 1877 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
    """
    L1 penalty contour: for each x, the |y| satisfying |x| + |y| = 1.
    """
    contour = (np.sqrt((1 - np.sqrt(value ** 2.0)) ** 2.0) for value in xs)
    return np.array(list(contour))
def l2(xs):
    """
    L2 penalty contour: y = sqrt(1 - x^2), the unit circle's upper half.
    """
    circle = [np.sqrt(1.0 - value ** 2.0) for value in xs]
    return np.array(circle)
def el(xs, z):
    """
    Elastic-net penalty contour at mixing parameter z: for each x, the y
    solving the elastic-net unit-ball equation (closed-form quadratic root).
    Undefined at z = 0.5 (denominator 2 - 4z vanishes).
    """
    def _y(x):
        linear = 2 - 2 * x - 2 * z + 4 * x * z
        discriminant = (4 * z ** 2
                        - 8 * x * z ** 2
                        + 8 * x ** 2 * z ** 2
                        - 16 * x ** 2 * z ** 3
                        + 8 * x * z ** 3
                        + 4 * x ** 2 * z ** 4)
        return (linear - discriminant ** (1. / 2) - 2 * x * z ** 2) / (2 - 4 * z)

    return np.array([_y(x) for x in xs])
def cross(ext):
    """
    Draw black x/y axes through the origin, each spanning [-ext, ext],
    on the current matplotlib axes.
    """
    span = [-ext, ext]
    origin = [0, 0]
    plt.plot(span, origin, "k-")
    plt.plot(origin, span, "k-")
# Sample the x-axis of the first quadrant; each penalty curve is mirrored
# into the other three quadrants below.
xs = np.linspace(0, 1, 100)
alpha = 0.501  # slightly off 0.5 to avoid division through zero in el()
# Axes through the origin, slightly larger than the unit balls.
cross(1.2)
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"
lw = 2
# L1 diamond: plot the first-quadrant arc, then its three reflections.
plt.plot(xs, l1(xs), color=l1_color, label="L1", lw=lw)
plt.plot(xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l1(xs), color=l1_color, lw=lw)
# L2 circle, mirrored the same way.
plt.plot(xs, l2(xs), color=l2_color, label="L2", lw=lw)
plt.plot(xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l2(xs), color=l2_color, lw=lw)
# Elastic-net contour at mixing parameter alpha, mirrored the same way.
plt.plot(xs, el(xs, alpha), color=elastic_net_color, label="Elastic Net", lw=lw)
plt.plot(xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
epam/DLab | infrastructure-provisioning/src/general/lib/aws/actions_lib.py | 1 | 82712 | # *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import boto3
import botocore
from botocore.client import Config
import backoff
from botocore.exceptions import ClientError
import time
import sys
import os
import json
from fabric.api import *
from fabric.contrib.files import exists
import logging
from dlab.meta_lib import *
from dlab.fab import *
import traceback
import urllib2
import meta_lib
import dlab.fab
def backoff_log(err):
    """
    Giveup handler for the backoff-decorated tag creation: log the failure,
    record it through append_result() and dump the traceback to stdout.
    """
    # Fix: traceback.print_exc() returns None, so concatenating it into the
    # message raised TypeError; format_exc() returns the traceback text.
    trace = traceback.format_exc()
    logging.info("Unable to create Tag: " + str(err) + "\n Traceback: " + trace)
    append_result(str({"error": "Unable to create Tag",
                       "error_message": str(err) + "\n Traceback: " + trace}))
    traceback.print_exc(file=sys.stdout)
def put_to_bucket(bucket_name, local_file, destination_file):
    """
    Upload local_file to S3 bucket_name as destination_file with AES256
    server-side encryption. Returns True on success, False on failure.
    """
    try:
        s3 = boto3.client('s3', config=Config(signature_version='s3v4'), region_name=os.environ['aws_region'])
        with open(local_file, 'rb') as data:
            s3.upload_fileobj(data, bucket_name, destination_file, ExtraArgs={'ServerSideEncryption': 'AES256'})
        return True
    except Exception as err:
        # Fix: traceback.print_exc() returns None; format_exc() returns text,
        # so the message no longer raises TypeError on concatenation.
        logging.info("Unable to upload files to S3 bucket: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to upload files to S3 bucket", "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
        return False
def create_s3_bucket(bucket_name, tag, region):
    """
    Create an S3 bucket in the given region, enable default AES256
    server-side encryption, and apply the supplied tag plus the
    resource-id tag and any conf_additional_tags. Returns the bucket name.
    """
    try:
        s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
        # us-east-1 must not be passed as a LocationConstraint.
        if region == "us-east-1":
            bucket = s3.create_bucket(Bucket=bucket_name)
        else:
            bucket = s3.create_bucket(Bucket=bucket_name,
                                      CreateBucketConfiguration={'LocationConstraint': region})
        boto3.client('s3', config=Config(signature_version='s3v4')).put_bucket_encryption(
            Bucket=bucket_name, ServerSideEncryptionConfiguration={
                'Rules': [
                    {
                        'ApplyServerSideEncryptionByDefault': {
                            'SSEAlgorithm': 'AES256'
                        }
                    },
                ]
            })
        tags = [tag]
        tags.append({'Key': os.environ['conf_tag_resource_id'],
                     'Value': os.environ['conf_service_base_name'] + ':' + bucket_name})
        if 'conf_additional_tags' in os.environ:
            # Fix: use a distinct loop variable so the `tag` parameter is
            # not clobbered while building the tag list.
            for extra_tag in os.environ['conf_additional_tags'].split(';'):
                tags.append(
                    {
                        'Key': extra_tag.split(':')[0],
                        'Value': extra_tag.split(':')[1]
                    }
                )
        tagging = bucket.Tagging()
        tagging.put(Tagging={'TagSet': tags})
        tagging.reload()
        return bucket.name
    except Exception as err:
        # Fix: format_exc() instead of print_exc() (which returns None).
        logging.info("Unable to create S3 bucket: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to create S3 bucket", "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def create_vpc(vpc_cidr, tag):
    """
    Create a VPC with the given CIDR block, tag it, and return its id.
    """
    try:
        ec2 = boto3.resource('ec2')
        vpc = ec2.create_vpc(CidrBlock=vpc_cidr)
        create_tag(vpc.id, tag)
        return vpc.id
    except Exception as err:
        # Fix: format_exc() instead of print_exc() (which returns None).
        logging.info("Unable to create VPC: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to create VPC", "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def enable_vpc_dns(vpc_id):
    """
    Enable DNS hostnames on the given VPC.
    """
    try:
        client = boto3.client('ec2')
        client.modify_vpc_attribute(VpcId=vpc_id,
                                    EnableDnsHostnames={'Value': True})
    except Exception as err:
        # Fix: format_exc() instead of print_exc() (which returns None).
        logging.info("Unable to modify VPC attributes: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to modify VPC attributes", "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_vpc(vpc_id):
    """
    Delete the given VPC.
    """
    try:
        client = boto3.client('ec2')
        client.delete_vpc(VpcId=vpc_id)
        print("VPC {} has been removed".format(vpc_id))
    except Exception as err:
        # Fix: format_exc() instead of print_exc() (which returns None).
        logging.info("Unable to remove VPC: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove VPC", "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
@backoff.on_exception(backoff.expo,
                      botocore.exceptions.ClientError,
                      max_tries=40,
                      on_giveup=backoff_log)
def create_tag(resource, tag, with_tag_res_id=True):
    """
    Apply a tag (dict or JSON string with 'Key'/'Value') to one or more EC2
    resources, plus the resource-id tag (when with_tag_res_id) and any
    conf_additional_tags. Retries ClientError with exponential backoff.
    """
    print('Tags for the resource {} will be created'.format(resource))
    tags_list = list()
    ec2 = boto3.client('ec2')
    # Accept either a ready dict or a JSON-encoded tag string.
    if isinstance(tag, dict):
        resource_name = tag.get('Value')
        resource_tag = tag
    else:
        resource_name = json.loads(tag).get('Value')
        resource_tag = json.loads(tag)
    if type(resource) != list:
        resource = [resource]
    tags_list.append(resource_tag)
    if with_tag_res_id:
        tags_list.append(
            {
                'Key': os.environ['conf_tag_resource_id'],
                'Value': os.environ['conf_service_base_name'] + ':' + resource_name
            }
        )
    if 'conf_additional_tags' in os.environ:
        # Fix: distinct loop variable — the original reused the `tag`
        # parameter name here, silently clobbering it.
        for additional_tag in os.environ['conf_additional_tags'].split(';'):
            tags_list.append(
                {
                    'Key': additional_tag.split(':')[0],
                    'Value': additional_tag.split(':')[1]
                }
            )
    ec2.create_tags(
        Resources=resource,
        Tags=tags_list
    )
def remove_emr_tag(emr_id, tag):
    """
    Remove the given tag keys from an EMR cluster.
    """
    try:
        emr = boto3.client('emr')
        emr.remove_tags(ResourceId=emr_id, TagKeys=tag)
    except Exception as err:
        # Fix: format_exc() instead of print_exc() (which returns None).
        logging.info("Unable to remove Tag: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove Tag", "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def create_rt(vpc_id, infra_tag_name, infra_tag_value):
    """
    Create a route table in the VPC, attach a new internet gateway, add a
    default (0.0.0.0/0) route through it, tag both resources, and return
    the route table id.
    """
    try:
        tag = {"Key": infra_tag_name, "Value": infra_tag_value}
        ec2 = boto3.client('ec2')
        rt = ec2.create_route_table(VpcId=vpc_id)
        rt_id = rt.get('RouteTable').get('RouteTableId')
        print('Created Route-Table with ID: {}'.format(rt_id))
        create_tag([rt_id], json.dumps(tag))
        ig = ec2.create_internet_gateway()
        ig_id = ig.get('InternetGateway').get('InternetGatewayId')
        create_tag([ig_id], json.dumps(tag))
        ec2.attach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id)
        ec2.create_route(DestinationCidrBlock='0.0.0.0/0', RouteTableId=rt_id, GatewayId=ig_id)
        return rt_id
    except Exception as err:
        # Fix: format_exc() instead of print_exc() (which returns None).
        logging.info("Unable to create Route Table: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to create Route Table", "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def create_subnet(vpc_id, subnet, tag):
    """
    Create a subnet with the given CIDR in the VPC, tag it, and return
    its id.
    """
    try:
        ec2 = boto3.resource('ec2')
        subnet = ec2.create_subnet(VpcId=vpc_id, CidrBlock=subnet)
        create_tag(subnet.id, tag)
        subnet.reload()
        return subnet.id
    except Exception as err:
        # Fix: format_exc() instead of print_exc() (which returns None).
        logging.info("Unable to create Subnet: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to create Subnet", "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def create_security_group(security_group_name, vpc_id, security_group_rules, egress, tag):
    """
    Create a security group in the VPC, tag it, drop the default allow-all
    egress rule, apply the supplied ingress/egress rules, and return the
    group id.
    """
    ec2 = boto3.resource('ec2')
    # NOTE(review): Description is the literal string 'security_group_name',
    # not the variable — looks unintended, kept for compatibility.
    group = ec2.create_security_group(GroupName=security_group_name, Description='security_group_name', VpcId=vpc_id)
    # Give AWS time to propagate the new group before tagging it.
    time.sleep(10)
    create_tag(group.id, tag)
    try:
        group.revoke_egress(IpPermissions=[{"IpProtocol": "-1", "IpRanges": [{"CidrIp": "0.0.0.0/0"}], "UserIdGroupPairs": [], "PrefixListIds": []}])
    except Exception:
        # Fix: narrowed from a bare `except:` (which would also swallow
        # KeyboardInterrupt/SystemExit); best-effort removal is intended.
        print("Mentioned rule does not exist")
    for rule in security_group_rules:
        group.authorize_ingress(IpPermissions=[rule])
    for rule in egress:
        group.authorize_egress(IpPermissions=[rule])
    return group.id
def enable_auto_assign_ip(subnet_id):
    """
    Enable automatic public IP assignment on the given subnet.
    """
    try:
        client = boto3.client('ec2')
        client.modify_subnet_attribute(MapPublicIpOnLaunch={'Value': True}, SubnetId=subnet_id)
    except Exception as err:
        # Fix: format_exc() instead of print_exc() (which returns None).
        logging.info("Unable to create Subnet: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to create Subnet",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def create_instance(definitions, instance_tag, primary_disk_size=12):
    """Launch a single EC2 instance according to *definitions* and tag it.

    :param definitions: object with attributes ami_id, key_name,
        security_group_ids (comma-separated string), instance_type, subnet_id,
        iam_profile, user_data_file, instance_class ('notebook',
        'dataengine' or other), instance_disk_size and node_name
    :param instance_tag: extra tag dict applied to the instance and volumes
    :param primary_disk_size: size in GB of the root EBS volume
    :return: instance id, '' if no instance was created, or None on error
    """
    try:
        ec2 = boto3.resource('ec2')
        security_groups_ids = []
        for chunk in definitions.security_group_ids.split(','):
            security_groups_ids.append(chunk.strip())
        user_data = ''
        if definitions.user_data_file != '':
            try:
                # Read the whole user-data file; the redundant f.close()
                # inside the with-block was removed.
                with open(definitions.user_data_file, 'r') as f:
                    user_data = f.read()
            except Exception:
                print("Error reading user-data file")
        if definitions.instance_class == 'notebook':
            # Notebooks get a root volume plus a secondary data volume.
            instances = ec2.create_instances(
                ImageId=definitions.ami_id, MinCount=1, MaxCount=1,
                BlockDeviceMappings=[
                    {"DeviceName": "/dev/sda1",
                     "Ebs": {"VolumeSize": int(primary_disk_size)}},
                    {"DeviceName": "/dev/sdb",
                     "Ebs": {"VolumeSize": int(definitions.instance_disk_size)}}],
                KeyName=definitions.key_name,
                SecurityGroupIds=security_groups_ids,
                InstanceType=definitions.instance_type,
                SubnetId=definitions.subnet_id,
                IamInstanceProfile={'Name': definitions.iam_profile},
                UserData=user_data)
        elif definitions.instance_class == 'dataengine':
            instances = ec2.create_instances(
                ImageId=definitions.ami_id, MinCount=1, MaxCount=1,
                BlockDeviceMappings=[
                    {"DeviceName": "/dev/sda1",
                     "Ebs": {"VolumeSize": int(primary_disk_size)}}],
                KeyName=definitions.key_name,
                SecurityGroupIds=security_groups_ids,
                InstanceType=definitions.instance_type,
                SubnetId=definitions.subnet_id,
                IamInstanceProfile={'Name': definitions.iam_profile},
                UserData=user_data)
        else:
            # Other classes keep the AMI's default block devices.
            get_iam_profile(definitions.iam_profile)
            instances = ec2.create_instances(
                ImageId=definitions.ami_id, MinCount=1, MaxCount=1,
                KeyName=definitions.key_name,
                SecurityGroupIds=security_groups_ids,
                InstanceType=definitions.instance_type,
                SubnetId=definitions.subnet_id,
                IamInstanceProfile={'Name': definitions.iam_profile},
                UserData=user_data)
        for instance in instances:
            print("Waiting for instance {} become running.".format(instance.id))
            instance.wait_until_running()
            tag = {'Key': 'Name', 'Value': definitions.node_name}
            create_tag(instance.id, tag)
            create_tag(instance.id, instance_tag)
            tag_intance_volume(instance.id, definitions.node_name, instance_tag)
            # Only one instance is requested (MinCount=MaxCount=1); return
            # its id immediately.
            return instance.id
        return ''
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to create EC2: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to create EC2",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def tag_intance_volume(instance_id, node_name, instance_tag):
    """Tag the EBS volumes of an instance with Name and billing tags.

    NOTE: the misspelled name ("intance") is kept because callers elsewhere
    (e.g. tag_emr_volume) use it.

    :param instance_id: EC2 instance whose volumes are tagged
    :param node_name: base for the volume Name tag
    :param instance_tag: billing tag dict; its Value gets the volume postfix
    """
    try:
        print('volume tagging')
        volume_list = meta_lib.get_instance_attr(instance_id, 'block_device_mappings')
        counter = 0
        instance_tag_value = instance_tag.get('Value')
        for volume in volume_list:
            # Only the second volume (index 1) is considered secondary;
            # presumably instances have at most two volumes — TODO confirm.
            if counter == 1:
                volume_postfix = '-volume-secondary'
            else:
                volume_postfix = '-volume-primary'
            tag = {'Key': 'Name',
                   'Value': node_name + volume_postfix}
            volume_tag = instance_tag
            volume_tag['Value'] = instance_tag_value + volume_postfix
            volume_id = volume.get('Ebs').get('VolumeId')
            create_tag(volume_id, tag)
            create_tag(volume_id, volume_tag)
            counter += 1
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to tag volumes: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to tag volumes",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def tag_emr_volume(cluster_id, node_name, billing_tag):
    """Tag the volumes of every instance in an EMR cluster.

    :param cluster_id: EMR cluster id
    :param node_name: base name used for the per-volume tags
    :param billing_tag: unused here — the tag is rebuilt from
        conf_service_base_name; kept for interface compatibility
    """
    try:
        client = boto3.client('emr')
        cluster = client.list_instances(ClusterId=cluster_id)
        instances = cluster['Instances']
        for instance in instances:
            instance_tag = {'Key': os.environ['conf_service_base_name'] + '-Tag',
                            'Value': node_name}
            tag_intance_volume(instance['Ec2InstanceId'], node_name, instance_tag)
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to tag emr volumes: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to tag emr volumes",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def create_iam_role(role_name, role_profile, region, service='ec2'):
    """Create an IAM role (and, for EC2, an instance profile bound to it).

    Existing roles/profiles are reused. For the cn-north-1 region the
    AssumeRole principal uses the amazonaws.com.cn domain.

    :param role_name: name of the IAM role to create
    :param role_profile: name of the instance profile (EC2 only)
    :param region: AWS region, used to pick the service principal domain
    :param service: AWS service principal (default 'ec2')
    """
    conn = boto3.client('iam')
    try:
        if region == 'cn-north-1':
            conn.create_role(RoleName=role_name,
                             AssumeRolePolicyDocument='{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":["' + service + '.amazonaws.com.cn"]},"Action":["sts:AssumeRole"]}]}')
        else:
            conn.create_role(RoleName=role_name, AssumeRolePolicyDocument='{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":["' + service + '.amazonaws.com"]},"Action":["sts:AssumeRole"]}]}')
    except botocore.exceptions.ClientError as e_role:
        if e_role.response['Error']['Code'] == 'EntityAlreadyExists':
            print("IAM role already exists. Reusing...")
        else:
            # format_exc(): print_exc() returns None and broke this concatenation.
            logging.info("Unable to create IAM role: " + str(e_role.response['Error']['Message']) + "\n Traceback: " + traceback.format_exc())
            append_result(str({"error": "Unable to create IAM role",
                               "error_message": str(e_role.response['Error']['Message']) + "\n Traceback: " + traceback.format_exc()}))
            traceback.print_exc(file=sys.stdout)
            return
    if service == 'ec2':
        try:
            conn.create_instance_profile(InstanceProfileName=role_profile)
            waiter = conn.get_waiter('instance_profile_exists')
            waiter.wait(InstanceProfileName=role_profile)
        except botocore.exceptions.ClientError as e_profile:
            if e_profile.response['Error']['Code'] == 'EntityAlreadyExists':
                print("Instance profile already exists. Reusing...")
            else:
                logging.info("Unable to create Instance Profile: " + str(e_profile.response['Error']['Message']) + "\n Traceback: " + traceback.format_exc())
                append_result(str({"error": "Unable to create Instance Profile",
                                   "error_message": str(e_profile.response['Error']['Message']) + "\n Traceback: " + traceback.format_exc()}))
                traceback.print_exc(file=sys.stdout)
                return
        try:
            conn.add_role_to_instance_profile(InstanceProfileName=role_profile, RoleName=role_name)
            # Give IAM time to propagate the role/profile association.
            time.sleep(30)
        except botocore.exceptions.ClientError as err:
            logging.info("Unable to add IAM role to instance profile: " + str(err.response['Error']['Message']) + "\n Traceback: " + traceback.format_exc())
            append_result(str({"error": "Unable to add IAM role to instance profile",
                               "error_message": str(err.response['Error']['Message']) + "\n Traceback: " + traceback.format_exc()}))
            traceback.print_exc(file=sys.stdout)
def attach_policy(role_name, policy_arn):
    """Attach a managed IAM policy to a role.

    :param role_name: IAM role to attach the policy to
    :param policy_arn: ARN of the managed policy
    """
    try:
        conn = boto3.client('iam')
        conn.attach_role_policy(PolicyArn=policy_arn, RoleName=role_name)
        # Give IAM time to propagate the attachment.
        time.sleep(30)
    except botocore.exceptions.ClientError as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to attach Policy: " + str(err.response['Error']['Message']) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to attach Policy",
                           "error_message": str(err.response['Error']['Message']) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def create_attach_policy(policy_name, role_name, file_path):
    """Create an inline IAM policy on a role from a JSON document file.

    :param policy_name: name of the inline policy
    :param role_name: IAM role to put the policy on
    :param file_path: path to a file containing the policy JSON document
    """
    try:
        conn = boto3.client('iam')
        with open(file_path, 'r') as myfile:
            json_file = myfile.read()
        conn.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=json_file)
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to attach Policy: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to attach Policy",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def allocate_elastic_ip():
    """Allocate a VPC Elastic IP and return its AllocationId (None on error)."""
    try:
        client = boto3.client('ec2')
        response = client.allocate_address(Domain='vpc')
        return response.get('AllocationId')
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to allocate Elastic IP: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to allocate Elastic IP",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def release_elastic_ip(allocation_id):
    """Release the Elastic IP identified by *allocation_id*."""
    try:
        client = boto3.client('ec2')
        client.release_address(AllocationId=allocation_id)
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to release Elastic IP: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to release Elastic IP",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def associate_elastic_ip(instance_id, allocation_id):
    """Associate an Elastic IP with an instance; return the AssociationId.

    :param instance_id: EC2 instance to associate the address with
    :param allocation_id: AllocationId of the Elastic IP
    :return: AssociationId on success, None on failure
    """
    try:
        client = boto3.client('ec2')
        response = client.associate_address(InstanceId=instance_id, AllocationId=allocation_id)
        return response.get('AssociationId')
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to associate Elastic IP: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to associate Elastic IP",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def disassociate_elastic_ip(association_id):
    """Disassociate the Elastic IP identified by *association_id*."""
    try:
        client = boto3.client('ec2')
        client.disassociate_address(AssociationId=association_id)
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to disassociate Elastic IP: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to disassociate Elastic IP",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_ec2(tag_name, tag_value):
    """Terminate all EC2 instances matching a tag, releasing their Elastic IPs.

    Instances in running/stopped/pending/stopping state whose tag *tag_name*
    equals *tag_value* are terminated; any Elastic IP attached to them is
    disassociated and released first (best-effort).

    :param tag_name: tag key to filter instances by
    :param tag_value: tag value to match
    """
    try:
        ec2 = boto3.resource('ec2')
        client = boto3.client('ec2')
        association_id = ''
        allocation_id = ''
        inst = ec2.instances.filter(
            Filters=[{'Name': 'instance-state-name', 'Values': ['running', 'stopped', 'pending', 'stopping']},
                     {'Name': 'tag:{}'.format(tag_name), 'Values': ['{}'.format(tag_value)]}])
        instances = list(inst)
        if instances:
            for instance in instances:
                try:
                    response = client.describe_instances(InstanceIds=[instance.id])
                    for i in response.get('Reservations'):
                        for h in i.get('Instances'):
                            elastic_ip = h.get('PublicIpAddress')
                            try:
                                response = client.describe_addresses(PublicIps=[elastic_ip]).get('Addresses')
                                for el_ip in response:
                                    allocation_id = el_ip.get('AllocationId')
                                    association_id = el_ip.get('AssociationId')
                                    disassociate_elastic_ip(association_id)
                                    release_elastic_ip(allocation_id)
                                    print("Releasing Elastic IP: {}".format(elastic_ip))
                            except Exception:
                                # The public IP may simply not be an Elastic IP.
                                print("There is no such Elastic IP: {}".format(elastic_ip))
                except Exception as err:
                    print(err)
                    print("There is no Elastic IP to disassociate from instance: {}".format(instance.id))
                client.terminate_instances(InstanceIds=[instance.id])
                waiter = client.get_waiter('instance_terminated')
                waiter.wait(InstanceIds=[instance.id])
                print("The instance {} has been terminated successfully".format(instance.id))
        else:
            print("There are no instances with '{}' tag to terminate".format(tag_name))
    except Exception as err:
        # Bug fixes: "Unable to EC2" message completed, and print_exc()
        # (returns None) replaced by format_exc() in the concatenations.
        logging.info("Unable to remove EC2: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove EC2",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def stop_ec2(tag_name, tag_value):
    """Stop all running/pending EC2 instances matching a tag and wait.

    :param tag_name: tag key to filter instances by
    :param tag_value: tag value to match
    """
    try:
        ec2 = boto3.resource('ec2')
        client = boto3.client('ec2')
        inst = ec2.instances.filter(
            Filters=[{'Name': 'instance-state-name', 'Values': ['running', 'pending']},
                     {'Name': 'tag:{}'.format(tag_name), 'Values': ['{}'.format(tag_value)]}])
        instances = list(inst)
        if instances:
            id_instances = list()
            for instance in instances:
                id_instances.append(instance.id)
            client.stop_instances(InstanceIds=id_instances)
            waiter = client.get_waiter('instance_stopped')
            waiter.wait(InstanceIds=id_instances)
            print("The instances {} have been stopped successfully".format(id_instances))
        else:
            print("There are no instances with {} name to stop".format(tag_value))
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to stop EC2: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to stop EC2",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def start_ec2(tag_name, tag_value):
    """Start all stopped EC2 instances matching a tag and wait for status OK.

    :param tag_name: tag key to filter instances by
    :param tag_value: tag value to match
    """
    try:
        ec2 = boto3.resource('ec2')
        client = boto3.client('ec2')
        inst = ec2.instances.filter(
            Filters=[{'Name': 'instance-state-name', 'Values': ['stopped']},
                     {'Name': 'tag:{}'.format(tag_name), 'Values': ['{}'.format(tag_value)]}])
        instances = list(inst)
        if instances:
            id_instances = list()
            for instance in instances:
                id_instances.append(instance.id)
            client.start_instances(InstanceIds=id_instances)
            # instance_status_ok waits for both system and instance checks.
            waiter = client.get_waiter('instance_status_ok')
            waiter.wait(InstanceIds=id_instances)
            print("The instances {} have been started successfully".format(id_instances))
        else:
            print("There are no instances with {} name to start".format(tag_value))
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to start EC2: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to start EC2",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_detach_iam_policies(role_name, action=''):
    """Detach all managed policies from a role; optionally delete project ones.

    :param role_name: IAM role to process
    :param action: when 'delete', policies whose name contains
        conf_service_base_name are also deleted after being detached
    """
    client = boto3.client('iam')
    service_base_name = os.environ['conf_service_base_name']
    try:
        policy_list = client.list_attached_role_policies(RoleName=role_name).get('AttachedPolicies')
        for i in policy_list:
            policy_arn = i.get('PolicyArn')
            client.detach_role_policy(RoleName=role_name, PolicyArn=policy_arn)
            print("The IAM policy {} has been detached successfully".format(policy_arn))
            if action == 'delete' and service_base_name in i.get('PolicyName'):
                client.delete_policy(PolicyArn=policy_arn)
                print("The IAM policy {} has been deleted successfully".format(policy_arn))
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to remove/detach IAM policy: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove/detach IAM policy",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_roles_and_profiles(role_name, role_profile_name):
    """Delete an IAM role and its instance profile.

    The role is first removed from the profile, then both are deleted.

    :param role_name: IAM role to delete
    :param role_profile_name: instance profile bound to the role
    """
    client = boto3.client('iam')
    try:
        client.remove_role_from_instance_profile(InstanceProfileName=role_profile_name, RoleName=role_name)
        client.delete_instance_profile(InstanceProfileName=role_profile_name)
        client.delete_role(RoleName=role_name)
        print("The IAM role {0} and instance profile {1} have been deleted successfully".format(role_name, role_profile_name))
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to remove IAM role/profile: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove IAM role/profile",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_all_iam_resources(instance_type, scientist=''):
    """Delete project IAM roles, inline/managed policies and instance profiles.

    Roles and profiles are discovered by the conf_service_base_name prefix and
    removed according to *instance_type* ('ssn', 'edge', 'notebook' or 'all');
    for 'edge'/'notebook' only resources matching *scientist* are removed.
    Except for the error-handler fix, the branch logic is intentionally left
    untouched — it is order-dependent and relies on exceptions for flow.

    :param instance_type: which class of resources to remove
    :param scientist: user-name fragment used to narrow edge/notebook resources
    """
    try:
        client = boto3.client('iam')
        service_base_name = os.environ['conf_service_base_name']
        roles_list = []
        for item in client.list_roles(MaxItems=250).get("Roles"):
            if item.get("RoleName").startswith(service_base_name + '-'):
                roles_list.append(item.get('RoleName'))
        if roles_list:
            roles_list.sort(reverse=True)
            for iam_role in roles_list:
                # NOTE(review): due to and/or precedence this reads as
                # (ssn-role AND ssn) OR all — so for 'all' EVERY role enters
                # this branch, not just ssn ones. Looks unintended; confirm
                # before changing.
                if '-ssn-Role' in iam_role and instance_type == 'ssn' or instance_type == 'all':
                    try:
                        client.delete_role_policy(RoleName=iam_role, PolicyName=service_base_name + '-ssn-Policy')
                    except:
                        print('There is no policy {}-ssn-Policy to delete'.format(service_base_name))
                    role_profiles = client.list_instance_profiles_for_role(RoleName=iam_role).get('InstanceProfiles')
                    if role_profiles:
                        for i in role_profiles:
                            role_profile_name = i.get('InstanceProfileName')
                            if role_profile_name == service_base_name + '-ssn-Profile':
                                remove_roles_and_profiles(iam_role, role_profile_name)
                    else:
                        print("There is no instance profile for {}".format(iam_role))
                        client.delete_role(RoleName=iam_role)
                        print("The IAM role {} has been deleted successfully".format(iam_role))
                if '-edge-Role' in iam_role:
                    if instance_type == 'edge' and scientist in iam_role:
                        remove_detach_iam_policies(iam_role, 'delete')
                        role_profile_name = os.environ['conf_service_base_name'] + '-' + '{}'.format(scientist) + '-edge-Profile'
                        try:
                            client.get_instance_profile(InstanceProfileName=role_profile_name)
                            remove_roles_and_profiles(iam_role, role_profile_name)
                        except:
                            print("There is no instance profile for {}".format(iam_role))
                            client.delete_role(RoleName=iam_role)
                            print("The IAM role {} has been deleted successfully".format(iam_role))
                    if instance_type == 'all':
                        remove_detach_iam_policies(iam_role, 'delete')
                        role_profile_name = client.list_instance_profiles_for_role(RoleName=iam_role).get('InstanceProfiles')
                        if role_profile_name:
                            for i in role_profile_name:
                                role_profile_name = i.get('InstanceProfileName')
                                remove_roles_and_profiles(iam_role, role_profile_name)
                        else:
                            print("There is no instance profile for {}".format(iam_role))
                            client.delete_role(RoleName=iam_role)
                            print("The IAM role {} has been deleted successfully".format(iam_role))
                if '-nb-de-Role' in iam_role:
                    if instance_type == 'notebook' and scientist in iam_role:
                        remove_detach_iam_policies(iam_role)
                        role_profile_name = os.environ['conf_service_base_name'] + '-' + "{}".format(scientist) + '-nb-de-Profile'
                        try:
                            client.get_instance_profile(InstanceProfileName=role_profile_name)
                            remove_roles_and_profiles(iam_role, role_profile_name)
                        except:
                            print("There is no instance profile for {}".format(iam_role))
                            client.delete_role(RoleName=iam_role)
                            print("The IAM role {} has been deleted successfully".format(iam_role))
                    if instance_type == 'all':
                        remove_detach_iam_policies(iam_role)
                        role_profile_name = client.list_instance_profiles_for_role(RoleName=iam_role).get('InstanceProfiles')
                        if role_profile_name:
                            for i in role_profile_name:
                                role_profile_name = i.get('InstanceProfileName')
                                remove_roles_and_profiles(iam_role, role_profile_name)
                        else:
                            print("There is no instance profile for {}".format(iam_role))
                            client.delete_role(RoleName=iam_role)
                            print("The IAM role {} has been deleted successfully".format(iam_role))
        else:
            print("There are no IAM roles to delete. Checking instance profiles...")
        profile_list = []
        for item in client.list_instance_profiles(MaxItems=250).get("InstanceProfiles"):
            if item.get("InstanceProfileName").startswith(service_base_name + '-'):
                profile_list.append(item.get('InstanceProfileName'))
        if profile_list:
            for instance_profile in profile_list:
                # NOTE(review): same and/or precedence pattern as above.
                if '-ssn-Profile' in instance_profile and instance_type == 'ssn' or instance_type == 'all':
                    client.delete_instance_profile(InstanceProfileName=instance_profile)
                    print("The instance profile {} has been deleted successfully".format(instance_profile))
                if '-edge-Profile' in instance_profile:
                    if instance_type == 'edge' and scientist in instance_profile:
                        client.delete_instance_profile(InstanceProfileName=instance_profile)
                        print("The instance profile {} has been deleted successfully".format(instance_profile))
                    if instance_type == 'all':
                        client.delete_instance_profile(InstanceProfileName=instance_profile)
                        print("The instance profile {} has been deleted successfully".format(instance_profile))
                if '-nb-de-Profile' in instance_profile:
                    if instance_type == 'notebook' and scientist in instance_profile:
                        client.delete_instance_profile(InstanceProfileName=instance_profile)
                        print("The instance profile {} has been deleted successfully".format(instance_profile))
                    if instance_type == 'all':
                        client.delete_instance_profile(InstanceProfileName=instance_profile)
                        print("The instance profile {} has been deleted successfully".format(instance_profile))
        else:
            print("There are no instance profiles to delete")
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to remove some of the IAM resources: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove some of the IAM resources",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def s3_cleanup(bucket, cluster_name, user_name):
    """Delete all objects under user_name/cluster_name/ in an S3 bucket.

    :param bucket: bucket name
    :param cluster_name: cluster whose objects are removed
    :param user_name: user prefix the objects live under
    """
    s3_res = boto3.resource('s3', config=Config(signature_version='s3v4'))
    client = boto3.client('s3', config=Config(signature_version='s3v4'), region_name=os.environ['aws_region'])
    try:
        client.head_bucket(Bucket=bucket)
    except:
        print("There is no bucket {} or you do not permission to access it".format(bucket))
        # NOTE(review): exits with code 0 even though this is a failure path —
        # presumably callers treat a missing bucket as "nothing to clean";
        # confirm before changing.
        sys.exit(0)
    try:
        resource = s3_res.Bucket(bucket)
        prefix = user_name + '/' + cluster_name + "/"
        for i in resource.objects.filter(Prefix=prefix):
            s3_res.Object(resource.name, i.key).delete()
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to clean S3 bucket: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to clean S3 bucket",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_s3(bucket_type='all', scientist=''):
    """Empty and delete project S3 buckets selected by type and tag.

    Buckets whose name contains the computed base name AND that carry the
    '<conf_service_base_name>-Tag' tag are emptied and deleted.

    :param bucket_type: 'ssn' (ssn + shared buckets), 'edge' (per-user
        bucket) or anything else for all project buckets
    :param scientist: user name used for the 'edge' bucket name
    """
    try:
        client = boto3.client('s3', config=Config(signature_version='s3v4'), region_name=os.environ['aws_region'])
        s3 = boto3.resource('s3')
        bucket_list = []
        if bucket_type == 'ssn':
            bucket_name = (os.environ['conf_service_base_name'] + '-ssn-bucket').lower().replace('_', '-')
            # The shared bucket is always removed together with the ssn one.
            bucket_list.append((os.environ['conf_service_base_name'] + '-shared-bucket').lower().replace('_', '-'))
        elif bucket_type == 'edge':
            bucket_name = (os.environ['conf_service_base_name'] + '-' + "{}".format(scientist) + '-bucket').lower().replace('_', '-')
        else:
            bucket_name = (os.environ['conf_service_base_name']).lower().replace('_', '-')
        for item in client.list_buckets().get('Buckets'):
            if bucket_name in item.get('Name'):
                # Removed a stray no-op `i.get('Key')` statement here.
                for i in client.get_bucket_tagging(Bucket=item.get('Name')).get('TagSet'):
                    if i.get('Key') == os.environ['conf_service_base_name'] + '-Tag':
                        bucket_list.append(item.get('Name'))
        for s3bucket in bucket_list:
            if s3bucket:
                bucket = s3.Bucket(s3bucket)
                bucket.objects.all().delete()
                print("The S3 bucket {} has been cleaned".format(s3bucket))
                client.delete_bucket(Bucket=s3bucket)
                print("The S3 bucket {} has been deleted successfully".format(s3bucket))
            else:
                print("There are no buckets to delete")
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to remove S3 bucket: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove S3 bucket",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_subnets(tag_value):
    """Delete all subnets carrying the project tag with the given value.

    :param tag_value: value of the '<conf_service_base_name>-Tag' tag
    """
    try:
        ec2 = boto3.resource('ec2')
        client = boto3.client('ec2')
        tag_name = os.environ['conf_service_base_name'] + '-Tag'
        # Bug fix: a boto3 filter collection is always truthy, so the
        # "no subnets" branch was unreachable; materialize it first.
        subnet_list = list(ec2.subnets.filter(
            Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [tag_value]}]))
        if subnet_list:
            for subnet in subnet_list:
                client.delete_subnet(SubnetId=subnet.id)
                print("The subnet {} has been deleted successfully".format(subnet.id))
        else:
            print("There are no private subnets to delete")
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to remove subnet: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove subnet",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_sgroups(tag_value):
    """Delete all security groups carrying the project tag with the value.

    :param tag_value: value of the conf_service_base_name tag to match
    """
    try:
        ec2 = boto3.resource('ec2')
        client = boto3.client('ec2')
        tag_name = os.environ['conf_service_base_name']
        # Bug fix: a boto3 filter collection is always truthy, so the
        # "no security groups" branch was unreachable; materialize it first.
        sg_list = list(ec2.security_groups.filter(
            Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [tag_value]}]))
        if sg_list:
            for sg in sg_list:
                client.delete_security_group(GroupId=sg.id)
                print("The security group {} has been deleted successfully".format(sg.id))
        else:
            print("There are no security groups to delete")
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to remove SG: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove SG",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def add_inbound_sg_rule(sg_id, rule):
    """Add an ingress rule to a security group; duplicates are tolerated.

    :param sg_id: security group id
    :param rule: IpPermissions dict for the rule
    """
    try:
        client = boto3.client('ec2')
        client.authorize_security_group_ingress(
            GroupId=sg_id,
            IpPermissions=[rule]
        )
    except Exception as err:
        # Bug fix: err may not be a botocore ClientError, so accessing
        # err.response unguarded could raise AttributeError inside the
        # handler; also print_exc() (returns None) replaced by format_exc().
        response = getattr(err, 'response', None)
        if response and response.get('Error', {}).get('Code') == 'InvalidPermission.Duplicate':
            print("The following inbound rule is already exist:")
            print(str(rule))
        else:
            logging.info("Unable to add inbound rule to SG: " + str(err) + "\n Traceback: " + traceback.format_exc())
            append_result(str({"error": "Unable to add inbound rule to SG",
                               "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
            traceback.print_exc(file=sys.stdout)
def add_outbound_sg_rule(sg_id, rule):
    """Add an egress rule to a security group; duplicates are tolerated.

    :param sg_id: security group id
    :param rule: IpPermissions dict for the rule
    """
    try:
        client = boto3.client('ec2')
        client.authorize_security_group_egress(
            GroupId=sg_id,
            IpPermissions=[rule]
        )
    except Exception as err:
        # Bug fix: guard err.response (err may not be a ClientError) and use
        # format_exc() — print_exc() returns None and broke the concatenation.
        response = getattr(err, 'response', None)
        if response and response.get('Error', {}).get('Code') == 'InvalidPermission.Duplicate':
            print("The following outbound rule is already exist:")
            print(str(rule))
        else:
            logging.info("Unable to add outbound rule to SG: " + str(err) + "\n Traceback: " + traceback.format_exc())
            append_result(str({"error": "Unable to add outbound rule to SG",
                               "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
            traceback.print_exc(file=sys.stdout)
def deregister_image(image_name='*'):
    """Deregister project AMIs and delete their backing EBS snapshots.

    Images are matched by the project name prefix plus two tag-value filters
    (base name and *image_name*).

    :param image_name: tag value used to narrow the images (default: all)
    """
    try:
        resource = boto3.resource('ec2')
        client = boto3.client('ec2')
        for image in resource.images.filter(
                Filters=[{'Name': 'name', 'Values': ['{}-*'.format(os.environ['conf_service_base_name'])]},
                         {'Name': 'tag-value', 'Values': [os.environ['conf_service_base_name']]},
                         {'Name': 'tag-value', 'Values': [image_name]}]):
            client.deregister_image(ImageId=image.id)
            # Deregistering does not remove snapshots; delete them explicitly.
            for device in image.block_device_mappings:
                if device.get('Ebs'):
                    client.delete_snapshot(SnapshotId=device.get('Ebs').get('SnapshotId'))
            print("Notebook AMI {} has been deregistered successfully".format(image.id))
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to de-register image: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to de-register image",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def terminate_emr(id):
    """Terminate an EMR cluster and wait until it is gone.

    :param id: EMR cluster (job flow) id; the parameter name shadows the
        builtin but is kept so keyword callers keep working
    """
    try:
        emr = boto3.client('emr')
        emr.terminate_job_flows(
            JobFlowIds=[id]
        )
        waiter = emr.get_waiter('cluster_terminated')
        waiter.wait(ClusterId=id)
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to remove EMR: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove EMR",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_version):
    """Remove EMR-cluster kernels/interpreters from all matching notebooks.

    For every running notebook instance matching the tag, SSH in (fabric) and
    delete the Jupyter kernels for *emr_name*; if a Zeppelin interpreter was
    ensured for the cluster, stop its Livy server (multi-cluster mode), delete
    the interpreter via Zeppelin's REST API and restart Zeppelin; RStudio
    kernels are removed via dlab.fab. The fabric/urllib2 body is left exactly
    as-is (order-dependent remote commands); only the error handler is fixed.

    :param emr_name: cluster name used in kernel/interpreter paths
    :param tag_name: tag key identifying notebook instances
    :param nb_tag_value: tag value identifying notebook instances
    :param ssh_user: remote user for SSH
    :param key_path: private key for SSH
    :param emr_version: EMR version, part of the install path under /opt
    """
    try:
        ec2 = boto3.resource('ec2')
        inst = ec2.instances.filter(
            Filters=[{'Name': 'instance-state-name', 'Values': ['running']},
                     {'Name': 'tag:{}'.format(tag_name), 'Values': ['{}'.format(nb_tag_value)]}])
        instances = list(inst)
        if instances:
            for instance in instances:
                private = getattr(instance, 'private_dns_name')
                env.hosts = "{}".format(private)
                env.user = "{}".format(ssh_user)
                env.key_filename = "{}".format(key_path)
                env.host_string = env.user + "@" + env.hosts
                sudo('rm -rf /home/{}/.local/share/jupyter/kernels/*_{}'.format(ssh_user, emr_name))
                if exists('/home/{}/.ensure_dir/dataengine-service_{}_interpreter_ensured'.format(ssh_user, emr_name)):
                    if os.environ['notebook_multiple_clusters'] == 'true':
                        try:
                            livy_port = sudo("cat /opt/" + emr_version + "/" + emr_name
                                             + "/livy/conf/livy.conf | grep livy.server.port | tail -n 1 | awk '{printf $3}'")
                            process_number = sudo("netstat -natp 2>/dev/null | grep ':" + livy_port +
                                                  "' | awk '{print $7}' | sed 's|/.*||g'")
                            sudo('kill -9 ' + process_number)
                            sudo('systemctl disable livy-server-' + livy_port)
                        except:
                            print("Wasn't able to find Livy server for this EMR!")
                    sudo('sed -i \"s/^export SPARK_HOME.*/export SPARK_HOME=\/opt\/spark/\" /opt/zeppelin/conf/zeppelin-env.sh')
                    sudo("rm -rf /home/{}/.ensure_dir/dataengine-service_interpreter_ensure".format(ssh_user))
                    zeppelin_url = 'http://' + private + ':8080/api/interpreter/setting/'
                    opener = urllib2.build_opener(urllib2.ProxyHandler({}))
                    req = opener.open(urllib2.Request(zeppelin_url))
                    r_text = req.read()
                    interpreter_json = json.loads(r_text)
                    interpreter_prefix = emr_name
                    for interpreter in interpreter_json['body']:
                        if interpreter_prefix in interpreter['name']:
                            print("Interpreter with ID: {0} and name: {1} will be removed from zeppelin!".
                                  format(interpreter['id'], interpreter['name']))
                            request = urllib2.Request(zeppelin_url + interpreter['id'], data='')
                            request.get_method = lambda: 'DELETE'
                            url = opener.open(request)
                            print(url.read())
                    sudo('chown ' + ssh_user + ':' + ssh_user + ' -R /opt/zeppelin/')
                    sudo('systemctl daemon-reload')
                    sudo("service zeppelin-notebook stop")
                    sudo("service zeppelin-notebook start")
                    zeppelin_restarted = False
                    while not zeppelin_restarted:
                        sudo('sleep 5')
                        result = sudo('nmap -p 8080 localhost | grep "closed" > /dev/null; echo $?')
                        result = result[:1]
                        if result == '1':
                            zeppelin_restarted = True
                    sudo('sleep 5')
                    sudo('rm -rf /home/{}/.ensure_dir/dataengine-service_{}_interpreter_ensured'.format(ssh_user, emr_name))
                if exists('/home/{}/.ensure_dir/rstudio_dataengine-service_ensured'.format(ssh_user)):
                    dlab.fab.remove_rstudio_dataengines_kernel(emr_name, ssh_user)
                sudo('rm -rf /opt/' + emr_version + '/' + emr_name + '/')
                print("Notebook's {} kernels were removed".format(env.hosts))
        else:
            print("There are no notebooks to clean kernels.")
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to remove kernels on Notebook: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove kernels on Notebook",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_route_tables(tag_name, ssn=False):
    """Delete route tables carrying *tag_name*; for SSN, disassociate first.

    :param tag_name: tag key selecting the route tables
    :param ssn: when True, remove the table's associations before deleting
    """
    try:
        client = boto3.client('ec2')
        rtables = client.describe_route_tables(Filters=[{'Name': 'tag-key', 'Values': [tag_name]}]).get('RouteTables')
        for rtable in rtables:
            if rtable:
                rtable_associations = rtable.get('Associations')
                rtable = rtable.get('RouteTableId')
                if ssn:
                    for association in rtable_associations:
                        client.disassociate_route_table(AssociationId=association.get('RouteTableAssociationId'))
                        print("Association {} has been removed".format(association.get('RouteTableAssociationId')))
                client.delete_route_table(RouteTableId=rtable)
                print("Route table {} has been removed".format(rtable))
            else:
                print("There are no route tables to remove")
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to remove route table: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove route table",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_internet_gateways(vpc_id, tag_name, tag_value):
    """Detach and delete internet gateways matching a tag from a VPC.

    :param vpc_id: VPC the gateways are detached from
    :param tag_name: tag key to match
    :param tag_value: tag value to match
    """
    try:
        ig_id = ''
        client = boto3.client('ec2')
        response = client.describe_internet_gateways(
            Filters=[
                {'Name': 'tag-key', 'Values': [tag_name]},
                {'Name': 'tag-value', 'Values': [tag_value]}]).get('InternetGateways')
        for i in response:
            ig_id = i.get('InternetGatewayId')
            client.detach_internet_gateway(InternetGatewayId=ig_id, VpcId=vpc_id)
            # Bug fix: was printing vpc_id.format (the bound method object)
            # instead of the VPC id itself.
            print("Internet gateway {0} has been detached from VPC {1}".format(ig_id, vpc_id))
            client.delete_internet_gateway(InternetGatewayId=ig_id)
            print("Internet gateway {} has been deleted successfully".format(ig_id))
    except Exception as err:
        # format_exc(): print_exc() returns None and broke this concatenation.
        logging.info("Unable to remove internet gateway: " + str(err) + "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove internet gateway",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def remove_vpc_endpoints(vpc_id):
    """Delete every VPC endpoint attached to ``vpc_id``.

    Errors are logged and appended to the result file; nothing is re-raised.
    """
    try:
        client = boto3.client('ec2')
        response = client.describe_vpc_endpoints(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]).get('VpcEndpoints')
        for i in response:
            client.delete_vpc_endpoints(VpcEndpointIds=[i.get('VpcEndpointId')])
            print("VPC Endpoint {} has been removed successfully".format(i.get('VpcEndpointId')))
    except Exception as err:
        # format_exc() returns the traceback text; print_exc() returns None.
        logging.info("Unable to remove VPC Endpoint: " + str(err) +
                     "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove VPC Endpoint",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def create_image_from_instance(tag_name='', instance_name='', image_name='', tags=''):
    """Create an AMI from the running instance tagged ``tag_name=instance_name``.

    Waits (polling every ~20 s) until the image reaches the 'available'
    state, tags it with the service base name and any extra ``tags`` (a JSON
    object string), and returns the image id.  Returns '' when no running
    instance matches.  A duplicate image name is reported and treated as
    non-fatal.
    """
    try:
        ec2 = boto3.resource('ec2')
        instances = ec2.instances.filter(
            Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [instance_name]},
                     {'Name': 'instance-state-name', 'Values': ['running']}])
        for instance in instances:
            image = instance.create_image(Name=image_name,
                                          Description='Automatically created image for notebook server',
                                          NoReboot=False)
            image.load()
            while image.state != 'available':
                local("echo Waiting for image creation; sleep 20")
                image.load()
            tag = {'Key': 'Name', 'Value': os.environ['conf_service_base_name']}
            create_tag(image.id, tag)
            if tags:
                all_tags = json.loads(tags)
                for key in all_tags.keys():
                    tag = {'Key': key, 'Value': all_tags[key]}
                    create_tag(image.id, tag)
            return image.id
        return ''
    except botocore.exceptions.ClientError as err:
        if err.response['Error']['Code'] == 'InvalidAMIName.Duplicate':
            print("Image is already created.")
        else:
            # format_exc() returns the traceback text; print_exc() returns None.
            logging.info("Unable to create image: " + str(err) +
                         "\n Traceback: " + traceback.format_exc())
            append_result(str({"error": "Unable to create image",
                               "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
            traceback.print_exc(file=sys.stdout)
def install_emr_spark(args):
    """Download the cluster's prebuilt Spark tarball from S3 and unpack it.

    The archive is verified against its md5 checksum file; on a mismatch it
    is downloaded once more, and a second mismatch aborts the process.
    ``args`` is expected to carry ``region``, ``bucket``, ``user_name``,
    ``cluster_name`` and ``emr_version``.
    """
    s3_client = boto3.client('s3', config=Config(signature_version='s3v4'), region_name=args.region)
    s3_client.download_file(args.bucket, args.user_name + '/' + args.cluster_name + '/spark.tar.gz',
                            '/tmp/spark.tar.gz')
    s3_client.download_file(args.bucket, args.user_name + '/' + args.cluster_name + '/spark-checksum.chk',
                            '/tmp/spark-checksum.chk')
    # md5sum emits a WARNING line when any listed checksum fails.
    if 'WARNING' in local('md5sum -c /tmp/spark-checksum.chk', capture=True):
        local('rm -f /tmp/spark.tar.gz')
        s3_client.download_file(args.bucket, args.user_name + '/' + args.cluster_name + '/spark.tar.gz',
                                '/tmp/spark.tar.gz')
        if 'WARNING' in local('md5sum -c /tmp/spark-checksum.chk', capture=True):
            print("The checksum of spark.tar.gz is mismatched. It could be caused by aws network issue.")
            sys.exit(1)
    local('sudo tar -zhxvf /tmp/spark.tar.gz -C /opt/' + args.emr_version + '/' + args.cluster_name + '/')
def jars(args, emr_dir):
    """Download the EMR-version-specific jar bundle from S3 into ``emr_dir``.

    The tarball is md5-verified; a mismatch triggers one re-download and a
    second mismatch aborts the process.
    """
    print("Downloading jars...")
    s3_client = boto3.client('s3', config=Config(signature_version='s3v4'), region_name=args.region)
    s3_client.download_file(args.bucket, 'jars/' + args.emr_version + '/jars.tar.gz', '/tmp/jars.tar.gz')
    s3_client.download_file(args.bucket, 'jars/' + args.emr_version + '/jars-checksum.chk', '/tmp/jars-checksum.chk')
    # md5sum emits a WARNING line when any listed checksum fails.
    if 'WARNING' in local('md5sum -c /tmp/jars-checksum.chk', capture=True):
        local('rm -f /tmp/jars.tar.gz')
        s3_client.download_file(args.bucket, 'jars/' + args.emr_version + '/jars.tar.gz', '/tmp/jars.tar.gz')
        if 'WARNING' in local('md5sum -c /tmp/jars-checksum.chk', capture=True):
            print("The checksum of jars.tar.gz is mismatched. It could be caused by aws network issue.")
            sys.exit(1)
    local('tar -zhxvf /tmp/jars.tar.gz -C ' + emr_dir)
def yarn(args, yarn_dir):
    """Fetch the cluster's YARN/Hadoop config tree from S3 into ``yarn_dir``.

    ``cn-north-1`` requires an explicit regional endpoint for both the
    client and the resource.  Files land under the user/cluster prefix and
    are then flattened directly into ``yarn_dir``.
    """
    print("Downloading yarn configuration...")
    if args.region == 'cn-north-1':
        s3client = boto3.client('s3', config=Config(signature_version='s3v4'),
                                endpoint_url='https://s3.cn-north-1.amazonaws.com.cn', region_name=args.region)
        s3resource = boto3.resource('s3', config=Config(signature_version='s3v4'),
                                    endpoint_url='https://s3.cn-north-1.amazonaws.com.cn', region_name=args.region)
    else:
        s3client = boto3.client('s3', config=Config(signature_version='s3v4'), region_name=args.region)
        s3resource = boto3.resource('s3', config=Config(signature_version='s3v4'))
    get_files(s3client, s3resource, args.user_name + '/' + args.cluster_name + '/config/', args.bucket, yarn_dir)
    # Flatten <yarn_dir>/<user>/<cluster>/config/* into yarn_dir itself.
    local('sudo mv ' + yarn_dir + args.user_name + '/' + args.cluster_name + '/config/* ' + yarn_dir)
    local('sudo rm -rf ' + yarn_dir + args.user_name + '/')
def get_files(s3client, s3resource, dist, bucket, local):
    """Recursively download every S3 object under prefix ``dist`` of
    ``bucket`` into the directory ``local``, preserving key paths.

    NOTE: the ``local`` parameter name shadows fabric's ``local()`` helper
    used elsewhere in this module; kept for interface compatibility.
    """
    paginator = s3client.get_paginator('list_objects')
    for page in paginator.paginate(Bucket=bucket, Delimiter='/', Prefix=dist):
        # Recurse into "sub-directories" (common prefixes) first.
        for prefix_entry in page.get('CommonPrefixes') or []:
            get_files(s3client, s3resource, prefix_entry.get('Prefix'), bucket, local)
        # Then download the objects directly under this prefix.
        for obj in page.get('Contents') or []:
            target = local + os.sep + obj.get('Key')
            target_dir = os.path.dirname(target)
            if not os.path.exists(target_dir):
                os.makedirs(target_dir)
            s3resource.meta.client.download_file(bucket, obj.get('Key'), target)
def get_cluster_python_version(region, bucket, user_name, cluster_name):
    """Fetch the cluster's python_version marker from S3 into /tmp/python_version."""
    client = boto3.client('s3', config=Config(signature_version='s3v4'), region_name=region)
    key = '{0}/{1}/python_version'.format(user_name, cluster_name)
    client.download_file(bucket, key, '/tmp/python_version')
def get_gitlab_cert(bucket, certfile):
    """Download ``certfile`` from S3 ``bucket`` into the local path ``certfile``.

    Returns
    -------
    bool
        True on success; False when the object is missing (404) or the
        download fails with another client error.
    """
    try:
        s3 = boto3.resource('s3')
        s3.Bucket(bucket).download_file(certfile, certfile)
        return True
    except botocore.exceptions.ClientError as err:
        if err.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            # Previously non-404 client errors fell through and returned an
            # implicit None; make the failure explicit (still falsy, so
            # existing truthiness checks in callers behave the same).
            print("Unable to download {0}: {1}".format(certfile, err))
        return False
def create_aws_config_files(generate_full_config=False):
    """(Re)create the AWS CLI config directory from environment variables.

    Writes ``<AWS_DIR>/config`` with the default region and, when
    ``generate_full_config`` is True, ``<AWS_DIR>/credentials`` with the
    access keys, then locks down the permissions.  Returns True on success
    and exits the process on failure.
    """
    try:
        aws_user_dir = os.environ['AWS_DIR']
        logging.info(local("rm -rf " + aws_user_dir+" 2>&1", capture=True))
        logging.info(local("mkdir -p " + aws_user_dir+" 2>&1", capture=True))
        with open(aws_user_dir + '/config', 'w') as aws_file:
            aws_file.write("[default]\n")
            aws_file.write("region = {}\n".format(os.environ['aws_region']))
        if generate_full_config:
            with open(aws_user_dir + '/credentials', 'w') as aws_file:
                aws_file.write("[default]\n")
                aws_file.write("aws_access_key_id = {}\n".format(os.environ['aws_access_key']))
                aws_file.write("aws_secret_access_key = {}\n".format(os.environ['aws_secret_access_key']))
        # Credentials must not be world-readable.
        logging.info(local("chmod 600 " + aws_user_dir + "/*"+" 2>&1", capture=True))
        logging.info(local("chmod 550 " + aws_user_dir+" 2>&1", capture=True))
        return True
    except Exception as err:
        # The original bare ``except`` also swallowed SystemExit and
        # KeyboardInterrupt and gave no hint of the failure's cause.
        logging.error("Unable to create AWS config files: " + str(err))
        sys.exit(1)
def installing_python(region, bucket, user_name, cluster_name, application='', pip_mirror='', numpy_version='1.14.3'):
    """Build the cluster's exact CPython version from source and provision it.

    Reads the version marker downloaded by ``get_cluster_python_version``,
    compiles that interpreter under ``/opt/python/python<version>`` if it is
    not already present, creates a virtualenv there and installs the
    notebook package set (boto/boto3, NumPy/SciPy stack, etc.).  In
    ``cn-north-1`` all pip traffic is routed through ``pip_mirror``.
    """
    get_cluster_python_version(region, bucket, user_name, cluster_name)
    # NOTE: ``file()`` is the Python 2 builtin open; this module targets Python 2.
    with file('/tmp/python_version') as f:
        python_version = f.read()
    python_version = python_version[0:5]  # e.g. '3.5.2' from '3.5.2\n'
    if not os.path.exists('/opt/python/python' + python_version):
        local('wget https://www.python.org/ftp/python/' + python_version +
              '/Python-' + python_version + '.tgz -O /tmp/Python-' + python_version + '.tgz' )
        local('tar zxvf /tmp/Python-' + python_version + '.tgz -C /tmp/')
        with lcd('/tmp/Python-' + python_version):
            local('./configure --prefix=/opt/python/python' + python_version +
                  ' --with-zlib-dir=/usr/local/lib/ --with-ensurepip=install')
            # altinstall avoids clobbering the system python binary.
            local('sudo make altinstall')
        with lcd('/tmp/'):
            local('sudo rm -rf Python-' + python_version + '/')
        if region == 'cn-north-1':
            # Mirror-constrained region: pin pip and raise the default timeout
            # via a temporary /etc/pip.conf (original is restored below).
            local('sudo -i /opt/python/python{}/bin/python{} -m pip install -U pip=={} --no-cache-dir'.format(
                python_version, python_version[0:3], os.environ['conf_pip_version']))
            local('sudo mv /etc/pip.conf /etc/back_pip.conf')
            local('sudo touch /etc/pip.conf')
            local('sudo echo "[global]" >> /etc/pip.conf')
            local('sudo echo "timeout = 600" >> /etc/pip.conf')
        local('sudo -i virtualenv /opt/python/python' + python_version)
        venv_command = '/bin/bash /opt/python/python' + python_version + '/bin/activate'
        pip_command = '/opt/python/python' + python_version + '/bin/pip' + python_version[:3]
        if region == 'cn-north-1':
            try:
                local(venv_command + ' && sudo -i ' + pip_command +
                      ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 -U pip==9.0.3 --no-cache-dir'.format(pip_mirror))
                local(venv_command + ' && sudo -i ' + pip_command + ' install pyzmq==17.0.0')
                local(venv_command + ' && sudo -i ' + pip_command +
                      ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 ipython ipykernel --no-cache-dir'.
                      format(pip_mirror))
                local(venv_command + ' && sudo -i ' + pip_command +
                      ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 boto boto3 NumPy=={1} SciPy Matplotlib==2.0.2 pandas Sympy Pillow sklearn --no-cache-dir'.
                      format(pip_mirror, numpy_version))
                # Need to refactor when we add GPU cluster
                if application == 'deeplearning':
                    local(venv_command + ' && sudo -i ' + pip_command +
                          ' install -i https://{0}/simple --trusted-host {0} --timeout 60000 mxnet-cu80 opencv-python keras Theano --no-cache-dir'.format(pip_mirror))
                    python_without_dots = python_version.replace('.', '')
                    local(venv_command + ' && sudo -i ' + pip_command +
                          ' install https://cntk.ai/PythonWheel/GPU/cntk-2.0rc3-cp{0}-cp{0}m-linux_x86_64.whl --no-cache-dir'.
                          format(python_without_dots[:2]))
                local('sudo rm /etc/pip.conf')
                local('sudo mv /etc/back_pip.conf /etc/pip.conf')
            except:
                # Roll back pip.conf and remove the half-provisioned
                # interpreter before bailing out.
                local('sudo rm /etc/pip.conf')
                local('sudo mv /etc/back_pip.conf /etc/pip.conf')
                local('sudo rm -rf /opt/python/python{}/'.format(python_version))
                sys.exit(1)
        else:
            local(venv_command + ' && sudo -i ' + pip_command + ' install -U pip==9.0.3 --no-cache-dir')
            local(venv_command + ' && sudo -i ' + pip_command + ' install pyzmq==17.0.0')
            local(venv_command + ' && sudo -i ' + pip_command + ' install ipython ipykernel --no-cache-dir')
            local(venv_command + ' && sudo -i ' + pip_command +
                  ' install boto boto3 NumPy=={} SciPy Matplotlib==2.0.2 pandas Sympy Pillow sklearn --no-cache-dir'.format(numpy_version))
            # Need to refactor when we add GPU cluster
            if application == 'deeplearning':
                local(venv_command + ' && sudo -i ' + pip_command +
                      ' install mxnet-cu80 opencv-python keras Theano --no-cache-dir')
                python_without_dots = python_version.replace('.', '')
                local(venv_command + ' && sudo -i ' + pip_command +
                      ' install https://cntk.ai/PythonWheel/GPU/cntk-2.0rc3-cp{0}-cp{0}m-linux_x86_64.whl --no-cache-dir'.
                      format(python_without_dots[:2]))
        # Make the freshly built interpreter the system pythonX.Y.
        local('sudo rm -rf /usr/bin/python' + python_version[0:3])
        local('sudo ln -fs /opt/python/python' + python_version + '/bin/python' + python_version[0:3] +
              ' /usr/bin/python' + python_version[0:3])
def spark_defaults(args):
    """Rewrite the EMR cluster's spark-defaults.conf for notebook-side use.

    Strips excluded/commented/blank lines, rebases jar and config paths onto
    ``/opt/<emr_version>/...``, and appends the regional S3A endpoint plus
    server-side-encryption settings.
    """
    spark_def_path = '/opt/' + args.emr_version + '/' + args.cluster_name + '/spark/conf/spark-defaults.conf'
    # NOTE(review): eval() on a CLI-supplied string — input assumed trusted
    # (internal provisioning tooling); expected to evaluate to a list of
    # sed patterns to delete.
    for i in eval(args.excluded_lines):
        local(""" sudo bash -c " sed -i '/""" + i + """/d' """ + spark_def_path + """ " """)
    # Drop comment lines and blank lines.
    local(""" sudo bash -c " sed -i '/#/d' """ + spark_def_path + """ " """)
    local(""" sudo bash -c " sed -i '/^\s*$/d' """ + spark_def_path + """ " """)
    # Rebase classpath/library/dist-file paths onto placeholders that are
    # substituted with the real version/cluster just below.
    local(""" sudo bash -c "sed -i '/spark.driver.extraClassPath/,/spark.driver.extraLibraryPath/s|/usr|/opt/DATAENGINE-SERVICE_VERSION/jars/usr|g' """ + spark_def_path + """ " """)
    local(""" sudo bash -c "sed -i '/spark.yarn.dist.files/s/\/etc\/spark\/conf/\/opt\/DATAENGINE-SERVICE_VERSION\/CLUSTER\/conf/g' """
          + spark_def_path + """ " """)
    template_file = spark_def_path
    with open(template_file, 'r') as f:
        text = f.read()
    text = text.replace('DATAENGINE-SERVICE_VERSION', args.emr_version)
    text = text.replace('CLUSTER', args.cluster_name)
    with open(spark_def_path, 'w') as f:
        f.write(text)
    # Region-specific S3 endpoint for the s3a filesystem.
    if args.region == 'us-east-1':
        endpoint_url = 'https://s3.amazonaws.com'
    elif args.region == 'cn-north-1':
        endpoint_url = "https://s3.{}.amazonaws.com.cn".format(args.region)
    else:
        endpoint_url = 'https://s3-' + args.region + '.amazonaws.com'
    local("""bash -c 'echo "spark.hadoop.fs.s3a.endpoint """ + endpoint_url + """" >> """ + spark_def_path + """'""")
    local('echo "spark.hadoop.fs.s3a.server-side-encryption-algorithm AES256" >> {}'.format(spark_def_path))
def ensure_local_jars(os_user, jars_dir):
    """Download the hadoop-aws, aws-java-sdk and hadoop-lzo jars into ``jars_dir``.

    Idempotent: guarded by the ``local_jars_ensured`` marker file under the
    user's ``.ensure_dir``.  Exits the process on failure.
    """
    if not exists('/home/{}/.ensure_dir/local_jars_ensured'.format(os_user)):
        try:
            sudo('mkdir -p ' + jars_dir)
            sudo('wget http://central.maven.org/maven2/org/apache/hadoop/hadoop-aws/2.7.4/hadoop-aws-2.7.4.jar -O ' +
                 jars_dir + 'hadoop-aws-2.7.4.jar')
            sudo('wget http://central.maven.org/maven2/com/amazonaws/aws-java-sdk/1.7.4/aws-java-sdk-1.7.4.jar -O ' +
                 jars_dir + 'aws-java-sdk-1.7.4.jar')
            sudo('wget http://maven.twttr.com/com/hadoop/gplcompression/hadoop-lzo/0.4.20/hadoop-lzo-0.4.20.jar -O ' +
                 jars_dir + 'hadoop-lzo-0.4.20.jar')
            sudo('touch /home/{}/.ensure_dir/local_jars_ensured'.format(os_user))
        except Exception:
            # Was a bare ``except``, which also trapped SystemExit and
            # KeyboardInterrupt.
            sys.exit(1)
def configure_local_spark(os_user, jars_dir, region, templates_dir, memory_type='driver'):
    """Install the notebook's local spark-defaults.conf and set its memory.

    The template-install part runs once (guarded by a marker file); the
    ``spark.<memory_type>.memory`` setting is refreshed on every call.
    Exits the process on failure.
    """
    if not exists('/home/{}/.ensure_dir/local_spark_configured'.format(os_user)):
        try:
            # Region-specific S3 endpoint for the s3a filesystem.
            if region == 'us-east-1':
                endpoint_url = 'https://s3.amazonaws.com'
            elif region == 'cn-north-1':
                endpoint_url = "https://s3.{}.amazonaws.com.cn".format(region)
            else:
                endpoint_url = 'https://s3-' + region + '.amazonaws.com'
            put(templates_dir + 'notebook_spark-defaults_local.conf', '/tmp/notebook_spark-defaults_local.conf')
            sudo('echo "spark.hadoop.fs.s3a.endpoint {}" >> /tmp/notebook_spark-defaults_local.conf'.format(endpoint_url))
            sudo('echo "spark.hadoop.fs.s3a.server-side-encryption-algorithm AES256" >> /tmp/notebook_spark-defaults_local.conf')
            if os.environ['application'] == 'zeppelin':
                # Zeppelin needs an explicit comma-separated spark.jars list.
                sudo('echo \"spark.jars $(ls -1 ' + jars_dir + '* | tr \'\\n\' \',\')\" >> /tmp/notebook_spark-defaults_local.conf')
            # \cp bypasses any cp alias (e.g. cp -i) so the copy never prompts.
            sudo('\cp /tmp/notebook_spark-defaults_local.conf /opt/spark/conf/spark-defaults.conf')
            sudo('touch /home/{}/.ensure_dir/local_spark_configured'.format(os_user))
        except:
            sys.exit(1)
    try:
        if memory_type == 'driver':
            spark_memory = dlab.fab.get_spark_memory()
            # Replace any existing spark.*.memory line with the fresh value.
            sudo('sed -i "/spark.*.memory/d" /opt/spark/conf/spark-defaults.conf')
            sudo('echo "spark.{0}.memory {1}m" >> /opt/spark/conf/spark-defaults.conf'.format(memory_type, spark_memory))
    except:
        sys.exit(1)
def configure_zeppelin_emr_interpreter(emr_version, cluster_name, region, spark_dir, os_user, yarn_dir, bucket,
                                       user_name, endpoint_url, multiple_emrs):
    """Register EMR Spark (and optionally Livy) interpreters in local Zeppelin.

    Points Zeppelin at the cluster-specific Spark under
    ``/opt/<emr_version>/<cluster_name>/``, restarts the notebook service,
    renders ``/tmp/dataengine-service_interpreter.json`` with this cluster's
    values and POSTs it to the Zeppelin REST API.  When ``multiple_emrs`` is
    the string ``'true'`` a dedicated Livy server is configured and started
    on the first free port at or above 8998.  Exits the process on failure.
    """
    try:
        port_number_found = False
        zeppelin_restarted = False
        default_port = 8998
        get_cluster_python_version(region, bucket, user_name, cluster_name)
        # NOTE: ``file()`` is the Python 2 builtin open.
        with file('/tmp/python_version') as f:
            python_version = f.read()
        python_version = python_version[0:5]
        livy_port = ''
        livy_path = '/opt/' + emr_version + '/' + cluster_name + '/livy/'
        # AWS SDK / hadoop-aws / lzo jar globs added to spark.jars below.
        spark_libs = "/opt/" + emr_version + "/jars/usr/share/aws/aws-java-sdk/aws-java-sdk-core*.jar /opt/" + \
                     emr_version + "/jars/usr/lib/hadoop/hadoop-aws*.jar /opt/" + emr_version + \
                     "/jars/usr/share/aws/aws-java-sdk/aws-java-sdk-s3-*.jar /opt/" + emr_version + \
                     "/jars/usr/lib/hadoop-lzo/lib/hadoop-lzo-*.jar"
        local('echo \"Configuring emr path for Zeppelin\"')
        # Point SPARK_HOME / HADOOP_CONF_DIR at the cluster-specific installs.
        local('sed -i \"s/^export SPARK_HOME.*/export SPARK_HOME=\/opt\/' + emr_version + '\/' +
              cluster_name + '\/spark/\" /opt/zeppelin/conf/zeppelin-env.sh')
        local('sed -i \"s/^export HADOOP_CONF_DIR.*/export HADOOP_CONF_DIR=\/opt\/' + emr_version + '\/' +
              cluster_name + '\/conf/\" /opt/' + emr_version + '/' + cluster_name +
              '/spark/conf/spark-env.sh')
        local('echo \"spark.jars $(ls ' + spark_libs + ' | tr \'\\n\' \',\')\" >> /opt/' + emr_version + '/' +
              cluster_name + '/spark/conf/spark-defaults.conf')
        local('sed -i "/spark.executorEnv.PYTHONPATH/d" /opt/' + emr_version + '/' + cluster_name +
              '/spark/conf/spark-defaults.conf')
        local('sed -i "/spark.yarn.dist.files/d" /opt/' + emr_version + '/' + cluster_name +
              '/spark/conf/spark-defaults.conf')
        local('sudo chown ' + os_user + ':' + os_user + ' -R /opt/zeppelin/')
        local('sudo systemctl daemon-reload')
        local('sudo service zeppelin-notebook stop')
        local('sudo service zeppelin-notebook start')
        # Poll until Zeppelin's web port (8080) is open again; the grep for
        # "closed" fails (echo $? == 1) once the port accepts connections.
        while not zeppelin_restarted:
            local('sleep 5')
            result = local('sudo bash -c "nmap -p 8080 localhost | grep closed > /dev/null" ; echo $?', capture=True)
            result = result[:1]
            if result == '1':
                zeppelin_restarted = True
        local('sleep 5')
        local('echo \"Configuring emr spark interpreter for Zeppelin\"')
        if multiple_emrs == 'true':
            # Find the first free port for this cluster's Livy server.
            while not port_number_found:
                port_free = local('sudo bash -c "nmap -p ' + str(default_port) +
                                  ' localhost | grep closed > /dev/null" ; echo $?', capture=True)
                port_free = port_free[:1]
                if port_free == '0':
                    livy_port = default_port
                    port_number_found = True
                else:
                    default_port += 1
            local('sudo echo "livy.server.port = ' + str(livy_port) + '" >> ' + livy_path + 'conf/livy.conf')
            local('sudo echo "livy.spark.master = yarn" >> ' + livy_path + 'conf/livy.conf')
            # Comment out every line of the blacklist so nothing is blocked.
            if os.path.exists(livy_path + 'conf/spark-blacklist.conf'):
                local('sudo sed -i "s/^/#/g" ' + livy_path + 'conf/spark-blacklist.conf')
            local(''' sudo echo "export SPARK_HOME=''' + spark_dir + '''" >> ''' + livy_path + '''conf/livy-env.sh''')
            local(''' sudo echo "export HADOOP_CONF_DIR=''' + yarn_dir + '''" >> ''' + livy_path +
                  '''conf/livy-env.sh''')
            local(''' sudo echo "export PYSPARK3_PYTHON=python''' + python_version[0:3] + '''" >> ''' +
                  livy_path + '''conf/livy-env.sh''')
            # Render the interpreter template with this cluster's values.
            template_file = "/tmp/dataengine-service_interpreter.json"
            fr = open(template_file, 'r+')
            text = fr.read()
            text = text.replace('CLUSTER_NAME', cluster_name)
            text = text.replace('SPARK_HOME', spark_dir)
            text = text.replace('ENDPOINTURL', endpoint_url)
            text = text.replace('LIVY_PORT', str(livy_port))
            fw = open(template_file, 'w')
            fw.write(text)
            fw.close()
            # Zeppelin may still be warming up; retry the POST a few times.
            for _ in range(5):
                try:
                    local("curl --noproxy localhost -H 'Content-Type: application/json' -X POST -d " +
                          "@/tmp/dataengine-service_interpreter.json http://localhost:8080/api/interpreter/setting")
                    break
                except:
                    local('sleep 5')
            # Install and start a per-cluster systemd unit for Livy.
            local('sudo cp /opt/livy-server-cluster.service /etc/systemd/system/livy-server-' + str(livy_port) +
                  '.service')
            local("sudo sed -i 's|OS_USER|" + os_user + "|' /etc/systemd/system/livy-server-" + str(livy_port) +
                  '.service')
            local("sudo sed -i 's|LIVY_PATH|" + livy_path + "|' /etc/systemd/system/livy-server-" + str(livy_port)
                  + '.service')
            local('sudo chmod 644 /etc/systemd/system/livy-server-' + str(livy_port) + '.service')
            local("sudo systemctl daemon-reload")
            local("sudo systemctl enable livy-server-" + str(livy_port))
            local('sudo systemctl start livy-server-' + str(livy_port))
        else:
            # No Livy: register one Spark interpreter per python version.
            template_file = "/tmp/dataengine-service_interpreter.json"
            p_versions = ["2", python_version[:3]]
            for p_version in p_versions:
                fr = open(template_file, 'r+')
                text = fr.read()
                text = text.replace('CLUSTERNAME', cluster_name)
                text = text.replace('PYTHONVERSION', p_version)
                text = text.replace('SPARK_HOME', spark_dir)
                text = text.replace('PYTHONVER_SHORT', p_version[:1])
                text = text.replace('ENDPOINTURL', endpoint_url)
                text = text.replace('DATAENGINE-SERVICE_VERSION', emr_version)
                tmp_file = "/tmp/emr_spark_py" + p_version + "_interpreter.json"
                fw = open(tmp_file, 'w')
                fw.write(text)
                fw.close()
                for _ in range(5):
                    try:
                        local("curl --noproxy localhost -H 'Content-Type: application/json' -X POST -d " +
                              "@/tmp/emr_spark_py" + p_version +
                              "_interpreter.json http://localhost:8080/api/interpreter/setting")
                        break
                    except:
                        local('sleep 5')
        # Marker file so the interpreter setup is not repeated.
        local('touch /home/' + os_user + '/.ensure_dir/dataengine-service_' + cluster_name + '_interpreter_ensured')
    except:
        sys.exit(1)
def configure_dataengine_spark(cluster_name, jars_dir, cluster_dir, region, datalake_enabled):
    """Finalize the standalone dataengine's spark-defaults.conf.

    Appends the jar list plus the regional s3a endpoint/encryption settings
    to the staged config under /tmp/<cluster_name>/ and moves it into the
    cluster's Spark conf directory.  ``datalake_enabled`` is unused here —
    presumably kept for interface parity with another cloud implementation
    (verify against callers).
    """
    local("jar_list=`find {0} -name '*.jar' | tr '\\n' ','` ; echo \"spark.jars $jar_list\" >> \
        /tmp/{1}/notebook_spark-defaults_local.conf".format(jars_dir, cluster_name))
    # Region-specific S3 endpoint for the s3a filesystem.
    if region == 'us-east-1':
        endpoint_url = 'https://s3.amazonaws.com'
    elif region == 'cn-north-1':
        endpoint_url = "https://s3.{}.amazonaws.com.cn".format(region)
    else:
        endpoint_url = 'https://s3-' + region + '.amazonaws.com'
    local("""bash -c 'echo "spark.hadoop.fs.s3a.endpoint """ + endpoint_url + """" >> /tmp/{}/notebook_spark-defaults_local.conf'""".format(cluster_name))
    local('echo "spark.hadoop.fs.s3a.server-side-encryption-algorithm AES256" >> /tmp/{}/notebook_spark-defaults_local.conf'.format(cluster_name))
    local('mv /tmp/{0}/notebook_spark-defaults_local.conf {1}spark/conf/spark-defaults.conf'.format(cluster_name, cluster_dir))
def remove_dataengine_kernels(tag_name, notebook_name, os_user, key_path, cluster_name):
    """Remove all notebook kernels/interpreters installed for ``cluster_name``.

    Connects to the notebook instance via fabric (configured through the
    ``env.*`` globals), deletes Jupyter kernelspecs, tears down the
    per-cluster Livy/Zeppelin interpreters (restarting Zeppelin when
    needed), removes RStudio dataengine kernels and finally deletes
    ``/opt/<cluster_name>/``.  Errors are logged and appended to the result
    file; nothing is re-raised.
    """
    try:
        private = meta_lib.get_instance_private_ip_address(tag_name, notebook_name)
        env.hosts = "{}".format(private)
        env.user = "{}".format(os_user)
        env.key_filename = "{}".format(key_path)
        env.host_string = env.user + "@" + env.hosts
        sudo('rm -rf /home/{}/.local/share/jupyter/kernels/*_{}'.format(os_user, cluster_name))
        if exists('/home/{}/.ensure_dir/dataengine_{}_interpreter_ensured'.format(os_user, cluster_name)):
            if os.environ['notebook_multiple_clusters'] == 'true':
                try:
                    # Find this cluster's Livy port, kill its process and
                    # disable its systemd unit.
                    livy_port = sudo("cat /opt/" + cluster_name +
                                     "/livy/conf/livy.conf | grep livy.server.port | tail -n 1 | awk '{printf $3}'")
                    process_number = sudo("netstat -natp 2>/dev/null | grep ':" + livy_port +
                                          "' | awk '{print $7}' | sed 's|/.*||g'")
                    sudo('kill -9 ' + process_number)
                    sudo('systemctl disable livy-server-' + livy_port)
                except:
                    print("Wasn't able to find Livy server for this EMR!")
            sudo(
                'sed -i \"s/^export SPARK_HOME.*/export SPARK_HOME=\/opt\/spark/\" /opt/zeppelin/conf/zeppelin-env.sh')
            sudo("rm -rf /home/{}/.ensure_dir/dataengine_interpreter_ensure".format(os_user))
            # Delete every Zeppelin interpreter whose name contains the
            # cluster name, via the REST API (no proxy).
            zeppelin_url = 'http://' + private + ':8080/api/interpreter/setting/'
            opener = urllib2.build_opener(urllib2.ProxyHandler({}))
            req = opener.open(urllib2.Request(zeppelin_url))
            r_text = req.read()
            interpreter_json = json.loads(r_text)
            interpreter_prefix = cluster_name
            for interpreter in interpreter_json['body']:
                if interpreter_prefix in interpreter['name']:
                    print("Interpreter with ID: {} and name: {} will be removed from zeppelin!".format(
                        interpreter['id'], interpreter['name']))
                    request = urllib2.Request(zeppelin_url + interpreter['id'], data='')
                    request.get_method = lambda: 'DELETE'
                    url = opener.open(request)
                    print(url.read())
            sudo('chown ' + os_user + ':' + os_user + ' -R /opt/zeppelin/')
            sudo('systemctl daemon-reload')
            sudo("service zeppelin-notebook stop")
            sudo("service zeppelin-notebook start")
            # Wait for Zeppelin's web port to come back up.
            zeppelin_restarted = False
            while not zeppelin_restarted:
                sudo('sleep 5')
                result = sudo('nmap -p 8080 localhost | grep "closed" > /dev/null; echo $?')
                result = result[:1]
                if result == '1':
                    zeppelin_restarted = True
            sudo('sleep 5')
            sudo('rm -rf /home/{}/.ensure_dir/dataengine_{}_interpreter_ensured'.format(os_user, cluster_name))
        if exists('/home/{}/.ensure_dir/rstudio_dataengine_ensured'.format(os_user)):
            dlab.fab.remove_rstudio_dataengines_kernel(cluster_name, os_user)
        sudo('rm -rf /opt/' + cluster_name + '/')
        print("Notebook's {} kernels were removed".format(env.hosts))
    except Exception as err:
        # BUG FIX: traceback.print_exc() returns None; format_exc() returns
        # the traceback text so the log/result actually contain it.
        logging.info("Unable to remove kernels on Notebook: " + str(err) +
                     "\n Traceback: " + traceback.format_exc())
        append_result(str({"error": "Unable to remove kernels on Notebook",
                           "error_message": str(err) + "\n Traceback: " + traceback.format_exc()}))
        traceback.print_exc(file=sys.stdout)
def prepare_disk(os_user):
    """Partition, format (ext4) and mount the last attached disk at /opt/.

    Idempotent via the ``disk_ensured`` marker file.  Exits the process on
    failure.  DESTRUCTIVE: wipes the partition table of the selected disk.
    """
    if not exists('/home/' + os_user + '/.ensure_dir/disk_ensured'):
        try:
            # Last disk device reported by lsblk, e.g. 'xvdb'.
            disk_name = sudo("lsblk | grep disk | awk '{print $1}' | sort | tail -n 1")
            # Feed fdisk the keystrokes: new DOS table, one primary
            # partition spanning the disk, write.
            sudo('''bash -c 'echo -e "o\nn\np\n1\n\n\nw" | fdisk /dev/{}' '''.format(disk_name))
            sudo('mkfs.ext4 -F /dev/{}1'.format(disk_name))
            sudo('mount /dev/{}1 /opt/'.format(disk_name))
            # Persist the mount across reboots.
            sudo(''' bash -c "echo '/dev/{}1 /opt/ ext4 errors=remount-ro 0 1' >> /etc/fstab" '''.format(disk_name))
            sudo('touch /home/' + os_user + '/.ensure_dir/disk_ensured')
        except:
            sys.exit(1)
def ensure_local_spark(os_user, spark_link, spark_version, hadoop_version, local_spark_path):
    """Download and install a standalone Spark distribution, once per notebook.

    Guarded by the ``local_spark_ensured`` marker; exits on failure.
    """
    marker = '/home/' + os_user + '/.ensure_dir/local_spark_ensured'
    if not exists(marker):
        try:
            archive = '/tmp/spark-' + spark_version + '-bin-hadoop' + hadoop_version + '.tgz'
            unpacked = '/opt/spark-' + spark_version + '-bin-hadoop' + hadoop_version
            sudo('wget ' + spark_link + ' -O ' + archive)
            sudo('tar -zxvf ' + archive + ' -C /opt/')
            sudo('mv ' + unpacked + ' ' + local_spark_path)
            sudo('chown -R ' + os_user + ':' + os_user + ' ' + local_spark_path)
            sudo('touch ' + marker)
        except:
            sys.exit(1)
def install_dataengine_spark(cluster_name, spark_link, spark_version, hadoop_version, cluster_dir, os_user, datalake_enabled):
    """Download, unpack and install Spark for a standalone dataengine cluster."""
    archive = '/tmp/{0}/spark-{1}-bin-hadoop{2}.tgz'.format(cluster_name, spark_version, hadoop_version)
    local('wget {0} -O {1}'.format(spark_link, archive))
    local('tar -zxvf {0} -C /opt/{1}'.format(archive, cluster_name))
    local('mv /opt/{0}/spark-{1}-bin-hadoop{2} {3}spark/'.format(
        cluster_name, spark_version, hadoop_version, cluster_dir))
    local('chown -R {0}:{0} {1}spark/'.format(os_user, cluster_dir))
| apache-2.0 |
Fireblend/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
    """Benchmark KMeans vs MiniBatchKMeans over a grid of dataset sizes.

    For every (n_samples, n_features) pair, fits both estimators on random
    integer data and records fit time and inertia.

    Returns
    -------
    defaultdict(list)
        Keys: 'kmeans_speed', 'kmeans_quality', 'MiniBatchKMeans Speed',
        'MiniBatchKMeans Quality'.
    """
    it = 0
    results = defaultdict(lambda: [])
    chunk = 100  # MiniBatchKMeans batch size
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('==============================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('==============================')
            print()
            # np.random.random_integers was deprecated (NumPy 1.11) and later
            # removed; randint has an exclusive upper bound, so 51 keeps the
            # same inclusive [-50, 50] range.
            data = nr.randint(-50, 51, (n_samples, n_features))

            print('K-Means')
            tstart = time()
            kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %0.5f" % kmeans.inertia_)
            print()
            results['kmeans_speed'].append(delta)
            results['kmeans_quality'].append(kmeans.inertia_)

            print('Fast K-Means')
            # let's prepare the data in small chunks
            mbkmeans = MiniBatchKMeans(init='k-means++',
                                       n_clusters=10,
                                       batch_size=chunk)
            tstart = time()
            mbkmeans.fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %f" % mbkmeans.inertia_)
            print()
            print()
            results['MiniBatchKMeans Speed'].append(delta)
            results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
    return results
def compute_bench_2(chunks):
    """Benchmark MiniBatchKMeans on a fixed 8-blob dataset per batch size.

    Returns a defaultdict(list) with keys 'MiniBatchKMeans Speed' and
    'MiniBatchKMeans Quality' (inertia).
    """
    results = defaultdict(lambda: [])
    n_features = 50000
    means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
                      [0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
    # n_features points around each of the 8 centres, drawn in centre order
    # (same RNG consumption as a sequential np.r_ build-up).
    X = np.vstack([means[i] + 0.8 * np.random.randn(n_features, 2)
                   for i in range(8)])
    max_it = len(chunks)
    for it, chunk in enumerate(chunks, start=1):
        print('==============================')
        print('Iteration %03d of %03d' % (it, max_it))
        print('==============================')
        print()
        print('Fast K-Means')
        tstart = time()
        mbkmeans = MiniBatchKMeans(init='k-means++',
                                   n_clusters=8,
                                   batch_size=chunk)
        mbkmeans.fit(X)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        print("Inertia: %0.3fs" % mbkmeans.inertia_)
        print()
        results['MiniBatchKMeans Speed'].append(delta)
        results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # noqa: F401 (registers the 3d projection)
    import matplotlib.pyplot as plt

    # np.int (removed in NumPy 1.24) -> builtin int, and dict.iteritems()
    # (Python 2 only) -> items(), which also works on Python 2.
    samples_range = np.linspace(50, 150, 5).astype(int)
    features_range = np.linspace(150, 50000, 5).astype(int)
    chunks = np.linspace(500, 10000, 15).astype(int)

    results = compute_bench(samples_range, features_range)
    results_2 = compute_bench_2(chunks)

    # Axis limits for the 3d surface plots.
    max_time = max([max(i) for i in [t for (label, t) in results.items()
                                     if "speed" in label]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.items()
        if "speed" not in label]])

    fig = plt.figure('scikit-learn K-Means benchmark results')
    for c, (label, timings) in zip('brcy',
                                   sorted(results.items())):
        if 'speed' in label:
            ax = fig.add_subplot(2, 2, 1, projection='3d')
            ax.set_zlim3d(0.0, max_time * 1.1)
        else:
            ax = fig.add_subplot(2, 2, 2, projection='3d')
            ax.set_zlim3d(0.0, max_inertia * 1.1)

        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')

    i = 0
    for c, (label, timings) in zip('br',
                                   sorted(results_2.items())):
        i += 1
        ax = fig.add_subplot(2, 2, i + 2)
        y = np.asarray(timings)
        ax.plot(chunks, y, color=c, alpha=0.8)
        ax.set_xlabel('Chunks')
        ax.set_ylabel(label)

    plt.show()
| bsd-3-clause |
ml-lab/neuralnilm | neuralnilm/rectangles.py | 4 | 2236 | from neuralnilm.utils import get_colors
def plot_rectangles(ax, single_example, plot_seq_width=1, offset=0, how='bar',
                    **plot_kwargs):
    """
    Parameters
    ----------
    ax : matplotlib axes
    single_example : numpy.ndarray
        A single example from within the batch.
        i.e. single_output = batch[seq_i]
        Shape = (3, n_outputs); rows are (left, right, height).
    plot_seq_width : int or float, optional
        The width of a sequence plotted on the X-axis.
        Multiply `left` and `right` values by `plot_seq_width` before plotting.
    offset : float, optional
        Shift rectangles left or right by `offset` where one complete sequence
        is of length `plot_seq_width`. i.e. to move rectangles half a plot
        width right, set `offset` to `plot_seq_width / 2.0`.
    how : {'bar', 'line'}
    **plot_kwargs : key word arguments to send to `ax.bar()`. For example:
        alpha : float, optional
            [0, 1]. Transparency for the rectangles.
        color
    """
    # sanity check
    for obj in [plot_seq_width, offset]:
        if not isinstance(obj, (int, float)):
            raise ValueError("Incorrect input: {}".format(obj))
    assert single_example.shape[0] == 3

    n_outputs = single_example.shape[1]
    colors = get_colors(n_outputs)
    for output_i in range(n_outputs):
        single_rect = single_example[:, output_i]
        left = (single_rect[0] * plot_seq_width) + offset
        right = (single_rect[1] * plot_seq_width) + offset
        # Degenerate rectangles (either edge at or below zero) get no width.
        if single_rect[0] > 0 and single_rect[1] > 0:
            width = (single_rect[1] - single_rect[0]) * plot_seq_width
        else:
            width = 0
        height = single_rect[2]

        # BUG FIX: the original called plot_kwargs.setdefault('color', ...)
        # on the shared dict inside the loop, so the first output's colour
        # stuck for every subsequent output, defeating get_colors(n_outputs).
        # Use a per-output copy so each output gets its own colour unless
        # the caller explicitly supplied one.
        kwargs = dict(plot_kwargs)
        kwargs.setdefault('color', colors[output_i])
        if how == 'bar':
            kwargs.setdefault('edgecolor', kwargs['color'])
            kwargs.setdefault('linewidth', 0)
            ax.bar(left, height, width, **kwargs)
        elif how == 'line':
            ax.plot([left, left, right, right],
                    [0, height, height, 0],
                    **kwargs)
        else:
            raise ValueError("'how' is not recognised.")
| apache-2.0 |
gregstarr/anomaly-detection | test.py | 1 | 1597 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 30 16:42:25 2017
@author: greg
"""
import numpy as np
from ranksvm_k import ranksvm_k
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from gregAD import knn_score
from scipy.io import loadmat
# Load the precomputed arrays: kernel matrix K, labels y, regularisation C
# and sparse-constraint triplets (d, row, col) — presumably produced by an
# earlier training step (verify against the generating script).
dic = np.load('arrays.npz')
l = len(dic['y'])
th = 1.0e-6  # coefficients with magnitude below this are treated as zero
A = csr_matrix((dic['d'],(dic['row'],dic['col'])))
# Train the kernel RankSVM; beta holds the coefficients.
beta,asv = ranksvm_k(dic['K'],A,dic['C'],prec=1.0e-4)
# Score every sample using only the (near-)support vectors.
yy = np.dot(dic['K'][np.abs(beta[:,0])>th,:].T,beta[np.abs(beta)>th,None])
# Convert RankSVM scores to empirical ranks in [0, 1).
R_rank = np.empty_like(dic['y'])
for i in range(len(dic['y'])):
    R_rank[i] = np.sum(yy[i]>yy)/l
train_G = knn_score(dic['K']) # similarity metric
R_knn = np.empty_like(dic['y']) # ranking metric [0,1)
for i in range(l):
    R_knn[i] = np.sum(train_G[i]>train_G)/l
# Sweep the decision threshold and record false-alarm / detection rates for
# both detectors (y == 1 nominal, y == -1 anomalous).
alphas = np.arange(0,1,.01)
fa1 = np.empty(100)
tp1 = np.empty(100)
fa2 = np.empty(100)
tp2 = np.empty(100)
for i,a in enumerate(alphas):
    rank_class = np.sign(R_rank-a)
    fa1[i] = np.sum(rank_class[dic['y']==1] == -1)/np.sum(dic['y']==1)
    tp1[i] = np.sum(rank_class[dic['y']==-1] == -1)/np.sum(dic['y']==-1)
    knn_class = np.sign(R_knn-a)
    fa2[i] = np.sum(knn_class[dic['y']==1] == -1)/np.sum(dic['y']==1)
    tp2[i] = np.sum(knn_class[dic['y']==-1] == -1)/np.sum(dic['y']==-1)
# Overlay the ROC curves from the MATLAB reference implementation.
mat = loadmat('/home/greg/Documents/MATLAB/PrimalRankSVM/fatp.mat')
plt.figure()
plt.plot(fa1,tp1,label='RankSVM')
plt.plot(fa2,tp2,label='KNN')
plt.plot(mat['PRank_FA'].T,mat['PRank_DET'].T,label='RankSVM - Matlab')
plt.plot(mat['knn_FA'].T,mat['knn_DET'].T,label='KNN - Matlab')
plt.legend() | gpl-3.0 |
ndingwall/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 17 | 8149 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
This example shows covariance estimation with Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance:
.. math::
d_{(\mu,\Sigma)}(x_i)^2 = (x_i - \mu)^T\Sigma^{-1}(x_i - \mu)
where :math:`\mu` and :math:`\Sigma` are the location and the covariance of
the underlying Gaussian distributions.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The standard covariance maximum likelihood estimate (MLE) is very
sensitive to the presence of outliers in the data set and therefore,
the downstream Mahalanobis distances also are. It would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the dataset and that the
calculated Mahalanobis distances accurately reflect the true
organization of the observations.
The Minimum Covariance Determinant estimator (MCD) is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea behind the MCD is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance. The MCD was introduced by
P.J.Rousseuw in [1]_.
This example illustrates how the Mahalanobis distances are affected by
outlying data. Observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution when using standard covariance MLE based Mahalanobis
distances. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking and clustering.
.. note::
See also :ref:`sphx_glr_auto_examples_covariance_plot_robust_vs_empirical_covariance.py`
.. topic:: References:
.. [1] P. J. Rousseeuw. `Least median of squares regression
<http://web.ipac.caltech.edu/staff/fmasci/home/astro_refs/LeastMedianOfSquares.pdf>`_. J. Am
Stat Ass, 79:871, 1984.
.. [2] Wilson, E. B., & Hilferty, M. M. (1931). `The distribution of chi-square.
<https://water.usgs.gov/osw/bulletin17b/Wilson_Hilferty_1931.pdf>`_
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
""" # noqa: E501
# %%
# Generate data
# --------------
#
# First, we generate a dataset of 125 samples and 2 features. Both features
# are Gaussian distributed with mean of 0 but feature 1 has a standard
# deviation equal to 2 and feature 2 has a standard deviation equal to 1. Next,
# 25 samples are replaced with Gaussian outlier samples where feature 1 has
# a standard deviation equal to 1 and feature 2 has a standard deviation equal
# to 7.
import numpy as np

# for consistent results
np.random.seed(7)

n_samples = 125
n_outliers = 25
n_features = 2

# generate Gaussian data of shape (125, 2)
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers (overwrite the last n_outliers rows)
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)

# %%
# Comparison of results
# ---------------------
#
# Below, we fit MCD and MLE based covariance estimators to our data and print
# the estimated covariance matrices. Note that the estimated variance of
# feature 2 is much higher with the MLE based estimator (7.5) than
# that of the MCD robust estimator (1.2). This shows that the MCD based
# robust estimator is much more resistant to the outlier samples, which were
# designed to have a much larger variance in feature 2.
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet

# fit a MCD robust estimator to data
robust_cov = MinCovDet().fit(X)
# fit a MLE estimator to data
emp_cov = EmpiricalCovariance().fit(X)
print('Estimated covariance matrix:\n'
      'MCD (Robust):\n{}\n'
      'MLE:\n{}'.format(robust_cov.covariance_, emp_cov.covariance_))

# %%
# To better visualize the difference, we plot contours of the
# Mahalanobis distances calculated by both methods. Notice that the robust
# MCD based Mahalanobis distances fit the inlier black points much better,
# whereas the MLE based distances are more influenced by the outlier
# red points.
fig, ax = plt.subplots(figsize=(10, 5))
# Plot data set
inlier_plot = ax.scatter(X[:, 0], X[:, 1],
                         color='black', label='inliers')
outlier_plot = ax.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
                          color='red', label='outliers')
ax.set_xlim(ax.get_xlim()[0], 10.)
ax.set_title("Mahalanobis distances of a contaminated data set")

# Create meshgrid of feature 1 and feature 2 values
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
                     np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
# Calculate the MLE based Mahalanobis distances of the meshgrid
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = plt.contour(xx, yy, np.sqrt(mahal_emp_cov),
                              cmap=plt.cm.PuBu_r, linestyles='dashed')
# Calculate the MCD based Mahalanobis distances
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = ax.contour(xx, yy, np.sqrt(mahal_robust_cov),
                            cmap=plt.cm.YlOrBr_r, linestyles='dotted')

# Add legend
# NOTE(review): ContourSet.collections was removed in Matplotlib 3.10 --
# confirm the pinned Matplotlib version still supports this access.
ax.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
           inlier_plot, outlier_plot],
          ['MLE dist', 'MCD dist', 'inliers', 'outliers'],
          loc="upper right", borderaxespad=0)
plt.show()

# %%
# Finally, we highlight the ability of MCD based Mahalanobis distances to
# distinguish outliers. We take the cubic root of the Mahalanobis distances,
# yielding approximately normal distributions (as suggested by Wilson and
# Hilferty [2]_), then plot the values of inlier and outlier samples with
# boxplots. The distribution of outlier samples is more separated from the
# distribution of inlier samples for robust MCD based Mahalanobis distances.
fig, (ax1, ax2) = plt.subplots(1, 2)
plt.subplots_adjust(wspace=.6)

# Calculate cubic root of MLE Mahalanobis distances for samples
# (0.33 is used here as an approximation of the exact 1/3 exponent)
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
# Plot boxplots
ax1.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
# Plot individual samples
ax1.plot(np.full(n_samples - n_outliers, 1.26), emp_mahal[:-n_outliers],
         '+k', markeredgewidth=1)
ax1.plot(np.full(n_outliers, 2.26), emp_mahal[-n_outliers:],
         '+k', markeredgewidth=1)
ax1.axes.set_xticklabels(('inliers', 'outliers'), size=15)
ax1.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
ax1.set_title("Using non-robust estimates\n(Maximum Likelihood)")

# Calculate cubic root of MCD Mahalanobis distances for samples
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
# Plot boxplots
ax2.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
            widths=.25)
# Plot individual samples
ax2.plot(np.full(n_samples - n_outliers, 1.26), robust_mahal[:-n_outliers],
         '+k', markeredgewidth=1)
ax2.plot(np.full(n_outliers, 2.26), robust_mahal[-n_outliers:],
         '+k', markeredgewidth=1)
ax2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
ax2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
ax2.set_title("Using robust estimates\n(Minimum Covariance Determinant)")

plt.show()
| bsd-3-clause |
mgarbanzo/radarphysics | capons.py | 1 | 1423 | #!/usr/bin/python
"""Capon (minimum-variance) spectral estimation demo.

Builds the sample covariance of a noisy multi-tone signal, derives the
Capon filter h(omega), and plots the filter's frequency response, the
Capon power spectral density, and a plain FFT spectrum for comparison.

FIX: converted Python-2 ``print`` statements to Python-3 calls and
replaced ``scipy.pi`` (removed from the scipy namespace in modern SciPy)
with ``np.pi``; behavior is otherwise unchanged.
"""
import numpy as np
from scipy import fftpack
import matplotlib.pyplot as plt
from lags import *

N = 64                              # number of signal samples
m = 16                              # filter / covariance order
Ts = 1                              # sampling interval
freqs = -0.2, 0.2, -0.3, 0.3, 0.09  # tone frequencies (cycles/sample)

sgn = GenerateSignal(Ts, N, freqs)
# add some noise:
sgn = sgn + 1*np.random.rand(len(sgn)) + 1j*np.random.rand(len(sgn))
sgn = sgn - np.mean(sgn)

ary = ZeroPad(sgn, m-1)
# NOTE(review): np.matrix is deprecated in NumPy; kept here because the
# .I / .H shorthand below relies on it.
Ry = np.matrix(np.cov(ary))
RyI = Ry.I
print("RyI Shape: ", RyI.shape)

# Defining the a(omega) steering vector:
omega = -2*np.pi/10
print("Omega: ", omega)
taps = np.arange(m)
a = np.matrix(np.exp(-taps*omega*1j))
print("a Shape: ", a.shape)
print("a.H Shape: ", a.H.shape)

# FILTER: minimum-variance distortionless response at omega
h = RyI*a.T/(a.H.T*RyI*a.T)
HO = h.H*a.T
print(HO, "This needs to be one")

w = np.arange(-np.pi, np.pi, 0.01)  # Frequencies for the final freq response and freq content
H = np.zeros_like(w)                # H contains the freq response
P = np.zeros_like(w)                # P contains the freq content
for i, omega in enumerate(w):
    a = np.matrix(np.exp(-taps*omega*1j))
    tmp1 = np.conjugate(h).T*a.T
    # Frequency response
    H[i] = np.real(np.abs(tmp1))
    # Power Spectral Density
    tmp2 = 1/(a.H.T*RyI*a.T)
    P[i] = np.real(np.abs(tmp2))

H = np.array(H)
P = np.array(P)
ft = fftpack.fft(sgn, n=10*len(sgn))
xv = np.fft.fftfreq(10*len(sgn), d=1)

plt.subplot(311)
plt.plot(w/(2*np.pi), np.real(H), 'r-')
plt.plot(w/(2*np.pi), np.imag(H), 'b-')
plt.subplot(312)
plt.plot(w/(2*np.pi), P, 'ro')
plt.subplot(313)
plt.plot(xv, np.abs(ft), 'ro')
plt.show()
| gpl-3.0 |
NaturalHistoryMuseum/insect_analysis | vision/measurements/subspace_shape.py | 2 | 5525 | import numpy as np
from sklearn.neighbors import NearestNeighbors
from skimage.transform import SimilarityTransform, estimate_transform, matrix_transform
import matplotlib.pyplot as plt
import scipy
from skimage.filters import gaussian
def plot_closest_points(image_points, edge_points, closest_edge_points):
    """Debug plot: edge pixels as red crosses, the model contour in blue,
    and a green segment joining each model point to its matched edge point.
    """
    plt.plot(edge_points[:, 0], edge_points[:, 1], 'r+')
    plt.plot(image_points[:, 0], image_points[:, 1], 'b')
    for point, match in zip(image_points, closest_edge_points):
        plt.plot([point[0], match[0]], [point[1], match[1]], 'g')
    plt.show()
def learn(points, K=1):
points = [point_set.flatten() for point_set in points]
w = np.stack(points, axis=1)
mu = np.mean(w, axis=1).reshape(-1, 1)
mu = (mu.reshape(-1, 2) - mu.reshape(-1, 2).mean(axis=0)).reshape(-1, 1)
W = w - mu
U, L2, _ = np.linalg.svd(np.dot(W, W.T))
D = mu.shape[0]
sigma2 = np.sum(L2[(K + 1):(D + 1)]) / (D - K)
phi = U[:, :K] @ np.sqrt(np.diag(L2[:K]) - sigma2 * np.eye(K))
return mu, phi, sigma2
def update_h(sigma2, phi, y, mu, psi):
"""Updates the hidden variables using updated parameters.
This is an implementation of the equation:
.. math::
\\hat{h} = (\\sigma^2 I + \\sum_{n=1}^N \\Phi_n^T A^T A \\Phi_n)^{-1} \\sum_{n=1}^N \\Phi_n^T A^T (y_n - A \\mu_n - b)
"""
N = y.shape[0]
K = phi.shape[1]
A = psi.params[:2, :2]
b = psi.translation
partial_0 = 0
for phi_n in np.split(phi, N, axis=0):
partial_0 += phi_n.T @ A.T @ A @ phi_n
partial_1 = sigma2 * np.eye(K) + partial_0
partial_2 = np.zeros((K, 1))
for phi_n, y_n, mu_n in zip(np.split(phi, N, axis=0), y, mu.reshape(-1, 2)):
partial_2 += phi_n.T @ A.T @ (y_n - A @ mu_n - b).reshape(2, -1)
return np.linalg.inv(partial_1) @ partial_2
def similarity(edge_image, mu, phi, sigma2, h, psi):
    """Score how well the shape ``mu + phi @ h``, mapped into the image by
    the similarity transform ``psi``, matches the edges in ``edge_image``.

    Returns a log-likelihood-style scalar: the negated sum of distances from
    each transformed model point to the nearest edge pixel (scaled by
    1/sigma2), plus the log prior density of ``h`` under a standard normal.
    """
    height, width = edge_image.shape
    # Distance from every pixel to the nearest edge pixel.
    edge_distance = scipy.ndimage.distance_transform_edt(~edge_image)
    w = (mu + phi @ h).reshape(-1, 2)
    image_points = matrix_transform(w, psi.params)
    # Interpolator giving the edge distance at fractional coordinates.
    closest_distances = scipy.interpolate.interp2d(range(width), range(height), edge_distance)
    K = h.size
    # Standard-normal prior over the shape coefficients.
    noise = scipy.stats.multivariate_normal(mean=np.zeros(K), cov=np.eye(K))
    if noise.pdf(h.flatten()) == 0:
        # NOTE(review): a zero pdf makes the log below -inf; this print only
        # reports the offending coefficients for debugging.
        print(h.flatten())
    noise = np.log(noise.pdf(h.flatten()))
    return -closest_distances(image_points[:, 0], image_points[:, 1]).sum() / sigma2 + noise
def gradient_step(gradient_y, gradient_x, magnitude, locations, step_size=5):
    """Move each (x, y) location one step down the distance-map gradient.

    ``locations`` has x in column 0 and y in column 1; coordinates are
    clipped to the image bounds both when sampling the gradient and in the
    returned positions.  Returns an (N, 2) array of updated (x, y) points.
    """
    height, width = magnitude.shape
    xs = locations[:, 0]
    ys = locations[:, 1]
    # Integer pixel indices used to sample the gradient fields.
    rows = np.clip(ys, 0, height - 1).astype(int)
    cols = np.clip(xs, 0, width - 1).astype(int)
    scale = step_size * magnitude[rows, cols]
    new_y = np.clip(ys - scale * gradient_y[rows, cols], 0, height - 1)
    new_x = np.clip(xs - scale * gradient_x[rows, cols], 0, width - 1)
    return np.stack((new_x, new_y), axis=1)
def infer(edge_image, edge_lengths, mu, phi, sigma2,
          update_slice=slice(None),
          scale_estimate=None,
          rotation=0,
          translation=(0, 0)):
    """Iteratively fit the shape model to an edge image (generator).

    Each iteration alternates between (1) matching transformed model points
    to nearby edges by descending a blurred distance-transform gradient,
    (2) re-estimating the similarity transform ``psi``, and (3) updating the
    shape coefficients ``h``.  Yields ``(image_points, closest_edge_points)``
    each round; the value sent back into the generator becomes the new
    ``update_slice`` (which rows of the model participate in fitting).

    NOTE(review): ``edge_lengths`` is only used by the commented-out
    nearest-neighbour matching below -- confirm it is still required.
    """
    # edge_points = np.array(np.where(edge_image)).T
    # edge_points[:, [0, 1]] = edge_points[:, [1, 0]]
    # edge_score = edge_image.shape[0] * np.exp(-edge_lengths[edge_image] / (0.25 * edge_image.shape[0])).reshape(-1, 1)
    # edge_points = np.concatenate((edge_points, edge_score), axis=1)
    #
    # edge_nn = NearestNeighbors(n_neighbors=1).fit(edge_points)

    # Smoothed distance-to-edge field and its gradient, used to pull model
    # points towards edges.
    edge_near = scipy.ndimage.distance_transform_edt(~edge_image)
    edge_near_blur = gaussian(edge_near, 2)
    Gy, Gx = np.gradient(edge_near_blur)
    mag = np.sqrt(np.power(Gy, 2) + np.power(Gx, 2))

    if scale_estimate is None:
        scale_estimate = min(edge_image.shape) * 4

    # Centre the mean shape and normalise the scale estimate by its size.
    mu = (mu.reshape(-1, 2) - mu.reshape(-1, 2).mean(axis=0)).reshape(-1, 1)
    average_distance = np.sqrt(np.power(mu.reshape(-1, 2), 2).sum(axis=1)).mean()
    scale_estimate /= average_distance * np.sqrt(2)

    h = np.zeros((phi.shape[1], 1))
    psi = SimilarityTransform(scale=scale_estimate, rotation=rotation, translation=translation)

    while True:
        # Current deformed shape, mapped into image coordinates.
        w = (mu + phi @ h).reshape(-1, 2)
        image_points = matrix_transform(w, psi.params)[update_slice, :]
        image_points = np.concatenate((image_points, np.zeros((image_points.shape[0], 1))), axis=1)
        # closest_edge_point_indices = edge_nn.kneighbors(image_points)[1].flatten()
        # closest_edge_points = edge_points[closest_edge_point_indices, :2]
        closest_edge_points = gradient_step(Gy, Gx, mag, image_points)

        # Re-fit the similarity transform using the undeformed mean shape.
        w = mu.reshape(-1, 2)
        psi = estimate_transform('similarity', w[update_slice, :], closest_edge_points)

        image_points = matrix_transform(w, psi.params)[update_slice, :]
        image_points = np.concatenate((image_points, np.zeros((image_points.shape[0], 1))), axis=1)
        # closest_edge_point_indices = edge_nn.kneighbors(image_points)[1].flatten()
        # closest_edge_points = edge_points[closest_edge_point_indices, :2]
        closest_edge_points = gradient_step(Gy, Gx, mag, image_points)

        # Update the shape coefficients from the sliced mean/basis.
        mu_slice = mu.reshape(-1, 2)[update_slice, :].reshape(-1, 1)
        K = phi.shape[-1]
        phi_full = phi.reshape(-1, 2, K)
        phi_slice = phi_full[update_slice, :].reshape(-1, K)
        h = update_h(sigma2, phi_slice, closest_edge_points, mu_slice, psi)

        w = (mu + phi @ h).reshape(-1, 2)
        image_points = matrix_transform(w, psi.params)
        update_slice = yield image_points, closest_edge_points
| gpl-2.0 |
mengxn/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 137 | 5063 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
  # pylint: disable=undefined-variable
  """tf.learn IO operation tests.

  Tests pandas/dask input extraction for tf.learn estimators.  Each test
  is skipped (with a console message) when the optional dependency is not
  installed; HAS_PANDAS / HAS_DASK come from the wildcard learn_io import.
  """

  def test_pandas_dataframe(self):
    # Train a LinearClassifier directly from pandas DataFrames.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      labels = pd.DataFrame(iris.target)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
    else:
      print("No pandas installed. pandas-related tests are skipped.")

  def test_pandas_series(self):
    # Same as above, but labels supplied as a pandas Series.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      labels = pd.Series(iris.target)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      score = accuracy_score(labels, list(classifier.predict_classes(data)))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))

  def test_string_data_formats(self):
    # Non-numeric DataFrame columns must be rejected with ValueError.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      with self.assertRaises(ValueError):
        learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
      with self.assertRaises(ValueError):
        learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))

  def test_dask_io(self):
    # Verify extraction preserves dask partitions and columns.
    if HAS_DASK and HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
      # test dask.dataframe
      df = pd.DataFrame(
          dict(
              a=list("aabbcc"), b=list(range(6))),
          index=pd.date_range(
              start="20100101", periods=6))
      ddf = dd.from_pandas(df, npartitions=3)
      extracted_ddf = extract_dask_data(ddf)
      self.assertEqual(
          extracted_ddf.divisions, (0, 2, 4, 6),
          "Failed with divisions = {0}".format(extracted_ddf.divisions))
      self.assertEqual(
          extracted_ddf.columns.tolist(), ["a", "b"],
          "Failed with columns = {0}".format(extracted_ddf.columns))
      # test dask.series
      labels = ddf["a"]
      extracted_labels = extract_dask_labels(labels)
      self.assertEqual(
          extracted_labels.divisions, (0, 2, 4, 6),
          "Failed with divisions = {0}".format(extracted_labels.divisions))
      # labels should only have one column
      with self.assertRaises(ValueError):
        extract_dask_labels(ddf)
    else:
      print("No dask installed. dask-related tests are skipped.")

  def test_dask_iris_classification(self):
    # End-to-end fit/predict with dask-backed data and labels.
    if HAS_DASK and HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      data = dd.from_pandas(data, npartitions=2)
      labels = pd.DataFrame(iris.target)
      labels = dd.from_pandas(labels, npartitions=2)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      predictions = data.map_partitions(classifier.predict).compute()
      score = accuracy_score(labels.compute(), predictions)
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
gwpy/gwsumm | gwsumm/tests/test_config.py | 1 | 8768 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWSumm.
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>.
"""Tests for :mod:`gwsumm.config`
"""
import os
import tempfile
from io import StringIO
from collections import OrderedDict
from configparser import (DEFAULTSECT, ConfigParser)
import pytest
from matplotlib import rcParams
from astropy import units
from .. import (state, config, html)
from ..channels import get_channel
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
TEST_CONFIG = StringIO("""
[DEFAULT]
defaultoption = defaultvalue
[section]
option1 = value1
option2 = True
option3 = 4
[plugins]
tempfile = ''
[units]
myunit = meter
cochrane = dimensionless
[%(IFO)s]
""")
def assert_configparser_equal(a, b):
    """Assert two config parsers hold identical options in every section
    (including DEFAULT) present in either parser."""
    all_sections = {DEFAULTSECT, *a.sections(), *b.sections()}
    for section in all_sections:
        assert list(a.items(section)) == list(b.items(section))
class TestGWSummConfigParser(object):
    """Tests for :class:`gwsumm.config.GWSummConfigParser`.

    ``new()`` builds a parser pre-loaded from the module-level TEST_CONFIG
    buffer; the ``cnfg`` fixture exposes the same thing to tests.
    """
    PARSER = config.GWSummConfigParser

    @classmethod
    def new(cls):
        # Rewind the shared StringIO before and after reading so every
        # call sees the full buffer.
        TEST_CONFIG.seek(0)
        cp = cls.PARSER()
        cp.read_file(TEST_CONFIG)
        TEST_CONFIG.seek(0)
        return cp

    @classmethod
    @pytest.fixture()
    def cnfg(cls):
        return cls.new()

    # -- test creation --------------------------

    def test_init(self):
        cp = self.new()
        assert cp.optionxform is str
        assert cp._dict is OrderedDict

    def test_configdir(self):
        assert set(os.listdir(config.CONFIGDIR)) == {
            'defaults.ini',
            'matplotlib.ini',
        }

    # -- test methods ---------------------------

    def test_ndoptions(self, cnfg):
        # non-default options exclude DEFAULT-section entries
        ndopts = cnfg.ndoptions('section')
        assert isinstance(ndopts, list)
        assert 'defaultoption' not in ndopts

    def test_nditems(self, cnfg):
        nditms = cnfg.nditems('section')
        assert isinstance(nditms, list)
        assert ('defaultoption', 'defaultvalue') not in nditms

    def test_read(self):
        cp = self.new()
        # read config from file
        with tempfile.NamedTemporaryFile(mode='w') as f:
            f.write(TEST_CONFIG.read())
            TEST_CONFIG.seek(0)  # rewind for other users
            read_ = cp.read(f.name)
            assert read_ == [f.name]
            assert cp.files == [os.path.abspath(f.name)]
        # check error gets raised when file isn't read
        with pytest.raises(IOError):
            cp.read('does-not-exist.ini')

    def test_from_configparser(self, cnfg):
        # check that GWSummConfigParser gets returned as is
        copy = self.PARSER.from_configparser(cnfg)
        assert copy is cnfg
        # check that base ConfigParser gets converted to GWSummConfigParser
        cp = ConfigParser()
        try:
            cp.read_file(TEST_CONFIG)
        except AttributeError:
            # fall back for old configparser API
            cp.readfp(TEST_CONFIG)
        TEST_CONFIG.seek(0)
        copy = self.PARSER.from_configparser(cp)
        assert isinstance(copy, self.PARSER)
        assert_configparser_equal(copy, cnfg)

    def test_interpolate_section_names(self, cnfg):
        assert 'X1' not in cnfg.sections()
        assert '%(IFO)s' in cnfg.sections()
        cnfg.interpolate_section_names(IFO='X1')
        assert 'X1' in cnfg.sections()
        assert '%(IFO)s' not in cnfg.sections()

    @pytest.mark.parametrize('ifo, obs, exp', [
        ('L1', None, 'LIGO Livingston'),
        ('X1', 'Einstein Telescope', 'Einstein Telescope'),
    ])
    def test_set_ifo_options(self, ifo, obs, exp):
        cp = self.new()
        cp.set_ifo_options(ifo, observatory=obs)
        assert cp.get(DEFAULTSECT, 'IFO') == ifo.upper()
        assert cp.get(DEFAULTSECT, 'ifo') == ifo.lower()
        assert cp.get(DEFAULTSECT, 'SITE') == ifo[0].upper()
        assert cp.get(DEFAULTSECT, 'site') == ifo[0].lower()
        assert cp.get(DEFAULTSECT, 'observatory') == exp

    def test_set_date_options(self):
        cp = self.new()
        cp.set_date_options(0, 100)
        # GPS 0 falls in 1980
        assert cp.get(DEFAULTSECT, 'gps-start-time') == '0'
        assert cp.get(DEFAULTSECT, 'gps-end-time') == '100'
        assert cp.get(DEFAULTSECT, 'yyyy') == '1980'
        assert cp.get(DEFAULTSECT, 'duration') == '100'

    def test_load_rcParams(self):
        # check empty config doesn't cause havoc
        cp = self.PARSER()
        assert cp.load_rcParams() == {}
        cp = self.new()
        cp.add_section('rcParams')
        cp.set('rcParams', 'axes.labelsize', '100')
        new = cp.load_rcParams()
        assert new == {'axes.labelsize': 100}
        assert rcParams['axes.labelsize'] == 100

    def test_load_states(self):
        cp = self.new()
        cp.set_date_options(0, 100)
        cp.add_section('states')
        cp.set('states', 'locked', 'X1:TEST-STATE:1')
        cp.load_states()
        states = state.get_states()
        # the 'All' state is always registered alongside defined states
        assert len(states) == 2
        assert 'locked' in states
        assert states['locked'].definition == 'X1:TEST-STATE:1'
        assert state.ALLSTATE in states

    def test_load_plugins(self, cnfg):
        # check that empty section doesn't cause havoc
        cp = self.PARSER()
        assert cp.load_plugins() == []
        # check plugins get laoded
        plugins = cnfg.load_plugins()
        assert plugins == [tempfile]

    def test_load_units(self, cnfg):
        # check that empty section doesn't cause havoc
        cp = self.PARSER()
        assert cp.load_units() == []
        newunits = cnfg.load_units()
        assert newunits == [units.meter, units.dimensionless_unscaled]

    def test_load_channels(self):
        # test simple channel section
        cp = self.PARSER()
        cp.add_section('X1:TEST-CHANNEL')
        cp.set('X1:TEST-CHANNEL', 'frametype', 'X1_TEST')
        cp.load_channels()
        c = get_channel('X1:TEST-CHANNEL')
        assert c.frametype == 'X1_TEST'
        # test with interpolation
        cp.set(DEFAULTSECT, 'IFO', 'X1')
        cp.add_section('%(IFO)s:TEST-CHANNEL_2')
        cp.set('%(IFO)s:TEST-CHANNEL_2', 'resample', '128')
        cp.interpolate_section_names(IFO='X1')
        cp.load_channels()
        c = get_channel('X1:TEST-CHANNEL_2')
        assert c.resample == 128
        # test bit parsing (numeric options map to bit labels)
        cp.set('X1:TEST-CHANNEL', '0', 'Bit 0')
        cp.set('X1:TEST-CHANNEL', '1', 'A_B')
        cp.load_channels()
        c = get_channel('X1:TEST-CHANNEL')
        assert c.bits == ['Bit 0', 'A_B']
        # test channels section
        cp.add_section('channels-test')
        cp.set('channels-test', 'channels',
               'X1:TEST-CHANNEL,X1:TEST-CHANNEL_2')
        cp.set('channels-test', 'unit', 'urad')
        cp.load_channels()
        assert c.unit == units.microradian

    def test_finalize(self):
        # smoke test: finalize runs cleanly on a dated config
        cp = self.new()
        cp.set_date_options(0, 100)
        cp.finalize()

    def test_get_css(self):
        # check empty result returns defaults
        cp = self.PARSER()
        css = cp.get_css()
        assert css == list(html.get_css().values())
        # check overrides
        cp.add_section('html')
        cp.set('html', 'gwbootstrap-css', 'test.css')
        css = cp.get_css()
        assert 'test.css' in css
        assert html.get_css()['gwbootstrap'] not in css
        # check custom files
        cp.set('html', 'extra-css', '"extra.css","/static/extra2.css"')
        css = cp.get_css()
        assert 'test.css' in css  # still finds overrides
        assert 'extra.css' in css and '/static/extra2.css' in css

    def test_get_javascript(self):
        # check empty result returns defaults
        cp = self.PARSER()
        js = cp.get_javascript()
        assert js == list(html.get_js().values())
        # check overrides
        cp.add_section('html')
        cp.set('html', 'gwbootstrap-js', 'test.js')
        js = cp.get_javascript()
        assert 'test.js' in js
        assert html.get_js()['gwbootstrap'] not in js
        # check custom files
        cp.set('html', 'extra-js', '"extra.js","/static/extra2.js"')
        js = cp.get_javascript()
        assert 'test.js' in js  # still finds overrides
        assert 'extra.js' in js and '/static/extra2.js' in js
| gpl-3.0 |
kmike/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 4 | 2690 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem be ware than the time complexity
is polynomial.
Here are some sample extracted topics that look quite good:
Topic #0:
god people bible israel jesus christian true moral think christians
believe don say human israeli church life children jewish
Topic #1:
drive windows card drivers video scsi software pc thanks vga
graphics help disk uni dos file ide controller work
Topic #2:
game team nhl games ca hockey players buffalo edu cc year play
university teams baseball columbia league player toronto
Topic #3:
window manager application mit motif size display widget program
xlib windows user color event information use events x11r5 values
Topic #4:
pitt gordon banks cs science pittsburgh univ computer soon disease
edu reply pain health david article medical medicine 16
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD

from __future__ import print_function

from time import time

from sklearn.feature_extraction import text
from sklearn import decomposition
from sklearn import datasets

n_samples = 1000
n_features = 1000
n_topics = 10
n_top_words = 20

# Load the 20 newsgroups dataset and vectorize it using the most common word
# frequency with TF-IDF weighting (without top 5% stop words)
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = datasets.fetch_20newsgroups(shuffle=True, random_state=1)

vectorizer = text.CountVectorizer(max_df=0.95, max_features=n_features)
counts = vectorizer.fit_transform(dataset.data[:n_samples])
tfidf = text.TfidfTransformer().fit_transform(counts)
print("done in %0.3fs." % (time() - t0))

# Fit the NMF model
print("Fitting the NMF model on with n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
# FIX: restart the timer so the reported fit time measures only the NMF
# fit, not the dataset download and vectorization above.
t0 = time()
nmf = decomposition.NMF(n_components=n_topics).fit(tfidf)
print("done in %0.3fs." % (time() - t0))

# Inverse the vectorizer vocabulary to be able to look words up by column
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
# favour of get_feature_names_out() -- confirm the pinned sklearn version.
feature_names = vectorizer.get_feature_names()

for topic_idx, topic in enumerate(nmf.components_):
    print("Topic #%d:" % topic_idx)
    print(" ".join([feature_names[i]
                    for i in topic.argsort()[:-n_top_words - 1:-1]]))
    print()
| bsd-3-clause |
gitporst/spotpy | spotpy/examples/3dplot.py | 1 | 1625 | '''
Copyright 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska
This file shows how to make 3d surface plots.
'''
import spotpy
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np  # FIX: ``np`` is used below but was never imported
from numpy import *

fig = plt.figure(figsize=(10, 10))
# NOTE(review): gca(projection='3d') was removed in Matplotlib 3.6; newer
# versions need fig.add_subplot(projection='3d') -- confirm pinned version.
ax = fig.gca(projection='3d')
#
# Plot Rosenbrock surface
X = arange(-30, 30, 0.05)
Y = arange(-30, 30, 0.05)
X, Y = meshgrid(X, Y)

#from spot_setup_rosenbrock import spot_setup
#from spot_setup_griewank import spot_setup
from spotpy.examples.spot_setup_ackley import spot_setup

# Evaluate the objective (RMSE against 0) on the whole grid.
Z = np.zeros(X.shape)
# FIX: ``xrange`` is Python-2-only; ``range`` behaves identically here.
for i in range(X.shape[0]):
    for j in range(X.shape[1]):
        sim = spot_setup().simulation([X[i, j], Y[i, j]])
        like = spotpy.objectivefunctions.rmse(sim, [0])
        Z[i, j] = like

surf_Rosen = ax.plot_surface(X, Y, Z, rstride=5, linewidth=0, cmap=cm.rainbow)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('RMSE')
plt.tight_layout()
plt.savefig('Griewank3d.tif', dpi=300)

#surf_Rosen = ax.plot_surface(X_Rosen, Y_Rosen, Z_Rosen, rstride=1, cstride=1,
#                             cmap=cm.coolwarm, linewidth=0, antialiased=False, alpha = 0.3)
# Adjust axes
#ax.set_zlim(0, 600)
#ax.zaxis.set_major_locator(LinearLocator(5))
#ax.zaxis.set_major_formatter(FormatStrFormatter('%.0f'))
# Report minimum
#print 'Minimum location', v0_ori, '\nMinimum value', Rosenbrock(v0_ori), '\nNumber of function evaluations', f_evals
# Render plot
plt.show()
aabadie/scikit-learn | examples/manifold/plot_mds.py | 88 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
print(__doc__)

import numpy as np

from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection

from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA

n_samples = 20
seed = np.random.RandomState(seed=3)
# FIX: ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is the
# documented replacement and is numerically identical.
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()

similarities = euclidean_distances(X_true)

# Add noise to the similarities
# FIX: draw the noise from the seeded RandomState so the example is
# reproducible (previously used the unseeded global np.random).
noise = seed.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise

mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_

nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
                    dissimilarity="precomputed", random_state=seed, n_jobs=1,
                    n_init=1)
npos = nmds.fit_transform(similarities, init=pos)

# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())

# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)

fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])

s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
            label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)

similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0

# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
#            linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
            for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.Blues,
                    norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)

plt.show()
| bsd-3-clause |
rhyolight/nupic.research | projects/neural_correlations/EXP5-Bar/NeuCorr_Exp5.py | 10 | 8590 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import random
import matplotlib.pyplot as plt
from nupic.encoders import ScalarEncoder
from nupic.bindings.algorithms import TemporalMemory as TM
from nupic.bindings.algorithms import SpatialPooler as SP
from htmresearch.support.neural_correlations_utils import *
from htmresearch.support.generate_sdr_dataset import getMovingBar
plt.ion()
random.seed(1)
def showBarMovie(bars, totalRpts=1):
  """Play a bar movie in matplotlib figure 1.

  Each frame of ``bars`` is shown transposed in grayscale with a 50 ms
  pause; the whole movie is cycled ``totalRpts`` times.
  """
  plt.figure(1)
  frameIdx = 0
  repeatsShown = 0
  while repeatsShown < totalRpts:
    plt.imshow(np.transpose(bars[frameIdx]), cmap='gray')
    plt.pause(.05)
    frameIdx += 1
    if frameIdx == len(bars):
      # wrap around to the first frame and count one completed repeat
      frameIdx = 0
      repeatsShown += 1
def generateMovingBarDataset(Nx, Ny):
  """Return a list of moving-bar movies on an Nx-by-Ny image.

  One movie is produced per start column in
  ``range(barHalfLength, Nx - barHalfLength + 1)``; each bar starts at
  row 1, moves with direction (0, 1) and takes ``Ny - 1`` steps.
  (A vertical-bar variant existed here at one point but is disabled.)
  """
  halfLength = 2
  startRow = 1
  return [getMovingBar(startLocation=(startCol, startRow),
                       direction=(0, 1),
                       imageSize=(Nx, Ny),
                       barHalfLength=halfLength,
                       steps=Ny - startRow)
          for startCol in range(halfLength, Nx - halfLength + 1)]
def createSpatialPooler(numInputs=None, numColumns=2048, sparsity=0.02):
  """Build the SpatialPooler used by this experiment.

  Parameters
  ----------
  numInputs : int or None
      Size of the input vector. Defaults to the module-level ``inputSize``
      global for backward compatibility (the original implementation read
      that global directly, which only exists when the file is run as a
      script; passing it explicitly makes the function importable).
  numColumns : int
      Number of SP columns.
  sparsity : float
      Fraction of columns active per inhibition area.

  Returns
  -------
  SP instance with learning increments configured for this experiment.
  """
  if numInputs is None:
    # Backward compatible fallback to the global defined in __main__.
    numInputs = inputSize
  sparseCols = int(numColumns * sparsity)
  sp = SP(inputDimensions=(numInputs,),
          columnDimensions=(numColumns,),
          potentialRadius=int(0.5 * numInputs),
          numActiveColumnsPerInhArea=sparseCols,
          globalInhibition=True,
          synPermActiveInc=0.0001,
          synPermInactiveDec=0.0005,
          synPermConnected=0.5,
          boostStrength=0.0,
          spVerbosity=1
          )
  return sp
def createTemporalMemory():
  """Build the TemporalMemory instance used by this experiment."""
  tmParams = {
    "columnDimensions": (2048,),
    # We changed here the number of cells per col, initially they were 32.
    "cellsPerColumn": 8,
    "initialPermanence": 0.21,
    "connectedPermanence": 0.3,
    "minThreshold": 15,
    "maxNewSynapseCount": 40,
    "permanenceIncrement": 0.1,
    "permanenceDecrement": 0.1,
    "activationThreshold": 15,
    "predictedSegmentDecrement": 0.01,
  }
  return TM(**tmParams)
def calculateCorrelation(spikeTrains, pairs):
  """Pairwise correlation of spike trains for each requested cell pair.

  For each (cellA, cellB) in ``pairs`` the correlation between rows
  ``spikeTrains[cellA]`` and ``spikeTrains[cellB]`` is computed via
  computePWCorrelations; pairs in which either cell never spiked get NaN.
  Returns a float array of length ``len(pairs)``.
  """
  corr = np.zeros(len(pairs))
  for k, pair in enumerate(pairs):
    cellA, cellB = pair[0], pair[1]
    # A silent cell has zero variance, so its correlation is undefined.
    if np.sum(spikeTrains[cellA, :]) == 0 or np.sum(spikeTrains[cellB, :]) == 0:
      corr[k] = np.nan
    else:
      corrMatrix, _ = computePWCorrelations(spikeTrains[[cellA, cellB], :],
                                            removeAutoCorr=True)
      corr[k] = corrMatrix[0, 1]
  return corr
if __name__ == "__main__":
  # Experiment: feed moving-bar movies through SP + TM for several epochs
  # and measure pairwise spike-train correlations between TM cells.
  # NOTE(review): this is Python 2 code (print statements, integer `/`).
  Nx = 20
  Ny = 20
  inputSize = Nx * Ny  # module-level global read by createSpatialPooler()
  barMovies = generateMovingBarDataset(Nx, Ny)
  # showBarMovie(barMovies[0])

  sp = createSpatialPooler()
  tm = createTemporalMemory()

  numEpochs = 20
  # One epoch = one pass over every frame of every movie.
  stepsPerEpoch = 0
  for barMoive in barMovies:
    stepsPerEpoch += len(barMoive)
  totalTS = stepsPerEpoch * numEpochs  # NOTE(review): computed but unused

  columnUsage = np.zeros(tm.numberOfColumns(), dtype="uint32")
  # NOTE(review): the entropy/negPCC accumulators below are never
  # populated anywhere in this script — possibly leftovers.
  entropyX = []
  entropyY = []

  negPCCX_cells = []
  negPCCY_cells = []

  negPCCX_cols = []
  negPCCY_cols = []

  # Randomly generate the indices of the columns to keep track during simulation time
  colIndices = np.random.permutation(tm.numberOfColumns())[
    0:4]  # keep track of 4 columns

  # Per-epoch summaries collected for the trend plots at the end.
  corrWithinColumnVsEpoch = []
  corrAcrossColumnVsEpoch = []
  corrRandomVsEpoch = []
  predictedActiveColVsEpoch = []
  for epoch in range(numEpochs):
    print " {} epochs processed".format(epoch)
    predCellNum = []
    # NOTE(review): despite the name, this holds active *column* counts.
    activeCellNum = []
    predictedActiveColumnsNum = []
    # One binary spike train per TM cell for this epoch.
    spikeTrains = np.zeros((tm.numberOfCells(), stepsPerEpoch), dtype="uint32")
    t = 0
    for i in range(len(barMovies)):
      barMoive = barMovies[i]
      tm.reset()  # clear TM sequence state between movies
      for image in barMoive:
        prePredictiveCells = tm.getPredictiveCells()
        # Integer division maps cell index -> column index (Python 2 `/`).
        prePredictiveColumn = np.array(
          list(prePredictiveCells)) / tm.getCellsPerColumn()

        outputColumns = np.zeros(sp.getNumColumns(), dtype="uint32")
        sp.compute(image, False, outputColumns)  # SP learning disabled
        tm.compute(outputColumns.nonzero()[0], learn=True)

        # Record a spike for every currently active cell.
        for cell in tm.getActiveCells():
          spikeTrains[cell, t] = 1

        # Obtain active columns:
        activeColumnsIndices = [tm.columnForCell(i) for i in
                                tm.getActiveCells()]
        currentColumns = [1 if i in activeColumnsIndices else 0 for i in
                          range(tm.numberOfColumns())]
        for col in np.nonzero(currentColumns)[0]:
          columnUsage[col] += 1
        t += 1

        predictiveCells = tm.getPredictiveCells()
        predCellNum.append(len(predictiveCells))
        predColumn = np.array(list(predictiveCells)) / tm.getCellsPerColumn()

        activeCellNum.append(len(activeColumnsIndices))
        # Columns predicted on the previous step that are active now.
        predictedActiveColumns = np.intersect1d(prePredictiveColumn,
                                                activeColumnsIndices)
        predictedActiveColumnsNum.append(len(predictedActiveColumns))

    print
    # Skip element 0: nothing can be predicted on the very first step.
    print " Predicted Active Column {}".format(np.mean(predictedActiveColumnsNum[1:]))

    numPairs = 1000
    tmNumCols = np.prod(tm.getColumnDimensions())
    cellsPerColumn = tm.getCellsPerColumn()

    # within column correlation
    withinColPairs = sampleCellsWithinColumns(numPairs, cellsPerColumn, tmNumCols)
    corrWithinColumn = calculateCorrelation(spikeTrains, withinColPairs)

    # across column correlaiton
    # NOTE(review): this zeros array is immediately overwritten below.
    corrAcrossColumn = np.zeros((numPairs, ))
    acrossColPairs = sampleCellsAcrossColumns(numPairs, cellsPerColumn, tmNumCols)
    corrAcrossColumn = calculateCorrelation(spikeTrains, acrossColPairs)

    # sample random pairs
    randomPairs = sampleCellsRandom(numPairs, cellsPerColumn, tmNumCols)
    corrRandomPairs = calculateCorrelation(spikeTrains, randomPairs)

    # Per-epoch correlation histograms (assumes plots/corrHist/ exists).
    fig, ax = plt.subplots(2, 2)
    ax[0, 0].hist(corrWithinColumn, range=[-.2, 1], bins=50)
    ax[0, 0].set_title('within column')
    ax[0, 1].hist(corrAcrossColumn, range=[-.1, .1], bins=50)
    ax[0, 1].set_title('across column')
    ax[1, 0].hist(corrRandomPairs, range=[-.1, .1], bins=50)
    ax[1, 0].set_title('random pairs')
    plt.savefig('plots/corrHist/epoch_{}.pdf'.format(epoch))
    plt.close(fig)

    print "Within column correlation {}".format(np.nanmean(corrWithinColumn))
    print "Across column correlation {}".format(np.nanmean(corrAcrossColumn))
    print "Random Cell Pair correlation {}".format(np.nanmean(corrRandomPairs))

    predictedActiveColVsEpoch.append(np.mean(predictedActiveColumnsNum[1:]))
    corrWithinColumnVsEpoch.append(corrWithinColumn)
    corrAcrossColumnVsEpoch.append(corrAcrossColumn)
    corrRandomVsEpoch.append(corrRandomPairs)

  # Trend of prediction quality and correlations over training epochs.
  fig, ax = plt.subplots(4, 1)
  ax[0].plot(predictedActiveColVsEpoch)
  ax[0].set_title('Correctly Predicted Cols')
  ax[0].set_xticks([])

  ax[1].plot(np.nanmean(corrWithinColumnVsEpoch, 1))
  ax[1].set_title('corr within column')
  ax[1].set_xticks([])

  ax[2].plot(np.nanmean(corrAcrossColumnVsEpoch, 1))
  ax[2].set_title('corr across column')
  ax[2].set_xticks([])

  ax[3].plot(np.nanmean(corrRandomVsEpoch, 1))
  ax[3].set_title('corr random pairs')
  ax[3].set_xlabel(' epochs ')
  ax[3].set_xticks([])
  plt.savefig('CorrelationVsTraining.pdf')
| gpl-3.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/test_categorical.py | 1 | 177721 | # -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
    """Shared fixture: an ordered Categorical over 'a' < 'b' < 'c'."""
    values = list('abbaaccc')
    self.factor = Categorical.from_array(values, ordered=True)
def test_getitem(self):
    """Scalar, fancy and boolean-mask indexing on the fixture."""
    # scalar positions, including negative indexing
    self.assertEqual(self.factor[0], 'a')
    self.assertEqual(self.factor[-1], 'c')

    # list-of-positions indexing returns the matching codes
    head = self.factor[[0, 1, 2]]
    tm.assert_almost_equal(head._codes, [0, 1, 1])

    # boolean-mask indexing
    cees = self.factor[np.asarray(self.factor) == 'c']
    tm.assert_almost_equal(cees._codes, [2, 2, 2])
def test_getitem_listlike(self):
    """GH 9469: int64 array indexers are coerced properly."""
    np.random.seed(1)
    codes = np.random.randint(0, 5, size=150000).astype(np.int8)
    c = Categorical(codes)
    indexer = np.array([100000]).astype(np.int64)

    # indexing the codes directly must agree with indexing the Categorical
    result = c.codes[indexer]
    expected = c[indexer].codes
    self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
    """Assignment via position and via boolean mask."""
    # int/positional assignment
    cat = self.factor.copy()
    cat[0] = 'b'
    self.assertEqual(cat[0], 'b')
    cat[-1] = 'a'
    self.assertEqual(cat[-1], 'a')

    # boolean-mask assignment of the first and last element
    cat = self.factor.copy()
    mask = np.zeros(len(cat), dtype='bool')
    mask[0] = True
    mask[-1] = True
    cat[mask] = 'c'

    expected = Categorical.from_array(['c', 'b', 'b', 'a',
                                       'a', 'c', 'c', 'c'], ordered=True)
    self.assert_categorical_equal(cat, expected)
def test_setitem_listlike(self):
    """GH 9469: setting through an int64 indexer coerces properly."""
    np.random.seed(1)
    base = np.random.randint(0, 5, size=150000).astype(np.int8)
    c = Categorical(base).add_categories([-1000])
    indexer = np.array([100000]).astype(np.int64)
    c[indexer] = -1000

    # the assigned position must carry the code of the -1000 category
    result = c.codes[indexer]
    self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
    """Mixed-type values can build an unordered Categorical but may not
    be orderable, depending on Python/numpy version."""
    # it works!
    arr = np.array([1, 2, 3, datetime.now()], dtype='O')
    factor = Categorical.from_array(arr, ordered=False)
    self.assertFalse(factor.ordered)

    if compat.PY3:
        # On Python 3 mixed int/datetime values are unorderable.
        self.assertRaises(
            TypeError, lambda: Categorical.from_array(arr, ordered=True))
    else:
        # this however will raise as cannot be sorted (on PY3 or older
        # numpies)
        if LooseVersion(np.__version__) < "1.10":
            self.assertRaises(
                TypeError,
                lambda: Categorical.from_array(arr, ordered=True))
        else:
            Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
    """Dtype equality depends on category order and orderedness."""
    c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
    c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
    c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)

    # every categorical is dtype-equal to itself
    for cat in (c1, c2, c3):
        self.assertTrue(cat.is_dtype_equal(cat))

    # different category order, or different orderedness -> not equal
    self.assertFalse(c1.is_dtype_equal(c2))
    self.assertFalse(c1.is_dtype_equal(c3))

    # non-categorical containers are never dtype-equal
    self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
    self.assertFalse(c1.is_dtype_equal(c1.astype(object)))

    # CategoricalIndex wrapping the same dtype compares equal ...
    self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
    # ... but not with reordered categories or a different ordered flag
    self.assertFalse(c1.is_dtype_equal(
        CategoricalIndex(c1, categories=list('cab'))))
    self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
    """Constructor behavior: round-tripping, validation of categories,
    dtype inference with NaN, scalar inputs, and old-style-usage warnings."""
    exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
    c1 = Categorical(exp_arr)
    self.assert_numpy_array_equal(c1.__array__(), exp_arr)
    c2 = Categorical(exp_arr, categories=["a", "b", "c"])
    self.assert_numpy_array_equal(c2.__array__(), exp_arr)
    c2 = Categorical(exp_arr, categories=["c", "b", "a"])
    self.assert_numpy_array_equal(c2.__array__(), exp_arr)

    # categories must be unique
    def f():
        Categorical([1, 2], [1, 2, 2])

    self.assertRaises(ValueError, f)

    def f():
        Categorical(["a", "b"], ["a", "b", "b"])

    self.assertRaises(ValueError, f)

    # duplicated NaN categories are also rejected (NaN categories warn)
    def f():
        with tm.assert_produces_warning(FutureWarning):
            Categorical([1, 2], [1, 2, np.nan, np.nan])

    self.assertRaises(ValueError, f)

    # The default should be unordered
    c1 = Categorical(["a", "b", "c", "a"])
    self.assertFalse(c1.ordered)

    # Categorical as input
    c1 = Categorical(["a", "b", "c", "a"])
    c2 = Categorical(c1)
    self.assertTrue(c1.equals(c2))

    c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
    c2 = Categorical(c1)
    self.assertTrue(c1.equals(c2))

    c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
    c2 = Categorical(c1)
    self.assertTrue(c1.equals(c2))

    c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
    c2 = Categorical(c1, categories=["a", "b", "c"])
    self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
    self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))

    # Series of dtype category
    c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
    c2 = Categorical(Series(c1))
    self.assertTrue(c1.equals(c2))

    c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
    c2 = Categorical(Series(c1))
    self.assertTrue(c1.equals(c2))

    # Series
    c1 = Categorical(["a", "b", "c", "a"])
    c2 = Categorical(Series(["a", "b", "c", "a"]))
    self.assertTrue(c1.equals(c2))

    c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
    c2 = Categorical(
        Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
    self.assertTrue(c1.equals(c2))

    # This should result in integer categories, not float!
    cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
    self.assertTrue(com.is_integer_dtype(cat.categories))

    # https://github.com/pydata/pandas/issues/3678
    cat = pd.Categorical([np.nan, 1, 2, 3])
    self.assertTrue(com.is_integer_dtype(cat.categories))

    # this should result in floats
    cat = pd.Categorical([np.nan, 1, 2., 3])
    self.assertTrue(com.is_float_dtype(cat.categories))

    cat = pd.Categorical([np.nan, 1., 2., 3.])
    self.assertTrue(com.is_float_dtype(cat.categories))

    # Deprecating NaNs in categoires (GH #10748)
    # preserve int as far as possible by converting to object if NaN is in
    # categories
    with tm.assert_produces_warning(FutureWarning):
        cat = pd.Categorical([np.nan, 1, 2, 3],
                             categories=[np.nan, 1, 2, 3])
    self.assertTrue(com.is_object_dtype(cat.categories))

    # This doesn't work -> this would probably need some kind of "remember
    # the original type" feature to try to cast the array interface result
    # to...

    # vals = np.asarray(cat[cat.notnull()])
    # self.assertTrue(com.is_integer_dtype(vals))
    with tm.assert_produces_warning(FutureWarning):
        cat = pd.Categorical([np.nan, "a", "b", "c"],
                             categories=[np.nan, "a", "b", "c"])
    self.assertTrue(com.is_object_dtype(cat.categories))

    # but don't do it for floats
    with tm.assert_produces_warning(FutureWarning):
        cat = pd.Categorical([np.nan, 1., 2., 3.],
                             categories=[np.nan, 1., 2., 3.])
    self.assertTrue(com.is_float_dtype(cat.categories))

    # corner cases
    cat = pd.Categorical([1])
    self.assertTrue(len(cat.categories) == 1)
    self.assertTrue(cat.categories[0] == 1)
    self.assertTrue(len(cat.codes) == 1)
    self.assertTrue(cat.codes[0] == 0)

    cat = pd.Categorical(["a"])
    self.assertTrue(len(cat.categories) == 1)
    self.assertTrue(cat.categories[0] == "a")
    self.assertTrue(len(cat.codes) == 1)
    self.assertTrue(cat.codes[0] == 0)

    # Scalars should be converted to lists
    cat = pd.Categorical(1)
    self.assertTrue(len(cat.categories) == 1)
    self.assertTrue(cat.categories[0] == 1)
    self.assertTrue(len(cat.codes) == 1)
    self.assertTrue(cat.codes[0] == 0)

    cat = pd.Categorical([1], categories=1)
    self.assertTrue(len(cat.categories) == 1)
    self.assertTrue(cat.categories[0] == 1)
    self.assertTrue(len(cat.codes) == 1)
    self.assertTrue(cat.codes[0] == 0)

    # Catch old style constructor useage: two arrays, codes + categories
    # We can only catch two cases:
    #  - when the first is an integer dtype and the second is not
    #  - when the resulting codes are all -1/NaN
    with tm.assert_produces_warning(RuntimeWarning):
        c_old = Categorical([0, 1, 2, 0, 1, 2],
                            categories=["a", "b", "c"])  # noqa

    with tm.assert_produces_warning(RuntimeWarning):
        c_old = Categorical([0, 1, 2, 0, 1, 2],  # noqa
                            categories=[3, 4, 5])

    # the next one are from the old docs, but unfortunately these don't
    # trigger :-(
    with tm.assert_produces_warning(None):
        c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])  # noqa

    cat = Categorical([1, 2], categories=[1, 2, 3])

    # this is a legitimate constructor
    with tm.assert_produces_warning(None):
        c = Categorical(np.array([], dtype='int64'),  # noqa
                        categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
    """Constructing from a CategoricalIndex round-trips its values."""
    idx = CategoricalIndex(list('aabbca'), categories=list('cab'))
    self.assertTrue(idx.values.equals(Categorical(idx)))

    # same round-trip going through object dtype with explicit categories
    idx = CategoricalIndex(list('aabbca'), categories=list('cab'))
    rebuilt = Categorical(idx.astype(object), categories=idx.categories)
    self.assertTrue(idx.values.equals(rebuilt))
def test_constructor_with_generator(self):
    """Generators and range objects are valid inputs for both values and
    categories."""
    # This was raising an Error in isnull(single_val).any() because isnull
    # returned a scalar for a generator
    xrange = range  # py2/py3 compat alias, exercises the xrange path below

    exp = Categorical([0, 1, 2])
    cat = Categorical((x for x in [0, 1, 2]))
    self.assertTrue(cat.equals(exp))
    cat = Categorical(xrange(3))
    self.assertTrue(cat.equals(exp))

    # This uses xrange internally
    from pandas.core.index import MultiIndex
    MultiIndex.from_product([range(5), ['a', 'b', 'c']])

    # check that categories accept generators and sequences
    cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
    self.assertTrue(cat.equals(exp))
    cat = pd.Categorical([0, 1, 2], categories=xrange(3))
    self.assertTrue(cat.equals(exp))
def test_constructor_with_datetimelike(self):
    """GH 12077: construction from datetime-like Series, with and without
    NaT (NaT must map to code -1, not become a category)."""
    # constructor wwth a datetimelike and NaT
    for dtl in [pd.date_range('1995-01-01 00:00:00',
                              periods=5, freq='s'),
                pd.date_range('1995-01-01 00:00:00',
                              periods=5, freq='s', tz='US/Eastern'),
                pd.timedelta_range('1 day', periods=5, freq='s')]:

        s = Series(dtl)
        c = Categorical(s)
        # categories keep the index type but lose the frequency
        expected = type(dtl)(s)
        expected.freq = None
        tm.assert_index_equal(c.categories, expected)
        self.assert_numpy_array_equal(c.codes, np.arange(5, dtype='int8'))

        # with NaT
        s2 = s.copy()
        s2.iloc[-1] = pd.NaT
        c = Categorical(s2)
        expected = type(dtl)(s2.dropna())
        expected.freq = None
        tm.assert_index_equal(c.categories, expected)
        self.assert_numpy_array_equal(c.codes,
                                      np.concatenate([np.arange(4, dtype='int8'),
                                                      [-1]]))

        # NaT should show up in the repr
        result = repr(c)
        self.assertTrue('NaT' in result)
def test_constructor_from_index_series_datetimetz(self):
    """from_array on a tz-aware DatetimeIndex (or Series of it) keeps the
    index as the categories."""
    idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
                        tz='US/Eastern')
    for data in (idx, pd.Series(idx)):
        result = pd.Categorical.from_array(data)
        tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_timedelta(self):
    """from_array on a TimedeltaIndex (or Series of it) keeps the index
    as the categories."""
    idx = pd.timedelta_range('1 days', freq='D', periods=3)
    for data in (idx, pd.Series(idx)):
        result = pd.Categorical.from_array(data)
        tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_period(self):
    """from_array on a PeriodIndex (or Series of it) keeps the index as
    the categories."""
    idx = pd.period_range('2015-01-01', freq='D', periods=3)
    for data in (idx, pd.Series(idx)):
        result = pd.Categorical.from_array(data)
        tm.assert_index_equal(result.categories, idx)
def test_from_codes(self):
    """Validation and happy path of Categorical.from_codes."""
    # too few categories for the given codes
    self.assertRaises(ValueError, Categorical.from_codes, [1, 2], [1, 2])

    # codes must be integers
    self.assertRaises(ValueError, Categorical.from_codes, ["a"], [1, 2])

    # categories must be unique
    self.assertRaises(ValueError, Categorical.from_codes,
                      [0, 1, 2], ["a", "a", "b"])

    # -1 is the only allowed negative code (NaN); anything lower raises
    self.assertRaises(ValueError, Categorical.from_codes,
                      [-2, 1, 2], ["a", "b", "c"])

    exp = Categorical(["a", "b", "c"], ordered=False)
    res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
    self.assertTrue(exp.equals(res))

    # Not available in earlier numpy versions
    if hasattr(np.random, "choice"):
        codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
        pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
    """Comparison semantics: against scalars, other Categoricals, Series
    and ndarrays; ordering must respect the categories order, and
    mismatched categories/orderedness must raise TypeError."""
    result = self.factor[self.factor == 'a']
    expected = self.factor[np.asarray(self.factor) == 'a']
    self.assertTrue(result.equals(expected))

    result = self.factor[self.factor != 'a']
    expected = self.factor[np.asarray(self.factor) != 'a']
    self.assertTrue(result.equals(expected))

    result = self.factor[self.factor < 'c']
    expected = self.factor[np.asarray(self.factor) < 'c']
    self.assertTrue(result.equals(expected))

    result = self.factor[self.factor > 'a']
    expected = self.factor[np.asarray(self.factor) > 'a']
    self.assertTrue(result.equals(expected))

    result = self.factor[self.factor >= 'b']
    expected = self.factor[np.asarray(self.factor) >= 'b']
    self.assertTrue(result.equals(expected))

    result = self.factor[self.factor <= 'b']
    expected = self.factor[np.asarray(self.factor) <= 'b']
    self.assertTrue(result.equals(expected))

    n = len(self.factor)

    # elementwise equality against a shuffled copy of itself
    other = self.factor[np.random.permutation(n)]
    result = self.factor == other
    expected = np.asarray(self.factor) == np.asarray(other)
    self.assert_numpy_array_equal(result, expected)

    # comparing against a value not in the categories is all-False
    result = self.factor == 'd'
    expected = np.repeat(False, len(self.factor))
    self.assert_numpy_array_equal(result, expected)

    # comparisons with categoricals
    cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
                             ordered=True)
    cat_rev_base = pd.Categorical(
        ["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
    cat = pd.Categorical(["a", "b", "c"], ordered=True)
    cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
                              ordered=True)

    # comparisons need to take categories ordering into account
    res_rev = cat_rev > cat_rev_base
    exp_rev = np.array([True, False, False])
    self.assert_numpy_array_equal(res_rev, exp_rev)

    res_rev = cat_rev < cat_rev_base
    exp_rev = np.array([False, False, True])
    self.assert_numpy_array_equal(res_rev, exp_rev)

    res = cat > cat_base
    exp = np.array([False, False, True])
    self.assert_numpy_array_equal(res, exp)

    # Only categories with same categories can be compared
    def f():
        cat > cat_rev

    self.assertRaises(TypeError, f)

    cat_rev_base2 = pd.Categorical(
        ["b", "b", "b"], categories=["c", "b", "a", "d"])

    def f():
        cat_rev > cat_rev_base2

    self.assertRaises(TypeError, f)

    # Only categories with same ordering information can be compared
    cat_unorderd = cat.set_ordered(False)
    self.assertFalse((cat > cat).any())

    def f():
        cat > cat_unorderd

    self.assertRaises(TypeError, f)

    # comparison (in both directions) with Series will raise
    s = Series(["b", "b", "b"])
    self.assertRaises(TypeError, lambda: cat > s)
    self.assertRaises(TypeError, lambda: cat_rev > s)
    self.assertRaises(TypeError, lambda: s < cat)
    self.assertRaises(TypeError, lambda: s < cat_rev)

    # comparison with numpy.array will raise in both direction, but only on
    # newer numpy versions
    a = np.array(["b", "b", "b"])
    self.assertRaises(TypeError, lambda: cat > a)
    self.assertRaises(TypeError, lambda: cat_rev > a)

    # The following work via '__array_priority__ = 1000'
    # works only on numpy >= 1.7.1
    if LooseVersion(np.__version__) > "1.7.1":
        self.assertRaises(TypeError, lambda: a < cat)
        self.assertRaises(TypeError, lambda: a < cat_rev)

    # Make sure that unequal comparison take the categories order in
    # account
    cat_rev = pd.Categorical(
        list("abc"), categories=list("cba"), ordered=True)
    exp = np.array([True, False, False])
    res = cat_rev > "b"
    self.assert_numpy_array_equal(res, exp)
def test_argsort(self):
    """argsort returns positions sorted by code; `ascending` reverses."""
    cat = Categorical([5, 3, 1, 4, 2], ordered=True)
    ascending_order = np.array([2, 4, 1, 3, 0])
    tm.assert_numpy_array_equal(cat.argsort(ascending=True),
                                ascending_order)
    tm.assert_numpy_array_equal(cat.argsort(ascending=False),
                                ascending_order[::-1])
def test_numpy_argsort(self):
    """np.argsort dispatches to Categorical; numpy-only kwargs raise."""
    cat = Categorical([5, 3, 1, 4, 2], ordered=True)
    tm.assert_numpy_array_equal(np.argsort(cat),
                                np.array([2, 4, 1, 3, 0]))

    # numpy-compat keyword arguments are rejected with a clear message
    unsupported = [({'kind': 'mergesort'},
                    "the 'kind' parameter is not supported"),
                   ({'axis': 0},
                    "the 'axis' parameter is not supported"),
                   ({'order': 'C'},
                    "the 'order' parameter is not supported")]
    for kwargs, msg in unsupported:
        tm.assertRaisesRegexp(ValueError, msg, np.argsort, cat, **kwargs)
def test_na_flags_int_categories(self):
    """#1457: -1 labels must surface as NaN with integer categories."""
    categories = lrange(10)
    labels = np.random.randint(0, 10, 20)
    labels[::5] = -1  # mark every 5th label as missing

    cat = Categorical(labels, categories, fastpath=True)
    repr(cat)  # smoke test: repr must not raise

    self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
    """Omitting `categories` infers them from the values."""
    inferred = Categorical(['a', 'b', 'b', 'a',
                            'a', 'c', 'c', 'c'], ordered=True)
    self.assertTrue(inferred.equals(self.factor))
def test_describe(self):
    """describe(): counts/freqs frames, including unused categories and
    NaN handling (both NaN values and the deprecated NaN category)."""
    # string type
    desc = self.factor.describe()
    expected = DataFrame({'counts': [3, 2, 3],
                          'freqs': [3 / 8., 2 / 8., 3 / 8.]},
                         index=pd.CategoricalIndex(['a', 'b', 'c'],
                                                   name='categories'))
    tm.assert_frame_equal(desc, expected)

    # check unused categories
    cat = self.factor.copy()
    cat.set_categories(["a", "b", "c", "d"], inplace=True)
    desc = cat.describe()
    expected = DataFrame({'counts': [3, 2, 3, 0],
                          'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
                         index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
                                                   name='categories'))
    tm.assert_frame_equal(desc, expected)

    # check an integer one
    desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
    expected = DataFrame({'counts': [5, 3, 3],
                          'freqs': [5 / 11., 3 / 11., 3 / 11.]},
                         index=pd.CategoricalIndex([1, 2, 3],
                                                   name='categories'))
    tm.assert_frame_equal(desc, expected)

    # https://github.com/pydata/pandas/issues/3678
    # describe should work with NaN
    cat = pd.Categorical([np.nan, 1, 2, 2])
    desc = cat.describe()
    expected = DataFrame({'counts': [1, 2, 1],
                          'freqs': [1 / 4., 2 / 4., 1 / 4.]},
                         index=pd.CategoricalIndex([1, 2, np.nan],
                                                   categories=[1, 2],
                                                   name='categories'))
    tm.assert_frame_equal(desc, expected)

    # NA as a category (deprecated, hence the FutureWarning context)
    with tm.assert_produces_warning(FutureWarning):
        cat = pd.Categorical(["a", "c", "c", np.nan],
                             categories=["b", "a", "c", np.nan])
        result = cat.describe()

    expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
                         columns=['counts', 'freqs'],
                         index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
                                                   name='categories'))
    tm.assert_frame_equal(result, expected)

    # NA as an unused category
    with tm.assert_produces_warning(FutureWarning):
        cat = pd.Categorical(["a", "c", "c"],
                             categories=["b", "a", "c", np.nan])
        result = cat.describe()

    exp_idx = pd.CategoricalIndex(
        ['b', 'a', 'c', np.nan], name='categories')
    expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
                         columns=['counts', 'freqs'], index=exp_idx)
    tm.assert_frame_equal(result, expected)
def test_print(self):
    """repr of the ordered fixture shows values plus ordered categories."""
    expected = ("[a, b, b, a, a, c, c, c]\n"
                "Categories (3, object): [a < b < c]")
    self.assertEqual(repr(self.factor), expected)
def test_big_print(self):
    """Long categoricals get an abbreviated repr with a Length line."""
    factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
                         name='cat', fastpath=True)
    expected = ("[a, b, c, a, b, ..., b, c, a, b, c]\n"
                "Length: 600\n"
                "Categories (3, object): [a, b, c]")
    self.assertEqual(repr(factor), expected)
def test_empty_print(self):
    """repr of empty categoricals, with/without categories and ordering."""
    # hack because array_repr changed in numpy > 1.6.x
    factor = Categorical([], ["a", "b", "c"])
    self.assertEqual(repr(factor),
                     "[], Categories (3, object): [a, b, c]")

    factor = Categorical([], ["a", "b", "c"], ordered=True)
    self.assertEqual(repr(factor),
                     "[], Categories (3, object): [a < b < c]")

    factor = Categorical([], [])
    self.assertEqual(repr(factor),
                     "[], Categories (0, object): []")
def test_print_none_width(self):
    """GH10087: display.width=None must not break the Series repr."""
    s = pd.Series(pd.Categorical([1, 2, 3, 4]))
    exp = u("0 1\n1 2\n2 3\n3 4\n"
            "dtype: category\nCategories (4, int64): [1, 2, 3, 4]")

    with option_context("display.width", None):
        self.assertEqual(exp, repr(s))
def test_unicode_print(self):
    """repr of unicode categoricals; the east_asian_width display option
    must not change the output."""
    # On Python 2 the unicode repr path is exercised via unicode()
    if PY3:
        _rep = repr
    else:
        _rep = unicode  # noqa

    c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
    expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""

    self.assertEqual(_rep(c), expected)

    c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
                       * 20)
    expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]"""  # noqa

    self.assertEqual(_rep(c), expected)

    # unicode option should not affect to Categorical, as it doesn't care
    # the repr width
    with option_context('display.unicode.east_asian_width', True):

        c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
                           * 20)
        expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]"""  # noqa

        self.assertEqual(_rep(c), expected)
def test_periodindex(self):
    """Categoricals built from a PeriodIndex: categories are the sorted
    unique periods and codes point into them."""
    idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
                        '2014-03', '2014-03'], freq='M')

    cat1 = Categorical.from_array(idx1)
    str(cat1)  # smoke test: repr must not raise
    exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
    exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
    self.assert_numpy_array_equal(cat1._codes, exp_arr)
    self.assertTrue(cat1.categories.equals(exp_idx))

    # unsorted input: codes must still map into the sorted categories
    idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
                        '2014-03', '2014-01'], freq='M')
    cat2 = Categorical.from_array(idx2, ordered=True)
    str(cat2)
    exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
    exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
    self.assert_numpy_array_equal(cat2._codes, exp_arr)
    self.assertTrue(cat2.categories.equals(exp_idx2))

    # strictly descending input with a gap
    idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
                        '2013-08', '2013-07', '2013-05'], freq='M')
    cat3 = Categorical.from_array(idx3, ordered=True)
    exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
    exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
                           '2013-10', '2013-11', '2013-12'], freq='M')
    self.assert_numpy_array_equal(cat3._codes, exp_arr)
    self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
    """Assigning to .categories relabels in place; length must match.

    NOTE(review): the method name has a typo ("assigments"); kept because
    renaming would change the test id.
    """
    s = pd.Categorical(["a", "b", "c", "a"])
    exp = np.array([1, 2, 3, 1])
    s.categories = [1, 2, 3]
    self.assert_numpy_array_equal(s.__array__(), exp)
    self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))

    # lengthen
    def f():
        s.categories = [1, 2, 3, 4]

    self.assertRaises(ValueError, f)

    # shorten
    def f():
        s.categories = [1, 2]

    self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
    """GH 9347, 9190: the `ordered` flag defaults to False."""
    for kwargs, expected_ordered in [({}, False),
                                     ({'ordered': False}, False),
                                     ({'ordered': True}, True)]:
        cat = Categorical([0, 1, 2], **kwargs)
        self.assertEqual(cat.ordered, expected_ordered)
def test_ordered_api(self):
    """GH 9347: categories and ordered flag for all constructor combos."""
    cases = [
        ({'ordered': False}, ['a', 'b', 'c'], False),
        ({'categories': ['b', 'c', 'a'], 'ordered': False},
         ['b', 'c', 'a'], False),
        ({'ordered': True}, ['a', 'b', 'c'], True),
        ({'categories': ['b', 'c', 'a'], 'ordered': True},
         ['b', 'c', 'a'], True),
    ]
    for kwargs, exp_categories, exp_ordered in cases:
        cat = pd.Categorical(["a", "c", "b"], **kwargs)
        self.assertTrue(cat.categories.equals(Index(exp_categories)))
        self.assertEqual(cat.ordered, exp_ordered)
def test_set_ordered(self):
    """as_ordered/as_unordered/set_ordered, out-of-place and in-place,
    plus the deprecated direct assignment to .ordered."""
    cat = Categorical(["a", "b", "c", "a"], ordered=True)
    cat2 = cat.as_unordered()
    self.assertFalse(cat2.ordered)
    cat2 = cat.as_ordered()
    self.assertTrue(cat2.ordered)
    cat2.as_unordered(inplace=True)
    self.assertFalse(cat2.ordered)
    cat2.as_ordered(inplace=True)
    self.assertTrue(cat2.ordered)

    self.assertTrue(cat2.set_ordered(True).ordered)
    self.assertFalse(cat2.set_ordered(False).ordered)
    cat2.set_ordered(True, inplace=True)
    self.assertTrue(cat2.ordered)
    cat2.set_ordered(False, inplace=True)
    self.assertFalse(cat2.ordered)

    # deperecated in v0.16.0
    with tm.assert_produces_warning(FutureWarning):
        cat.ordered = False
        self.assertFalse(cat.ordered)
    with tm.assert_produces_warning(FutureWarning):
        cat.ordered = True
        self.assertTrue(cat.ordered)
def test_set_categories(self):
    """set_categories: reordering, shrinking (dropped values -> NaN),
    extending, code remapping internals, and ordering pass-through."""
    cat = Categorical(["a", "b", "c", "a"], ordered=True)
    exp_categories = np.array(["c", "b", "a"])
    exp_values = np.array(["a", "b", "c", "a"])

    res = cat.set_categories(["c", "b", "a"], inplace=True)
    self.assert_numpy_array_equal(cat.categories, exp_categories)
    self.assert_numpy_array_equal(cat.__array__(), exp_values)
    self.assertIsNone(res)

    res = cat.set_categories(["a", "b", "c"])
    # cat must be the same as before
    self.assert_numpy_array_equal(cat.categories, exp_categories)
    self.assert_numpy_array_equal(cat.__array__(), exp_values)
    # only res is changed
    exp_categories_back = np.array(["a", "b", "c"])
    self.assert_numpy_array_equal(res.categories, exp_categories_back)
    self.assert_numpy_array_equal(res.__array__(), exp_values)

    # not all "old" included in "new" -> all not included ones are now
    # np.nan
    cat = Categorical(["a", "b", "c", "a"], ordered=True)
    res = cat.set_categories(["a"])
    self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))

    # still not all "old" in "new"
    res = cat.set_categories(["a", "b", "d"])
    self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
    self.assert_numpy_array_equal(res.categories,
                                  np.array(["a", "b", "d"]))

    # all "old" included in "new"
    cat = cat.set_categories(["a", "b", "c", "d"])
    exp_categories = np.array(["a", "b", "c", "d"])
    self.assert_numpy_array_equal(cat.categories, exp_categories)

    # internals...
    c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
    self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
    self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
    self.assert_numpy_array_equal(c.get_values(),
                                  np.array([1, 2, 3, 4, 1]))
    c = c.set_categories(
        [4, 3, 2, 1
         ])  # all "pointers" to '4' must be changed from 3 to 0,...
    self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
                                  )  # positions are changed
    self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
                                  )  # categories are now in new order
    self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
                                  )  # output is the same
    # NOTE(review): assertTrue with two args only checks truthiness of the
    # first; these were probably meant to be assertEqual(c.min(), 4) and
    # assertEqual(c.max(), 1).
    self.assertTrue(c.min(), 4)
    self.assertTrue(c.max(), 1)

    # set_categories should set the ordering if specified
    c2 = c.set_categories([4, 3, 2, 1], ordered=False)
    self.assertFalse(c2.ordered)
    self.assert_numpy_array_equal(c.get_values(), c2.get_values())

    # set_categories should pass thru the ordering
    c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
    self.assertFalse(c2.ordered)
    self.assert_numpy_array_equal(c.get_values(), c2.get_values())
    def test_rename_categories(self):
        """rename_categories returns a new Categorical by default, returns
        None (mutating in place) with inplace=True, and raises ValueError
        when the replacement list has a different length."""
        cat = pd.Categorical(["a", "b", "c", "a"])
        # inplace=False: the old one must not be changed
        res = cat.rename_categories([1, 2, 3])
        self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
        self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
        self.assert_numpy_array_equal(cat.__array__(),
                                      np.array(["a", "b", "c", "a"]))
        self.assert_numpy_array_equal(cat.categories,
                                      np.array(["a", "b", "c"]))
        res = cat.rename_categories([1, 2, 3], inplace=True)
        # and now inplace
        self.assertIsNone(res)
        self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
        self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
        # lengthen
        def f():
            cat.rename_categories([1, 2, 3, 4])
        self.assertRaises(ValueError, f)
        # shorten
        def f():
            cat.rename_categories([1, 2])
        self.assertRaises(ValueError, f)
    def test_reorder_categories(self):
        """reorder_categories permutes the existing categories; it raises
        ValueError for any list that is not an exact permutation."""
        cat = Categorical(["a", "b", "c", "a"], ordered=True)
        old = cat.copy()
        new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
                          ordered=True)
        # first inplace == False
        res = cat.reorder_categories(["c", "b", "a"])
        # cat must be the same as before
        self.assert_categorical_equal(cat, old)
        # only res is changed
        self.assert_categorical_equal(res, new)
        # inplace == True
        res = cat.reorder_categories(["c", "b", "a"], inplace=True)
        self.assertIsNone(res)
        self.assert_categorical_equal(cat, new)
        # not all "old" included in "new"
        cat = Categorical(["a", "b", "c", "a"], ordered=True)
        def f():
            cat.reorder_categories(["a"])
        self.assertRaises(ValueError, f)
        # still not all "old" in "new"
        def f():
            cat.reorder_categories(["a", "b", "d"])
        self.assertRaises(ValueError, f)
        # all "old" included in "new", but too long
        def f():
            cat.reorder_categories(["a", "b", "c", "d"])
        self.assertRaises(ValueError, f)
    def test_add_categories(self):
        """add_categories accepts a scalar or any list-like (list, Series,
        ndarray, Index), appends new categories, and raises ValueError for
        categories that already exist."""
        cat = Categorical(["a", "b", "c", "a"], ordered=True)
        old = cat.copy()
        new = Categorical(["a", "b", "c", "a"],
                          categories=["a", "b", "c", "d"], ordered=True)
        # first inplace == False
        res = cat.add_categories("d")
        self.assert_categorical_equal(cat, old)
        self.assert_categorical_equal(res, new)
        res = cat.add_categories(["d"])
        self.assert_categorical_equal(cat, old)
        self.assert_categorical_equal(res, new)
        # inplace == True
        res = cat.add_categories("d", inplace=True)
        self.assert_categorical_equal(cat, new)
        self.assertIsNone(res)
        # new is in old categories
        def f():
            cat.add_categories(["d"])
        self.assertRaises(ValueError, f)
        # GH 9927
        cat = Categorical(list("abc"), ordered=True)
        expected = Categorical(
            list("abc"), categories=list("abcde"), ordered=True)
        # test with Series, np.array, index, list
        res = cat.add_categories(Series(["d", "e"]))
        self.assert_categorical_equal(res, expected)
        res = cat.add_categories(np.array(["d", "e"]))
        self.assert_categorical_equal(res, expected)
        res = cat.add_categories(Index(["d", "e"]))
        self.assert_categorical_equal(res, expected)
        res = cat.add_categories(["d", "e"])
        self.assert_categorical_equal(res, expected)
    def test_remove_categories(self):
        """remove_categories drops the given categories (values using them
        become NaN) and raises ValueError for unknown categories."""
        cat = Categorical(["a", "b", "c", "a"], ordered=True)
        old = cat.copy()
        new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
                          ordered=True)
        # first inplace == False
        res = cat.remove_categories("c")
        self.assert_categorical_equal(cat, old)
        self.assert_categorical_equal(res, new)
        res = cat.remove_categories(["c"])
        self.assert_categorical_equal(cat, old)
        self.assert_categorical_equal(res, new)
        # inplace == True
        res = cat.remove_categories("c", inplace=True)
        self.assert_categorical_equal(cat, new)
        self.assertIsNone(res)
        # removal is not in categories
        def f():
            cat.remove_categories(["c"])
        self.assertRaises(ValueError, f)
    def test_remove_unused_categories(self):
        """remove_unused_categories drops categories not present in the
        values while leaving the values (including NaNs) untouched."""
        c = Categorical(["a", "b", "c", "d", "a"],
                        categories=["a", "b", "c", "d", "e"])
        exp_categories_all = np.array(["a", "b", "c", "d", "e"])
        exp_categories_dropped = np.array(["a", "b", "c", "d"])
        self.assert_numpy_array_equal(c.categories, exp_categories_all)
        res = c.remove_unused_categories()
        self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
        self.assert_numpy_array_equal(c.categories, exp_categories_all)
        res = c.remove_unused_categories(inplace=True)
        self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
        self.assertIsNone(res)
        # with NaN values (GH11599)
        c = Categorical(["a", "b", "c", np.nan],
                        categories=["a", "b", "c", "d", "e"])
        res = c.remove_unused_categories()
        self.assert_numpy_array_equal(res.categories,
                                      np.array(["a", "b", "c"]))
        self.assert_numpy_array_equal(c.categories, exp_categories_all)
        val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
        cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
        out = cat.remove_unused_categories()
        self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
        # codes are recomputed against the reduced category list; NaN stays -1
        self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
        self.assertEqual(out.get_values().tolist(), val)
        # larger randomized round-trip: values survive category pruning
        alpha = list('abcdefghijklmnopqrstuvwxyz')
        val = np.random.choice(alpha[::2], 10000).astype('object')
        val[np.random.choice(len(val), 100)] = np.nan
        cat = pd.Categorical(values=val, categories=alpha)
        out = cat.remove_unused_categories()
        self.assertEqual(out.get_values().tolist(), val.tolist())
    def test_nan_handling(self):
        """NaN is encoded as code -1; putting NaN into the categories
        themselves is deprecated (FutureWarning) but then gets a real code.
        Also covers removing null categories (GH 10156)."""
        # Nans are represented as -1 in codes
        c = Categorical(["a", "b", np.nan, "a"])
        self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
        self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
        c[1] = np.nan
        self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
        self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
        # If categories have nan included, the code should point to that
        # instead
        with tm.assert_produces_warning(FutureWarning):
            c = Categorical(["a", "b", np.nan, "a"],
                            categories=["a", "b", np.nan])
        self.assert_numpy_array_equal(c.categories,
                                      np.array(["a", "b", np.nan],
                                               dtype=np.object_))
        self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
        c[1] = np.nan
        self.assert_numpy_array_equal(c.categories,
                                      np.array(["a", "b", np.nan],
                                               dtype=np.object_))
        self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
        # Changing categories should also make the replaced category np.nan
        c = Categorical(["a", "b", "c", "a"])
        with tm.assert_produces_warning(FutureWarning):
            c.categories = ["a", "b", np.nan]  # noqa
        self.assert_numpy_array_equal(c.categories,
                                      np.array(["a", "b", np.nan],
                                               dtype=np.object_))
        self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
        # Adding nan to categories should make assigned nan point to the
        # category!
        c = Categorical(["a", "b", np.nan, "a"])
        self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
        self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
        with tm.assert_produces_warning(FutureWarning):
            c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
        self.assert_numpy_array_equal(c.categories,
                                      np.array(["a", "b", np.nan],
                                               dtype=np.object_))
        # the pre-existing NaN keeps code -1; only NEW assignments use the
        # NaN category's code
        self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
        c[1] = np.nan
        self.assert_numpy_array_equal(c.categories,
                                      np.array(["a", "b", np.nan],
                                               dtype=np.object_))
        self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
        # Remove null categories (GH 10156)
        cases = [
            ([1.0, 2.0, np.nan], [1.0, 2.0]),
            (['a', 'b', None], ['a', 'b']),
            ([pd.Timestamp('2012-05-01'), pd.NaT],
             [pd.Timestamp('2012-05-01')])
        ]
        null_values = [np.nan, None, pd.NaT]
        for with_null, without in cases:
            with tm.assert_produces_warning(FutureWarning):
                base = Categorical([], with_null)
            expected = Categorical([], without)
            for nullval in null_values:
                result = base.remove_categories(nullval)
                self.assert_categorical_equal(result, expected)
        # Different null values are indistinguishable
        for i, j in [(0, 1), (0, 2), (1, 2)]:
            nulls = [null_values[i], null_values[j]]
            def f():
                with tm.assert_produces_warning(FutureWarning):
                    Categorical([], categories=nulls)
            self.assertRaises(ValueError, f)
    def test_isnull(self):
        """isnull detects NaN whether it is stored as code -1 or as an
        actual (deprecated) NaN category."""
        exp = np.array([False, False, True])
        c = Categorical(["a", "b", np.nan])
        res = c.isnull()
        self.assert_numpy_array_equal(res, exp)
        with tm.assert_produces_warning(FutureWarning):
            c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
        res = c.isnull()
        self.assert_numpy_array_equal(res, exp)
        # test both nan in categories and as -1
        exp = np.array([True, False, True])
        c = Categorical(["a", "b", np.nan])
        with tm.assert_produces_warning(FutureWarning):
            c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
        c[0] = np.nan
        res = c.isnull()
        self.assert_numpy_array_equal(res, exp)
    def test_codes_immutable(self):
        """The public .codes view is read-only, but the Categorical itself
        stays mutable via item assignment and the private _codes array."""
        # Codes should be read only
        c = Categorical(["a", "b", "c", "a", np.nan])
        exp = np.array([0, 1, 2, 0, -1], dtype='int8')
        self.assert_numpy_array_equal(c.codes, exp)
        # Assignments to codes should raise
        def f():
            c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
        self.assertRaises(ValueError, f)
        # changes in the codes array should raise
        # np 1.6.1 raises RuntimeError rather than ValueError
        codes = c.codes
        def f():
            codes[4] = 1
        self.assertRaises(ValueError, f)
        # But even after getting the codes, the original array should still be
        # writeable!
        c[4] = "a"
        exp = np.array([0, 1, 2, 0, 0], dtype='int8')
        self.assert_numpy_array_equal(c.codes, exp)
        c._codes[4] = 2
        exp = np.array([0, 1, 2, 0, 2], dtype='int8')
        self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
    def test_unique(self):
        """unique on an unordered Categorical returns values in order of
        appearance and drops unused categories; NaN is kept in the values
        but removed from the categories."""
        # categories are reordered based on value when ordered=False
        cat = Categorical(["a", "b"])
        exp = np.asarray(["a", "b"])
        res = cat.unique()
        self.assert_numpy_array_equal(res, exp)
        cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
        res = cat.unique()
        self.assert_numpy_array_equal(res, exp)
        tm.assert_categorical_equal(res, Categorical(exp))
        cat = Categorical(["c", "a", "b", "a", "a"],
                          categories=["a", "b", "c"])
        exp = np.asarray(["c", "a", "b"])
        res = cat.unique()
        self.assert_numpy_array_equal(res, exp)
        tm.assert_categorical_equal(res, Categorical(
            exp, categories=['c', 'a', 'b']))
        # nan must be removed
        cat = Categorical(["b", np.nan, "b", np.nan, "a"],
                          categories=["a", "b", "c"])
        res = cat.unique()
        exp = np.asarray(["b", np.nan, "a"], dtype=object)
        self.assert_numpy_array_equal(res, exp)
        tm.assert_categorical_equal(res, Categorical(
            ["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
    def test_sort_values(self):
        """sort_values works on unordered categoricals too, sorts by
        category order, supports ascending=False and inplace=True, and
        never reorders the categories themselves."""
        # unordered cats are sortable
        cat = Categorical(["a", "b", "b", "a"], ordered=False)
        cat.sort_values()
        cat = Categorical(["a", "c", "b", "d"], ordered=True)
        # sort_values
        res = cat.sort_values()
        exp = np.array(["a", "b", "c", "d"], dtype=object)
        self.assert_numpy_array_equal(res.__array__(), exp)
        cat = Categorical(["a", "c", "b", "d"],
                          categories=["a", "b", "c", "d"], ordered=True)
        res = cat.sort_values()
        exp = np.array(["a", "b", "c", "d"], dtype=object)
        self.assert_numpy_array_equal(res.__array__(), exp)
        res = cat.sort_values(ascending=False)
        exp = np.array(["d", "c", "b", "a"], dtype=object)
        self.assert_numpy_array_equal(res.__array__(), exp)
        # sort (inplace order)
        cat1 = cat.copy()
        cat1.sort_values(inplace=True)
        exp = np.array(["a", "b", "c", "d"], dtype=object)
        self.assert_numpy_array_equal(cat1.__array__(), exp)
        # reverse
        cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
        res = cat.sort_values(ascending=False)
        exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
        exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
        self.assert_numpy_array_equal(res.__array__(), exp_val)
        self.assert_numpy_array_equal(res.categories, exp_categories)
def test_sort_values_na_position(self):
# see gh-12882
cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True)
exp_categories = np.array([2, 5])
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values() # default arguments
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_numpy_array_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0])
res = cat.sort_values(ascending=True, na_position='first')
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_numpy_array_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0])
res = cat.sort_values(ascending=False, na_position='first')
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_numpy_array_equal(res.categories, exp_categories)
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values(ascending=True, na_position='last')
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_numpy_array_equal(res.categories, exp_categories)
exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan])
res = cat.sort_values(ascending=False, na_position='last')
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
    def test_set_item_nan(self):
        """Assigning NaN normally sets code -1, but when NaN is itself a
        (deprecated) category the assignment uses that category's code."""
        cat = pd.Categorical([1, 2, 3])
        exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
        cat[1] = np.nan
        self.assertTrue(cat.equals(exp))
        # if nan in categories, the proper code should be set!
        cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
        with tm.assert_produces_warning(FutureWarning):
            cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
        cat[1] = np.nan
        # code 3 = the NaN category; the original trailing NaN stays -1
        exp = np.array([0, 3, 2, -1])
        self.assert_numpy_array_equal(cat.codes, exp)
        cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
        with tm.assert_produces_warning(FutureWarning):
            cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
        cat[1:3] = np.nan
        exp = np.array([0, 3, 3, -1])
        self.assert_numpy_array_equal(cat.codes, exp)
        cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
        with tm.assert_produces_warning(FutureWarning):
            cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
        cat[1:3] = [np.nan, 1]
        exp = np.array([0, 3, 0, -1])
        self.assert_numpy_array_equal(cat.codes, exp)
        cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
        with tm.assert_produces_warning(FutureWarning):
            cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
        cat[1:3] = [np.nan, np.nan]
        exp = np.array([0, 3, 3, -1])
        self.assert_numpy_array_equal(cat.codes, exp)
        cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
        with tm.assert_produces_warning(FutureWarning):
            cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
        # boolean-mask assignment also routes NaN to the NaN category
        cat[pd.isnull(cat)] = np.nan
        exp = np.array([0, 1, 3, 2])
        self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
    def test_searchsorted(self):
        """Categorical.searchsorted mirrors Series.searchsorted (always
        returning an array, even for scalar input) and supports side= and
        sorter=. https://github.com/pydata/pandas/issues/8420"""
        s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
        s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
        c1 = pd.Categorical(s1, ordered=True)
        c2 = pd.Categorical(s2, ordered=True)
        # Single item array
        res = c1.searchsorted(['bread'])
        chk = s1.searchsorted(['bread'])
        exp = np.array([1])
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)
        # Scalar version of single item array
        # Categorical return np.array like pd.Series, but different from
        # np.array.searchsorted()
        res = c1.searchsorted('bread')
        chk = s1.searchsorted('bread')
        exp = np.array([1])
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)
        # Searching for a value that is not present in the Categorical
        res = c1.searchsorted(['bread', 'eggs'])
        chk = s1.searchsorted(['bread', 'eggs'])
        exp = np.array([1, 4])
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)
        # Searching for a value that is not present, to the right
        res = c1.searchsorted(['bread', 'eggs'], side='right')
        chk = s1.searchsorted(['bread', 'eggs'], side='right')
        exp = np.array([3, 4])  # eggs before milk
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)
        # As above, but with a sorter array to reorder an unsorted array
        res = c2.searchsorted(['bread', 'eggs'], side='right',
                              sorter=[0, 1, 2, 3, 5, 4])
        chk = s2.searchsorted(['bread', 'eggs'], side='right',
                              sorter=[0, 1, 2, 3, 5, 4])
        exp = np.array([3, 5]
                       )  # eggs after donuts, after switching milk and donuts
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)
    def test_deprecated_labels(self):
        """The deprecated .labels alias still returns .codes but emits a
        FutureWarning."""
        # TODO: labels is deprecated and should be removed in 0.18 or 2017,
        # whatever is earlier
        cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
        exp = cat.codes
        with tm.assert_produces_warning(FutureWarning):
            res = cat.labels
        self.assert_numpy_array_equal(res, exp)
    def test_deprecated_levels(self):
        """The deprecated .levels attribute and levels= constructor keyword
        still work (as aliases of categories) but emit FutureWarning."""
        # TODO: levels is deprecated and should be removed in 0.18 or 2017,
        # whatever is earlier
        cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
        exp = cat.categories
        with tm.assert_produces_warning(FutureWarning):
            res = cat.levels
        self.assert_numpy_array_equal(res, exp)
        with tm.assert_produces_warning(FutureWarning):
            res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
        self.assert_numpy_array_equal(res.categories, exp)
    def test_removed_names_produces_warning(self):
        """Passing the removed name= keyword warns instead of failing.
        GH 10482."""
        # 10482
        with tm.assert_produces_warning(UserWarning):
            Categorical([0, 1], name="a")
        with tm.assert_produces_warning(UserWarning):
            Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
def test_map(self):
c = pd.Categorical(list('ABABC'), categories=list('CBA'),
ordered=True)
result = c.map(lambda x: x.lower())
exp = pd.Categorical(list('ababc'), categories=list('cba'),
ordered=True)
tm.assert_categorical_equal(result, exp)
c = pd.Categorical(list('ABABC'), categories=list('ABC'),
ordered=False)
result = c.map(lambda x: x.lower())
exp = pd.Categorical(list('ababc'), categories=list('abc'),
ordered=False)
tm.assert_categorical_equal(result, exp)
result = c.map(lambda x: 1)
tm.assert_numpy_array_equal(result, np.array([1] * 5))
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
    def setUp(self):
        """Build fixtures: a small string factor and a frame whose
        'value_group' column is a binned (cut) categorical of random ints."""
        self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
                                              'c', 'c'])
        df = DataFrame({'value': np.random.randint(0, 10000, 100)})
        # half-open 500-wide bins labelled "0 - 499", "500 - 999", ...
        labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
        df = df.sort_values(by=['value'], ascending=True)
        df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
                                   labels=labels)
        self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
    def test_basic(self):
        """Basic coercion of a Categorical into Series/DataFrame keeps the
        'category' dtype, length, and per-element access. Includes GH8623
        (scalar lookup of a categorical column)."""
        # test basic creation / coercion of categoricals
        s = Series(self.factor, name='A')
        self.assertEqual(s.dtype, 'category')
        self.assertEqual(len(s), len(self.factor))
        # str() calls just exercise the repr machinery for crashes
        str(s.values)
        str(s)
        # in a frame
        df = DataFrame({'A': self.factor})
        result = df['A']
        tm.assert_series_equal(result, s)
        result = df.iloc[:, 0]
        tm.assert_series_equal(result, s)
        self.assertEqual(len(df), len(self.factor))
        str(df.values)
        str(df)
        df = DataFrame({'A': s})
        result = df['A']
        tm.assert_series_equal(result, s)
        self.assertEqual(len(df), len(self.factor))
        str(df.values)
        str(df)
        # multiples
        df = DataFrame({'A': s, 'B': s, 'C': 1})
        result1 = df['A']
        result2 = df['B']
        tm.assert_series_equal(result1, s)
        tm.assert_series_equal(result2, s, check_names=False)
        self.assertEqual(result2.name, 'B')
        self.assertEqual(len(df), len(self.factor))
        str(df.values)
        str(df)
        # GH8623
        x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
                          [1, 'John P. Doe']],
                         columns=['person_id', 'person_name'])
        x['person_name'] = pd.Categorical(x.person_name
                                          )  # doing this breaks transform
        expected = x.iloc[0].person_name
        result = x.person_name.iloc[0]
        self.assertEqual(result, expected)
        result = x.person_name[0]
        self.assertEqual(result, expected)
        result = x.person_name.loc[0]
        self.assertEqual(result, expected)
    def test_creation_astype(self):
        """astype('category') on Series/DataFrame columns matches direct
        Categorical construction, including the categories/ordered
        keywords."""
        l = ["a", "b", "c", "a"]
        s = pd.Series(l)
        exp = pd.Series(Categorical(l))
        res = s.astype('category')
        tm.assert_series_equal(res, exp)
        l = [1, 2, 3, 1]
        s = pd.Series(l)
        exp = pd.Series(Categorical(l))
        res = s.astype('category')
        tm.assert_series_equal(res, exp)
        df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
                           "vals": [1, 2, 3, 4, 5, 6]})
        cats = Categorical([1, 2, 3, 4, 5, 6])
        exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
        df["cats"] = df["cats"].astype("category")
        tm.assert_frame_equal(exp_df, df)
        df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
                           "vals": [1, 2, 3, 4, 5, 6]})
        cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
        exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
        df["cats"] = df["cats"].astype("category")
        tm.assert_frame_equal(exp_df, df)
        # with keywords
        l = ["a", "b", "c", "a"]
        s = pd.Series(l)
        exp = pd.Series(Categorical(l, ordered=True))
        res = s.astype('category', ordered=True)
        tm.assert_series_equal(res, exp)
        exp = pd.Series(Categorical(
            l, categories=list('abcdef'), ordered=True))
        res = s.astype('category', categories=list('abcdef'), ordered=True)
        tm.assert_series_equal(res, exp)
    def test_construction_series(self):
        """Series(..., dtype='category') equals astype('category'); a
        categorical column inserted into a frame with a disjoint index
        becomes all-NaN but keeps its categories (GH 8076)."""
        l = [1, 2, 3, 1]
        exp = Series(l).astype('category')
        res = Series(l, dtype='category')
        tm.assert_series_equal(res, exp)
        l = ["a", "b", "c", "a"]
        exp = Series(l).astype('category')
        res = Series(l, dtype='category')
        tm.assert_series_equal(res, exp)
        # insert into frame with different index
        # GH 8076
        index = pd.date_range('20000101', periods=3)
        expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
                                      categories=['a', 'b', 'c']))
        expected.index = index
        expected = DataFrame({'x': expected})
        df = DataFrame(
            {'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
        tm.assert_frame_equal(df, expected)
    def test_construction_frame(self):
        """DataFrame construction from categoricals: dict, to_frame,
        list-like, row-lists of categoricals, and the invalid shapes /
        ndim cases that must raise. GH8626."""
        # dict creation
        df = DataFrame({'A': list('abc')}, dtype='category')
        expected = Series(list('abc'), dtype='category', name='A')
        tm.assert_series_equal(df['A'], expected)
        # to_frame
        s = Series(list('abc'), dtype='category')
        result = s.to_frame()
        expected = Series(list('abc'), dtype='category', name=0)
        tm.assert_series_equal(result[0], expected)
        result = s.to_frame(name='foo')
        expected = Series(list('abc'), dtype='category', name='foo')
        tm.assert_series_equal(result['foo'], expected)
        # list-like creation
        df = DataFrame(list('abc'), dtype='category')
        expected = Series(list('abc'), dtype='category', name=0)
        tm.assert_series_equal(df[0], expected)
        # ndim != 1
        df = DataFrame([pd.Categorical(list('abc'))])
        expected = DataFrame({0: Series(list('abc'), dtype='category')})
        tm.assert_frame_equal(df, expected)
        df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
            'abd'))])
        expected = DataFrame({0: Series(list('abc'), dtype='category'),
                              1: Series(list('abd'), dtype='category')},
                             columns=[0, 1])
        tm.assert_frame_equal(df, expected)
        # mixed
        df = DataFrame([pd.Categorical(list('abc')), list('def')])
        expected = DataFrame({0: Series(list('abc'), dtype='category'),
                              1: list('def')}, columns=[0, 1])
        tm.assert_frame_equal(df, expected)
        # invalid (shape)
        self.assertRaises(
            ValueError,
            lambda: DataFrame([pd.Categorical(list('abc')),
                               pd.Categorical(list('abdefg'))]))
        # ndim > 1
        self.assertRaises(NotImplementedError,
                          lambda: pd.Categorical(np.array([list('abcd')])))
    def test_reshaping(self):
        """Unstacking a panel-derived categorical column preserves the
        'category' dtype in every resulting column."""
        p = tm.makePanel()
        p['str'] = 'foo'
        df = p.to_frame()
        df['category'] = df['str'].astype('category')
        result = df['category'].unstack()
        c = Categorical(['foo'] * len(p.major_axis))
        expected = DataFrame({'A': c.copy(),
                              'B': c.copy(),
                              'C': c.copy(),
                              'D': c.copy()},
                             columns=Index(list('ABCD'), name='minor'),
                             index=p.major_axis.set_names('major'))
        tm.assert_frame_equal(result, expected)
    def test_reindex(self):
        """Reindexing a categorical Series fills missing labels with NaN
        while preserving the original categories."""
        index = pd.date_range('20000101', periods=3)
        # reindexing to an invalid Categorical
        s = Series(['a', 'b', 'c'], dtype='category')
        result = s.reindex(index)
        expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
                                      categories=['a', 'b', 'c']))
        expected.index = index
        tm.assert_series_equal(result, expected)
        # partial reindexing
        expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
                                                                     'c']))
        expected.index = [1, 2]
        result = s.reindex([1, 2])
        tm.assert_series_equal(result, expected)
        expected = Series(Categorical(
            values=['c', np.nan], categories=['a', 'b', 'c']))
        expected.index = [2, 3]
        result = s.reindex([2, 3])
        tm.assert_series_equal(result, expected)
    def test_sideeffects_free(self):
        # Passing a categorical to a Series and then changing values in either
        # the series or the categorical should not change the values in the
        # other one, IF you specify copy!
        cat = Categorical(["a", "b", "c", "a"])
        s = pd.Series(cat, copy=True)
        self.assertFalse(s.cat is cat)
        s.cat.categories = [1, 2, 3]
        exp_s = np.array([1, 2, 3, 1])
        exp_cat = np.array(["a", "b", "c", "a"])
        self.assert_numpy_array_equal(s.__array__(), exp_s)
        self.assert_numpy_array_equal(cat.__array__(), exp_cat)
        # setting a value through the copied series must not leak into `cat`
        s[0] = 2
        exp_s2 = np.array([2, 2, 3, 1])
        self.assert_numpy_array_equal(s.__array__(), exp_s2)
        self.assert_numpy_array_equal(cat.__array__(), exp_cat)
        # however, copy is False by default
        # so this WILL change values
        cat = Categorical(["a", "b", "c", "a"])
        s = pd.Series(cat)
        self.assertTrue(s.values is cat)
        s.cat.categories = [1, 2, 3]
        exp_s = np.array([1, 2, 3, 1])
        self.assert_numpy_array_equal(s.__array__(), exp_s)
        self.assert_numpy_array_equal(cat.__array__(), exp_s)
        # without copy the series shares the categorical, so mutation
        # through the series is visible on `cat` as well
        s[0] = 2
        exp_s2 = np.array([2, 2, 3, 1])
        self.assert_numpy_array_equal(s.__array__(), exp_s2)
        self.assert_numpy_array_equal(cat.__array__(), exp_s2)
    def test_nan_handling(self):
        # Nans are represented as -1 in labels
        s = Series(Categorical(["a", "b", np.nan, "a"]))
        self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
        self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
        # If categories have nan included, the label should point to that
        # instead (deprecated behaviour, hence the FutureWarning)
        with tm.assert_produces_warning(FutureWarning):
            s2 = Series(Categorical(
                ["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
        self.assert_numpy_array_equal(s2.cat.categories, np.array(
            ["a", "b", np.nan], dtype=np.object_))
        self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
        # Changing categories should also make the replaced category np.nan
        s3 = Series(Categorical(["a", "b", "c", "a"]))
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            s3.cat.categories = ["a", "b", np.nan]
        self.assert_numpy_array_equal(s3.cat.categories, np.array(
            ["a", "b", np.nan], dtype=np.object_))
        self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
    def test_cat_accessor(self):
        # basic .cat accessor attributes on an unordered categorical Series
        s = Series(Categorical(["a", "b", np.nan, "a"]))
        self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
        self.assertEqual(s.cat.ordered, False)
        exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
        # set_categories works both in place and returning a new series
        s.cat.set_categories(["b", "a"], inplace=True)
        self.assertTrue(s.values.equals(exp))
        res = s.cat.set_categories(["b", "a"])
        self.assertTrue(res.values.equals(exp))
        # NOTE(review): this second `exp` is never compared against below —
        # looks like leftover scaffolding; verify before removing
        exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
        s[:] = "a"
        s = s.cat.remove_unused_categories()
        self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(s)
    def test_series_delegations(self):
        # invalid accessor: .cat only exists on 'category'-dtype Series
        self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
        tm.assertRaisesRegexp(
            AttributeError,
            r"Can only use .cat accessor with a 'category' dtype",
            lambda: Series([1, 2, 3]).cat)
        self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
        self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
        self.assertRaises(AttributeError,
                          lambda: Series([Timestamp('20130101')]).cat)
        # Series should delegate calls to '.categories', '.codes', '.ordered'
        # and the methods '.set_categories()' 'drop_unused_categories()' to the
        # categorical
        s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
        exp_categories = np.array(["a", "b", "c"])
        self.assert_numpy_array_equal(s.cat.categories, exp_categories)
        # assigning to .cat.categories renames the categories in place
        s.cat.categories = [1, 2, 3]
        exp_categories = np.array([1, 2, 3])
        self.assert_numpy_array_equal(s.cat.categories, exp_categories)
        exp_codes = Series([0, 1, 2, 0], dtype='int8')
        tm.assert_series_equal(s.cat.codes, exp_codes)
        self.assertEqual(s.cat.ordered, True)
        s = s.cat.as_unordered()
        self.assertEqual(s.cat.ordered, False)
        s.cat.as_ordered(inplace=True)
        self.assertEqual(s.cat.ordered, True)
        # reorder
        s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
        exp_categories = np.array(["c", "b", "a"])
        exp_values = np.array(["a", "b", "c", "a"])
        s = s.cat.set_categories(["c", "b", "a"])
        self.assert_numpy_array_equal(s.cat.categories, exp_categories)
        self.assert_numpy_array_equal(s.values.__array__(), exp_values)
        self.assert_numpy_array_equal(s.__array__(), exp_values)
        # remove unused categories
        s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
                                                                 ]))
        exp_categories = np.array(["a", "b"])
        exp_values = np.array(["a", "b", "b", "a"])
        s = s.cat.remove_unused_categories()
        self.assert_numpy_array_equal(s.cat.categories, exp_categories)
        self.assert_numpy_array_equal(s.values.__array__(), exp_values)
        self.assert_numpy_array_equal(s.__array__(), exp_values)
        # This method is likely to be confused, so test that it raises an error
        # on wrong inputs:
        def f():
            s.set_categories([4, 3, 2, 1])
        self.assertRaises(Exception, f)
        # right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
    def test_assignment_to_dataframe(self):
        # assignment of a Categorical (raw .values or the Series) into a
        # DataFrame column must yield 'category'-dtype columns
        df = DataFrame({'value': np.array(
            np.random.randint(0, 10000, 100), dtype='int32')})
        labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
        df = df.sort_values(by=['value'], ascending=True)
        s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
        d = s.values
        df['D'] = d
        str(df)
        result = df.dtypes
        expected = Series(
            [np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
        tm.assert_series_equal(result, expected)
        df['E'] = s
        str(df)
        result = df.dtypes
        expected = Series([np.dtype('int32'), com.CategoricalDtype(),
                           com.CategoricalDtype()],
                          index=['value', 'D', 'E'])
        tm.assert_series_equal(result, expected)
        result1 = df['D']
        result2 = df['E']
        # the stored block should hold the very categorical that was assigned
        self.assertTrue(result1._data._block.values.equals(d))
        # sorting
        s.name = 'E'
        self.assertTrue(result2.sort_index().equals(s.sort_index()))
        # NOTE(review): the frame below is built but never asserted on —
        # presumably a smoke test that construction does not raise
        cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
        df = pd.DataFrame(pd.Series(cat))
    def test_describe(self):
        # Categoricals should not show up together with numerical columns
        # (self.cat is the fixture frame set up elsewhere in this class)
        result = self.cat.describe()
        self.assertEqual(len(result.columns), 1)
        # In a frame, describe() for the cat should be the same as for string
        # arrays (count, unique, top, freq)
        cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
                          ordered=True)
        s = Series(cat)
        result = s.describe()
        expected = Series([4, 2, "b", 3],
                          index=['count', 'unique', 'top', 'freq'])
        tm.assert_series_equal(result, expected)
        # a categorical column and the equivalent object column must
        # describe() identically
        cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
        df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
        res = df3.describe()
        self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
    def test_repr(self):
        # Series-of-categorical repr: full, truncated, and long category list
        a = pd.Series(pd.Categorical([1, 2, 3, 4]))
        exp = u("0 1\n1 2\n2 3\n3 4\n" +
                "dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
        self.assertEqual(exp, a.__unicode__())
        a = pd.Series(pd.Categorical(["a", "b"] * 25))
        exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
                "dtype: category\nCategories (2, object): [a, b]")
        # truncated form under a small display.max_rows
        with option_context("display.max_rows", 5):
            self.assertEqual(exp, repr(a))
        # many categories are elided with "..." in the Categories footer
        levs = list("abcdefghijklmnopqrstuvwxyz")
        a = pd.Series(pd.Categorical(
            ["a", "b"], categories=levs, ordered=True))
        exp = u("0 a\n1 b\n" + "dtype: category\n"
                "Categories (26, object): [a < b < c < d ... w < x < y < z]")
        self.assertEqual(exp, a.__unicode__())
    def test_categorical_repr(self):
        # repr of a plain (unordered) Categorical, incl. the elided long form
        c = pd.Categorical([1, 2, 3])
        exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
        exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
        self.assertEqual(repr(c), exp)
        # long data is elided and a Length line appears
        c = pd.Categorical([1, 2, 3, 4, 5] * 10)
        exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
        self.assertEqual(repr(c), exp)
        # many categories are elided in the Categories footer too
        c = pd.Categorical(np.arange(20))
        exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
        self.assertEqual(repr(c), exp)
    def test_categorical_repr_ordered(self):
        # ordered categoricals list categories joined with '<'
        c = pd.Categorical([1, 2, 3], ordered=True)
        exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
                           ordered=True)
        exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
        exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
        self.assertEqual(repr(c), exp)
        # long category lists are elided with '...' between the '<' chains
        c = pd.Categorical(np.arange(20), ordered=True)
        exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
        self.assertEqual(repr(c), exp)
    def test_categorical_repr_datetime(self):
        # repr for datetime64 categoricals, naive and tz-aware
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
        c = pd.Categorical(idx)
        # TODO(wesm): exceeding 80 characters in the console is not good
        # behavior
        exp = (
            "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
            "2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
            "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
            "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
            "                                 2011-01-01 12:00:00, "
            "2011-01-01 13:00:00]""")
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx)
        exp = (
            "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
            "2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
            "2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
            "2011-01-01 13:00:00]\n"
            "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
            "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
            "                                 2011-01-01 12:00:00, "
            "2011-01-01 13:00:00]")
        self.assertEqual(repr(c), exp)
        # tz-aware values render with their UTC offset and tz in the dtype
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
                            tz='US/Eastern')
        c = pd.Categorical(idx)
        exp = (
            "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
            "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
            "2011-01-01 13:00:00-05:00]\n"
            "Categories (5, datetime64[ns, US/Eastern]): "
            "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
            "                                             "
            "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
            "                                             "
            "2011-01-01 13:00:00-05:00]")
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx)
        exp = (
            "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
            "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
            "2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
            "2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
            "2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
            "Categories (5, datetime64[ns, US/Eastern]): "
            "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
            "                                             "
            "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
            "                                             "
            "2011-01-01 13:00:00-05:00]")
        self.assertEqual(repr(c), exp)
    def test_categorical_repr_datetime_ordered(self):
        # ordered datetime64 categoricals join categories with '<'
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
        c = pd.Categorical(idx, ordered=True)
        exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""  # noqa
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""  # noqa
        self.assertEqual(repr(c), exp)
        # tz-aware variant
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
                            tz='US/Eastern')
        c = pd.Categorical(idx, ordered=True)
        exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                             2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
                                             2011-01-01 13:00:00-05:00]"""  # noqa
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                             2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
                                             2011-01-01 13:00:00-05:00]"""
        self.assertEqual(repr(c), exp)
    def test_categorical_repr_period(self):
        # repr for Period categoricals (hourly and monthly frequencies)
        idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
        c = pd.Categorical(idx)
        exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
                         2011-01-01 13:00]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx)
        exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
                         2011-01-01 13:00]"""
        self.assertEqual(repr(c), exp)
        idx = pd.period_range('2011-01', freq='M', periods=5)
        c = pd.Categorical(idx)
        exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx)
        exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
        self.assertEqual(repr(c), exp)
    def test_categorical_repr_period_ordered(self):
        # ordered Period categoricals join categories with '<'
        idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
        c = pd.Categorical(idx, ordered=True)
        exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
                         2011-01-01 13:00]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
                         2011-01-01 13:00]"""
        self.assertEqual(repr(c), exp)
        idx = pd.period_range('2011-01', freq='M', periods=5)
        c = pd.Categorical(idx, ordered=True)
        exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
        self.assertEqual(repr(c), exp)
    def test_categorical_repr_timedelta(self):
        # repr for timedelta64 categoricals, short and elided long forms
        idx = pd.timedelta_range('1 days', periods=5)
        c = pd.Categorical(idx)
        exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx)
        exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
        self.assertEqual(repr(c), exp)
        # 20 categories trigger elision plus a Length line for long data
        idx = pd.timedelta_range('1 hours', periods=20)
        c = pd.Categorical(idx)
        exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                   3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
                                   18 days 01:00:00, 19 days 01:00:00]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx)
        exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                   3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
                                   18 days 01:00:00, 19 days 01:00:00]"""
        self.assertEqual(repr(c), exp)
    def test_categorical_repr_timedelta_ordered(self):
        # ordered timedelta64 categoricals join categories with '<'
        idx = pd.timedelta_range('1 days', periods=5)
        c = pd.Categorical(idx, ordered=True)
        exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
        self.assertEqual(repr(c), exp)
        # 20 categories trigger elision ('...') inside the '<' chain
        idx = pd.timedelta_range('1 hours', periods=20)
        c = pd.Categorical(idx, ordered=True)
        exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                   3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
                                   18 days 01:00:00 < 19 days 01:00:00]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
        exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                   3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
                                   18 days 01:00:00 < 19 days 01:00:00]"""
        self.assertEqual(repr(c), exp)
    def test_categorical_series_repr(self):
        # Series repr shows the values plus a Categories footer
        s = pd.Series(pd.Categorical([1, 2, 3]))
        exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
        self.assertEqual(repr(s), exp)
        # 10 categories get elided in the footer only, values stay complete
        s = pd.Series(pd.Categorical(np.arange(10)))
        exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
        self.assertEqual(repr(s), exp)
    def test_categorical_series_repr_ordered(self):
        # ordered variant: footer joins categories with '<'
        s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
        exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
        self.assertEqual(repr(s), exp)
        s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
        exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
        self.assertEqual(repr(s), exp)
    def test_categorical_series_repr_datetime(self):
        # Series repr for datetime64 categoricals, naive and tz-aware
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
        s = pd.Series(pd.Categorical(idx))
        exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
                                 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
        self.assertEqual(repr(s), exp)
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
                            tz='US/Eastern')
        s = pd.Series(pd.Categorical(idx))
        exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
                                             2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
                                             2011-01-01 13:00:00-05:00]"""
        self.assertEqual(repr(s), exp)
    def test_categorical_series_repr_datetime_ordered(self):
        # ordered datetime64 categorical Series: footer uses '<'
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
        s = pd.Series(pd.Categorical(idx, ordered=True))
        exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
        self.assertEqual(repr(s), exp)
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
                            tz='US/Eastern')
        s = pd.Series(pd.Categorical(idx, ordered=True))
        exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                             2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
                                             2011-01-01 13:00:00-05:00]"""
        self.assertEqual(repr(s), exp)
    def test_categorical_series_repr_period(self):
        # Series repr for Period categoricals (hourly and monthly)
        idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
        s = pd.Series(pd.Categorical(idx))
        exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
                         2011-01-01 13:00]"""
        self.assertEqual(repr(s), exp)
        idx = pd.period_range('2011-01', freq='M', periods=5)
        s = pd.Series(pd.Categorical(idx))
        exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
        self.assertEqual(repr(s), exp)
    def test_categorical_series_repr_period_ordered(self):
        # ordered Period categorical Series: footer uses '<'
        idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
        s = pd.Series(pd.Categorical(idx, ordered=True))
        exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
                         2011-01-01 13:00]"""
        self.assertEqual(repr(s), exp)
        idx = pd.period_range('2011-01', freq='M', periods=5)
        s = pd.Series(pd.Categorical(idx, ordered=True))
        exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
        self.assertEqual(repr(s), exp)
    def test_categorical_series_repr_timedelta(self):
        # Series repr for timedelta64 categoricals, short and elided footer
        idx = pd.timedelta_range('1 days', periods=5)
        s = pd.Series(pd.Categorical(idx))
        exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
        self.assertEqual(repr(s), exp)
        idx = pd.timedelta_range('1 hours', periods=10)
        s = pd.Series(pd.Categorical(idx))
        exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                   3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
                                   8 days 01:00:00, 9 days 01:00:00]"""
        self.assertEqual(repr(s), exp)
    def test_categorical_series_repr_timedelta_ordered(self):
        # ordered timedelta64 categorical Series: footer uses '<'
        idx = pd.timedelta_range('1 days', periods=5)
        s = pd.Series(pd.Categorical(idx, ordered=True))
        exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
        self.assertEqual(repr(s), exp)
        idx = pd.timedelta_range('1 hours', periods=10)
        s = pd.Series(pd.Categorical(idx, ordered=True))
        exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                   3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
                                   8 days 01:00:00 < 9 days 01:00:00]"""
        self.assertEqual(repr(s), exp)
    def test_categorical_index_repr(self):
        # CategoricalIndex repr: single line, categories elided past 8
        idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
        exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
        self.assertEqual(repr(idx), exp)
        i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
        exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
    def test_categorical_index_repr_ordered(self):
        # same as above but ordered=True must be reflected in the repr
        i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
        exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
        self.assertEqual(repr(i), exp)
        i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
        exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
        self.assertEqual(repr(i), exp)
    def test_categorical_index_repr_datetime(self):
        # CategoricalIndex repr over datetime64 data, naive and tz-aware
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
        i = pd.CategoricalIndex(pd.Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
                  '2011-01-01 11:00:00', '2011-01-01 12:00:00',
                  '2011-01-01 13:00:00'],
                 categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
                            tz='US/Eastern')
        i = pd.CategoricalIndex(pd.Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
                  '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
                  '2011-01-01 13:00:00-05:00'],
                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
    def test_categorical_index_repr_datetime_ordered(self):
        # ordered datetime64 CategoricalIndex: ordered=True in the repr
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
        i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
        exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
                  '2011-01-01 11:00:00', '2011-01-01 12:00:00',
                  '2011-01-01 13:00:00'],
                 categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
        self.assertEqual(repr(i), exp)
        idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
                            tz='US/Eastern')
        i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
        exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
                  '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
                  '2011-01-01 13:00:00-05:00'],
                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
        self.assertEqual(repr(i), exp)
        # duplicated values still show all entries but unique categories
        i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
        exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
                  '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
                  '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
                  '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
                  '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
        self.assertEqual(repr(i), exp)
    def test_categorical_index_repr_period(self):
        # test all length
        idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
        i = pd.CategoricalIndex(pd.Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
        idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
        i = pd.CategoricalIndex(pd.Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
        idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
        i = pd.CategoricalIndex(pd.Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
        # 5 entries wrap onto multiple lines
        idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
        i = pd.CategoricalIndex(pd.Categorical(idx))
        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
                  '2011-01-01 12:00', '2011-01-01 13:00'],
                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
        i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
                  '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
                  '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
                  '2011-01-01 13:00'],
                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
        idx = pd.period_range('2011-01', freq='M', periods=5)
        i = pd.CategoricalIndex(pd.Categorical(idx))
        exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
    def test_categorical_index_repr_period_ordered(self):
        # ordered Period CategoricalIndex: ordered=True in the repr
        idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
        i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
        exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
                  '2011-01-01 12:00', '2011-01-01 13:00'],
                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
        self.assertEqual(repr(i), exp)
        idx = pd.period_range('2011-01', freq='M', periods=5)
        i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
        exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
        self.assertEqual(repr(i), exp)
    def test_categorical_index_repr_timedelta(self):
        # CategoricalIndex repr over timedelta64 data; categories elided past 8
        idx = pd.timedelta_range('1 days', periods=5)
        i = pd.CategoricalIndex(pd.Categorical(idx))
        exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
        idx = pd.timedelta_range('1 hours', periods=10)
        i = pd.CategoricalIndex(pd.Categorical(idx))
        exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
                  '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
                  '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
                  '9 days 01:00:00'],
                 categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
        self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
    """Same as test_categorical_index_repr_timedelta but with ordered=True."""
    idx = pd.timedelta_range('1 days', periods=5)
    i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
    exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
    # longer index: wrapped data and truncated categories, ordered=True shown
    idx = pd.timedelta_range('1 hours', periods=10)
    i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
    exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
    """A DataFrame of Categorical tz-datetime/period columns reprs exactly
    like the equivalent non-categorical frame."""
    # normal DataFrame
    dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
                       tz='US/Eastern')
    p = pd.period_range('2011-01', freq='M', periods=5)
    df = pd.DataFrame({'dt': dt, 'p': p})
    # expected repr is captured from the plain (non-categorical) frame above
    exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
    # the same data wrapped in Categorical columns must repr identically
    df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
    self.assertEqual(repr(df), exp)
def test_info(self):
    """Smoke test: isnull/info run cleanly on a frame with a category column."""
    nrows = 2500
    frame = DataFrame({'int64': np.random.randint(100, size=nrows)})
    labels = np.array(list('abcdefghij'))
    picks = np.random.randint(0, 10, size=nrows)
    frame['category'] = Series(labels.take(picks)).astype('category')
    frame.isnull()
    frame.info()

    subset = frame[frame['category'] == 'd']
    subset.info()
def test_groupby_sort(self):
    """Grouping on a categorical column yields counts already ordered by
    category, so a bar plot of the result has a sorted x axis."""
    # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
    # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
    counts = self.cat.groupby(['value_group'])['value_group'].count()
    by_numeric_label = sorted(counts.index,
                              key=lambda label: float(label.split()[0]))
    expected = counts[by_numeric_label]
    expected.index = pd.CategoricalIndex(expected.index,
                                         name=expected.index.name)
    tm.assert_series_equal(counts, expected)
def test_min_max(self):
    """min/max follow the category order on ordered categoricals and raise
    TypeError on unordered ones."""
    # unordered cats have no min/max
    cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
    self.assertRaises(TypeError, lambda: cat.min())
    self.assertRaises(TypeError, lambda: cat.max())

    cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
    _min = cat.min()
    _max = cat.max()
    self.assertEqual(_min, "a")
    self.assertEqual(_max, "d")

    # reversed category order: min/max follow categories, not lexical order
    cat = Series(Categorical(["a", "b", "c", "d"], categories=[
        'd', 'c', 'b', 'a'], ordered=True))
    _min = cat.min()
    _max = cat.max()
    self.assertEqual(_min, "d")
    self.assertEqual(_max, "a")

    # with NaN present: min is NaN here, max skips the NaN entries
    cat = Series(Categorical(
        [np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
                                                ], ordered=True))
    _min = cat.min()
    _max = cat.max()
    self.assertTrue(np.isnan(_min))
    self.assertEqual(_max, "b")

    # same NaN behaviour for numeric categories
    cat = Series(Categorical(
        [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
    _min = cat.min()
    _max = cat.max()
    self.assertTrue(np.isnan(_min))
    self.assertEqual(_max, 1)
def test_mode(self):
    """Series.mode on a categorical preserves dtype and the category order."""
    # single most-frequent value
    s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
                           categories=[5, 4, 3, 2, 1], ordered=True))
    res = s.mode()
    exp = Series(Categorical([5], categories=[
        5, 4, 3, 2, 1], ordered=True))
    tm.assert_series_equal(res, exp)
    # two values tie for most frequent -> both are returned
    s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
                           categories=[5, 4, 3, 2, 1], ordered=True))
    res = s.mode()
    exp = Series(Categorical([5, 1], categories=[
        5, 4, 3, 2, 1], ordered=True))
    tm.assert_series_equal(res, exp)
    # all values distinct -> empty mode (behaviour asserted here)
    s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
                           ordered=True))
    res = s.mode()
    exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
    tm.assert_series_equal(res, exp)
def test_value_counts(self):
    """value_counts on a categorical Series includes unobserved categories
    (count 0) and keeps the Series name. GH 12835."""
    # GH 12835
    cats = pd.Categorical(["a", "b", "c", "c", "c", "b"],
                          categories=["c", "a", "b", "d"])
    s = pd.Series(cats, name='xxx')
    # sort=False: result follows the categories order; 'd' unobserved -> 0
    res = s.value_counts(sort=False)
    exp = Series([3, 1, 2, 0], name='xxx',
                 index=pd.CategoricalIndex(["c", "a", "b", "d"]))
    tm.assert_series_equal(res, exp)

    # sort=True: result ordered by descending count
    res = s.value_counts(sort=True)
    exp = Series([3, 2, 1, 0], name='xxx',
                 index=pd.CategoricalIndex(["c", "b", "a", "d"]))
    tm.assert_series_equal(res, exp)

    # check object dtype handles the Series.name as the same
    # (tested in test_base.py)
    s = pd.Series(["a", "b", "c", "c", "c", "b"], name='xxx')
    res = s.value_counts()
    exp = Series([3, 2, 1], name='xxx', index=["c", "b", "a"])
    tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
    """value_counts dropna handling for categoricals, including the
    deprecated case of NaN as an explicit category. GH 9443."""
    # https://github.com/pydata/pandas/issues/9443

    # no missing values: dropna makes no difference
    s = pd.Series(["a", "b", "a"], dtype="category")
    tm.assert_series_equal(
        s.value_counts(dropna=True),
        pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
    tm.assert_series_equal(
        s.value_counts(dropna=False),
        pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))

    # None values: counted only when dropna=False
    s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
    tm.assert_series_equal(
        s.value_counts(dropna=True),
        pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
    tm.assert_series_equal(
        s.value_counts(dropna=False),
        pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))

    # When we aren't sorting by counts, and np.nan isn't a
    # category, it should be last.
    tm.assert_series_equal(
        s.value_counts(dropna=False, sort=False),
        pd.Series([2, 1, 3],
                  index=pd.CategoricalIndex(["a", "b", np.nan])))

    # NaN as an explicit category is deprecated -> FutureWarning expected
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        s = pd.Series(pd.Categorical(
            ["a", "b", "a"], categories=["a", "b", np.nan]))

    tm.assert_series_equal(
        s.value_counts(dropna=True),
        pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
    # NaN category with no NaN values still appears with count 0
    tm.assert_series_equal(
        s.value_counts(dropna=False),
        pd.Series([2, 1, 0],
                  index=pd.CategoricalIndex(["a", "b", np.nan])))

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        s = pd.Series(pd.Categorical(
            ["a", "b", None, "a", None, None], categories=["a", "b", np.nan
                                                           ]))

    tm.assert_series_equal(
        s.value_counts(dropna=True),
        pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
    tm.assert_series_equal(
        s.value_counts(dropna=False),
        pd.Series([3, 2, 1],
                  index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
    """groupby with categorical keys: unobserved categories produce NaN
    groups; transform/filter/apply work through pd.cut-based groupers."""
    cats = Categorical(
        ["a", "a", "a", "b", "b", "b", "c", "c", "c"
         ], categories=["a", "b", "c", "d"], ordered=True)
    data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})

    # unused category 'd' still yields a row (NaN mean) in the result
    expected = DataFrame({'a': Series(
        [1, 2, 4, np.nan], index=pd.CategoricalIndex(
            ['a', 'b', 'c', 'd'], name='b'))})
    result = data.groupby("b").mean()
    tm.assert_frame_equal(result, expected)

    raw_cat1 = Categorical(["a", "a", "b", "b"],
                           categories=["a", "b", "z"], ordered=True)
    raw_cat2 = Categorical(["c", "d", "c", "d"],
                           categories=["c", "d", "y"], ordered=True)
    df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})

    # single grouper
    gb = df.groupby("A")
    exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
    expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
    result = gb.sum()
    tm.assert_frame_equal(result, expected)

    # multiple groupers: full cartesian product of categories in the index
    gb = df.groupby(['A', 'B'])
    expected = DataFrame({'values': Series(
        [1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
         ], index=pd.MultiIndex.from_product(
             [['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
    result = gb.sum()
    tm.assert_frame_equal(result, expected)

    # multiple groupers with a non-cat
    df = df.copy()
    df['C'] = ['foo', 'bar'] * 2
    gb = df.groupby(['A', 'B', 'C'])
    expected = DataFrame({'values': Series(
        np.nan, index=pd.MultiIndex.from_product(
            [['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
             ], names=['A', 'B', 'C']))}).sortlevel()
    # only these four (A, B, C) combinations are actually observed
    expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
    result = gb.sum()
    tm.assert_frame_equal(result, expected)

    # GH 8623
    x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
                      [1, 'John P. Doe']],
                     columns=['person_id', 'person_name'])
    x['person_name'] = pd.Categorical(x.person_name)

    g = x.groupby(['person_id'])
    result = g.transform(lambda x: x)
    tm.assert_frame_equal(result, x[['person_name']])

    result = x.drop_duplicates('person_name')
    expected = x.iloc[[0, 1]]
    tm.assert_frame_equal(result, expected)

    def f(x):
        # one representative row per group
        return x.drop_duplicates('person_name').iloc[0]

    result = g.apply(f)
    expected = x.iloc[[0, 1]].copy()
    expected.index = Index([1, 2], name='person_id')
    expected['person_name'] = expected['person_name'].astype('object')
    tm.assert_frame_equal(result, expected)

    # GH 9921
    # Monotonic
    df = DataFrame({"a": [5, 15, 25]})
    c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])

    # each row is its own bin, so group-transform(sum) returns the column
    result = df.a.groupby(c).transform(sum)
    tm.assert_series_equal(result, df['a'], check_names=False)
    self.assertTrue(result.name is None)

    tm.assert_series_equal(
        df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
    tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
    tm.assert_frame_equal(
        df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])

    # Filter
    tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
    tm.assert_frame_equal(df.groupby(c).filter(np.all), df)

    # Non-monotonic
    df = DataFrame({"a": [5, 15, 25, -5]})
    c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])

    result = df.a.groupby(c).transform(sum)
    tm.assert_series_equal(result, df['a'], check_names=False)
    self.assertTrue(result.name is None)

    tm.assert_series_equal(
        df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
    tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
    tm.assert_frame_equal(
        df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])

    # GH 9603
    df = pd.DataFrame({'a': [1, 0, 0, 0]})
    c = pd.cut(df.a, [0, 1, 2, 3, 4])
    result = df.groupby(c).apply(len)
    expected = pd.Series([1, 0, 0, 0],
                         index=pd.CategoricalIndex(c.values.categories))
    expected.index.name = 'a'
    tm.assert_series_equal(result, expected)
def test_pivot_table(self):
    """pivot_table over categorical keys keeps the full cartesian product of
    categories in the index, with NaN for unobserved combinations."""
    cat_a = Categorical(["a", "a", "b", "b"],
                        categories=["a", "b", "z"], ordered=True)
    cat_b = Categorical(["c", "d", "c", "d"],
                        categories=["c", "d", "y"], ordered=True)
    frame = DataFrame({"A": cat_a, "B": cat_b, "values": [1, 2, 3, 4]})
    result = pd.pivot_table(frame, values='values', index=['A', 'B'])

    full_index = pd.MultiIndex.from_product([['a', 'b', 'z'],
                                             ['c', 'd', 'y']],
                                            names=['A', 'B'])
    expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
                      index=full_index, name='values')
    tm.assert_series_equal(result, expected)
def test_count(self):
    """Series.count skips NaN entries in an ordered categorical."""
    values = Categorical([np.nan, 1, 2, np.nan],
                         categories=[5, 4, 3, 2, 1], ordered=True)
    self.assertEqual(Series(values).count(), 2)
def test_sort_values(self):
    """sort_values on categorical Series and DataFrame columns follows the
    category order; deprecated 'order'/'sort' methods warn."""
    c = Categorical(["a", "b", "b", "a"], ordered=False)
    cat = Series(c.copy())

    # 'order' was deprecated in gh-10726
    # 'sort' was deprecated in gh-12882
    for func in ('order', 'sort'):
        with tm.assert_produces_warning(FutureWarning):
            getattr(c, func)()

    # sort in the categories order
    expected = Series(
        Categorical(["a", "a", "b", "b"],
                    ordered=False), index=[0, 3, 1, 2])
    result = cat.sort_values()
    tm.assert_series_equal(result, expected)

    cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
    res = cat.sort_values()
    exp = np.array(["a", "b", "c", "d"])
    self.assert_numpy_array_equal(res.__array__(), exp)

    # explicit categories in natural order
    cat = Series(Categorical(["a", "c", "b", "d"], categories=[
        "a", "b", "c", "d"], ordered=True))
    res = cat.sort_values()
    exp = np.array(["a", "b", "c", "d"])
    self.assert_numpy_array_equal(res.__array__(), exp)

    res = cat.sort_values(ascending=False)
    exp = np.array(["d", "c", "b", "a"])
    self.assert_numpy_array_equal(res.__array__(), exp)

    # 'sort' has reversed category order, 'unsort' is unordered
    raw_cat1 = Categorical(["a", "b", "c", "d"],
                           categories=["a", "b", "c", "d"], ordered=False)
    raw_cat2 = Categorical(["a", "b", "c", "d"],
                           categories=["d", "c", "b", "a"], ordered=True)
    s = ["a", "b", "c", "d"]
    df = DataFrame({"unsort": raw_cat1,
                    "sort": raw_cat2,
                    "string": s,
                    "values": [1, 2, 3, 4]})

    # Cats must be sorted in a dataframe
    res = df.sort_values(by=["string"], ascending=False)
    exp = np.array(["d", "c", "b", "a"])
    self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
    self.assertEqual(res["sort"].dtype, "category")

    # sorting by the reversed-order categorical descending matches sorting
    # by the string column ascending
    res = df.sort_values(by=["sort"], ascending=False)
    exp = df.sort_values(by=["string"], ascending=True)
    self.assert_numpy_array_equal(res["values"], exp["values"])
    self.assertEqual(res["sort"].dtype, "category")
    self.assertEqual(res["unsort"].dtype, "category")

    # unordered cat, but we allow this
    df.sort_values(by=["unsort"], ascending=False)

    # multi-columns sort
    # GH 7848
    df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
                    "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
    df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
    df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])

    # sorts 'grade' according to the order of the categories
    result = df.sort_values(by=['grade'])
    expected = df.iloc[[1, 2, 5, 0, 3, 4]]
    tm.assert_frame_equal(result, expected)

    # multi
    result = df.sort_values(by=['grade', 'id'])
    expected = df.iloc[[2, 1, 5, 4, 3, 0]]
    tm.assert_frame_equal(result, expected)
def test_slicing(self):
    """Slicing Series/DataFrames with categorical (cut-derived) columns."""
    # reversing a categorical Series reverses the values
    cat = Series(Categorical([1, 2, 3, 4]))
    reversed = cat[::-1]
    exp = np.array([4, 3, 2, 1])
    self.assert_numpy_array_equal(reversed.__array__(), exp)

    df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
    df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])

    # single-row iloc returns an object Series holding the interval label
    expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
    result = df.iloc[10]
    tm.assert_series_equal(result, expected)

    # row-range iloc keeps the categorical column
    expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
                         index=np.arange(10, 20).astype('int64'))
    expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
    result = df.iloc[10:20]
    tm.assert_frame_equal(result, expected)

    # label-based single-row access behaves like iloc above
    expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
    result = df.loc[8]
    tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
    """Systematically exercise every read-indexer (iloc/loc/ix/iat/at/fancy)
    against a frame with a categorical column, checking the returned
    frame/column/row/scalar and that the categorical dtype survives."""
    # systematically test the slicing operations:
    #  for all slicing ops:
    #   - returning a dataframe
    #   - returning a column
    #   - returning a row
    #   - returning a single value
    cats = pd.Categorical(
        ["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
    idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
    values = [1, 2, 3, 4, 5, 6, 7]
    df = pd.DataFrame({"cats": cats, "values": values}, index=idx)

    # the expected values
    cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
    idx2 = pd.Index(["j", "k"])
    values2 = [3, 4]

    # 2:4,: | "j":"k",:
    exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)

    # :,"cats" | :,0
    exp_col = pd.Series(cats, index=idx, name='cats')

    # "j",: | 2,:
    exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
                        name="j")

    # "j","cats | 2,0
    exp_val = "b"

    # iloc
    # frame
    res_df = df.iloc[2:4, :]
    tm.assert_frame_equal(res_df, exp_df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    # row
    res_row = df.iloc[2, :]
    tm.assert_series_equal(res_row, exp_row)
    tm.assertIsInstance(res_row["cats"], compat.string_types)

    # col
    res_col = df.iloc[:, 0]
    tm.assert_series_equal(res_col, exp_col)
    self.assertTrue(com.is_categorical_dtype(res_col))

    # single value
    res_val = df.iloc[2, 0]
    self.assertEqual(res_val, exp_val)

    # loc
    # frame
    res_df = df.loc["j":"k", :]
    tm.assert_frame_equal(res_df, exp_df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    # row
    res_row = df.loc["j", :]
    tm.assert_series_equal(res_row, exp_row)
    tm.assertIsInstance(res_row["cats"], compat.string_types)

    # col
    res_col = df.loc[:, "cats"]
    tm.assert_series_equal(res_col, exp_col)
    self.assertTrue(com.is_categorical_dtype(res_col))

    # single value
    res_val = df.loc["j", "cats"]
    self.assertEqual(res_val, exp_val)

    # ix
    # frame
    # res_df = df.ix["j":"k",[0,1]] # doesn't work?
    res_df = df.ix["j":"k", :]
    tm.assert_frame_equal(res_df, exp_df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    # row
    res_row = df.ix["j", :]
    tm.assert_series_equal(res_row, exp_row)
    tm.assertIsInstance(res_row["cats"], compat.string_types)

    # col
    res_col = df.ix[:, "cats"]
    tm.assert_series_equal(res_col, exp_col)
    self.assertTrue(com.is_categorical_dtype(res_col))

    # single value
    res_val = df.ix["j", 0]
    self.assertEqual(res_val, exp_val)

    # iat
    res_val = df.iat[2, 0]
    self.assertEqual(res_val, exp_val)

    # at
    res_val = df.at["j", "cats"]
    self.assertEqual(res_val, exp_val)

    # fancy indexing
    exp_fancy = df.iloc[[2]]

    res_fancy = df[df["cats"] == "b"]
    tm.assert_frame_equal(res_fancy, exp_fancy)
    res_fancy = df[df["values"] == 3]
    tm.assert_frame_equal(res_fancy, exp_fancy)

    # get_value
    res_val = df.get_value("j", "cats")
    self.assertEqual(res_val, exp_val)

    # i : int, slice, or sequence of integers
    res_row = df.iloc[2]
    tm.assert_series_equal(res_row, exp_row)
    tm.assertIsInstance(res_row["cats"], compat.string_types)

    res_df = df.iloc[slice(2, 4)]
    tm.assert_frame_equal(res_df, exp_df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    res_df = df.iloc[[2, 3]]
    tm.assert_frame_equal(res_df, exp_df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    res_col = df.iloc[:, 0]
    tm.assert_series_equal(res_col, exp_col)
    self.assertTrue(com.is_categorical_dtype(res_col))

    res_df = df.iloc[:, slice(0, 2)]
    tm.assert_frame_equal(res_df, df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    res_df = df.iloc[:, [0, 1]]
    tm.assert_frame_equal(res_df, df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
    """Examples from the categorical docs on slicing. GH 7918."""
    # GH 7918
    cats = Categorical(
        ["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
    idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
    values = [1, 2, 2, 2, 3, 4, 5]
    df = DataFrame({"cats": cats, "values": values}, index=idx)

    # positional row slice keeps the categorical dtype
    result = df.iloc[2:4, :]
    expected = DataFrame(
        {"cats": Categorical(
            ['b', 'b'], categories=['a', 'b', 'c']),
         "values": [2, 2]}, index=['j', 'k'])
    tm.assert_frame_equal(result, expected)

    result = df.iloc[2:4, :].dtypes
    expected = Series(['category', 'int64'], ['cats', 'values'])
    tm.assert_series_equal(result, expected)

    # label slice of the categorical column returns a categorical Series
    result = df.loc["h":"j", "cats"]
    expected = Series(Categorical(['a', 'b', 'b'],
                                  categories=['a', 'b', 'c']),
                      index=['h', 'i', 'j'], name='cats')
    tm.assert_series_equal(result, expected)

    result = df.ix["h":"j", 0:1]
    expected = DataFrame({'cats': Series(
        Categorical(
            ['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
                                                                 'j'])})
    tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
    """Systematically exercise every write-indexer (iloc/loc/ix/iat/at/
    fancy/set_value) against a categorical column: assigning existing
    categories succeeds; assigning values outside the categories raises
    ValueError."""
    # systematically test the assigning operations:
    # for all slicing ops:
    #  for value in categories and value not in categories:
    #   - assign a single value -> exp_single_cats_value
    #   - assign a complete row (mixed values) -> exp_single_row
    # assign multiple rows (mixed values) (-> array) -> exp_multi_row
    # assign a part of a column with dtype == categorical ->
    # exp_parts_cats_col
    # assign a part of a column with dtype != categorical ->
    # exp_parts_cats_col

    cats = pd.Categorical(
        ["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
    idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
    values = [1, 1, 1, 1, 1, 1, 1]
    orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)

    # the expected values
    # changed single row
    cats1 = pd.Categorical(
        ["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
    idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
    values1 = [1, 1, 2, 1, 1, 1, 1]
    exp_single_row = pd.DataFrame(
        {"cats": cats1,
         "values": values1}, index=idx1)

    # changed multiple rows
    cats2 = pd.Categorical(
        ["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
    idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
    values2 = [1, 1, 2, 2, 1, 1, 1]
    exp_multi_row = pd.DataFrame(
        {"cats": cats2,
         "values": values2}, index=idx2)

    # changed part of the cats column
    cats3 = pd.Categorical(
        ["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
    idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
    values3 = [1, 1, 1, 1, 1, 1, 1]
    exp_parts_cats_col = pd.DataFrame(
        {"cats": cats3,
         "values": values3}, index=idx3)

    # changed single value in cats col
    cats4 = pd.Categorical(
        ["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
    idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
    values4 = [1, 1, 1, 1, 1, 1, 1]
    exp_single_cats_value = pd.DataFrame(
        {"cats": cats4,
         "values": values4}, index=idx4)

    # iloc
    # ###############
    #   - assign a single value -> exp_single_cats_value
    df = orig.copy()
    df.iloc[2, 0] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    df = orig.copy()
    df.iloc[df.index == "j", 0] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    #   - assign a single value not in the current categories set
    def f():
        df = orig.copy()
        df.iloc[2, 0] = "c"

    self.assertRaises(ValueError, f)

    #   - assign a complete row (mixed values) -> exp_single_row
    df = orig.copy()
    df.iloc[2, :] = ["b", 2]
    tm.assert_frame_equal(df, exp_single_row)

    #   - assign a complete row (mixed values) not in categories set
    def f():
        df = orig.copy()
        df.iloc[2, :] = ["c", 2]

    self.assertRaises(ValueError, f)

    #   - assign multiple rows (mixed values) -> exp_multi_row
    df = orig.copy()
    df.iloc[2:4, :] = [["b", 2], ["b", 2]]
    tm.assert_frame_equal(df, exp_multi_row)

    def f():
        df = orig.copy()
        df.iloc[2:4, :] = [["c", 2], ["c", 2]]

    self.assertRaises(ValueError, f)

    # assign a part of a column with dtype == categorical ->
    # exp_parts_cats_col
    df = orig.copy()
    df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        # different categories -> not sure if this should fail or pass
        df = orig.copy()
        df.iloc[2:4, 0] = pd.Categorical(
            ["b", "b"], categories=["a", "b", "c"])

    with tm.assertRaises(ValueError):
        # different values
        df = orig.copy()
        df.iloc[2:4, 0] = pd.Categorical(
            ["c", "c"], categories=["a", "b", "c"])

    # assign a part of a column with dtype != categorical ->
    # exp_parts_cats_col
    df = orig.copy()
    df.iloc[2:4, 0] = ["b", "b"]
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        df.iloc[2:4, 0] = ["c", "c"]

    # loc
    # ##############
    #   - assign a single value -> exp_single_cats_value
    df = orig.copy()
    df.loc["j", "cats"] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    df = orig.copy()
    df.loc[df.index == "j", "cats"] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    #   - assign a single value not in the current categories set
    def f():
        df = orig.copy()
        df.loc["j", "cats"] = "c"

    self.assertRaises(ValueError, f)

    #   - assign a complete row (mixed values) -> exp_single_row
    df = orig.copy()
    df.loc["j", :] = ["b", 2]
    tm.assert_frame_equal(df, exp_single_row)

    #   - assign a complete row (mixed values) not in categories set
    def f():
        df = orig.copy()
        df.loc["j", :] = ["c", 2]

    self.assertRaises(ValueError, f)

    #   - assign multiple rows (mixed values) -> exp_multi_row
    df = orig.copy()
    df.loc["j":"k", :] = [["b", 2], ["b", 2]]
    tm.assert_frame_equal(df, exp_multi_row)

    def f():
        df = orig.copy()
        df.loc["j":"k", :] = [["c", 2], ["c", 2]]

    self.assertRaises(ValueError, f)

    # assign a part of a column with dtype == categorical ->
    # exp_parts_cats_col
    df = orig.copy()
    df.loc["j":"k", "cats"] = pd.Categorical(
        ["b", "b"], categories=["a", "b"])
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        # different categories -> not sure if this should fail or pass
        df = orig.copy()
        df.loc["j":"k", "cats"] = pd.Categorical(
            ["b", "b"], categories=["a", "b", "c"])

    with tm.assertRaises(ValueError):
        # different values
        df = orig.copy()
        df.loc["j":"k", "cats"] = pd.Categorical(
            ["c", "c"], categories=["a", "b", "c"])

    # assign a part of a column with dtype != categorical ->
    # exp_parts_cats_col
    df = orig.copy()
    df.loc["j":"k", "cats"] = ["b", "b"]
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        df.loc["j":"k", "cats"] = ["c", "c"]

    # ix
    # ##############
    #   - assign a single value -> exp_single_cats_value
    df = orig.copy()
    df.ix["j", 0] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    df = orig.copy()
    df.ix[df.index == "j", 0] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    #   - assign a single value not in the current categories set
    def f():
        df = orig.copy()
        df.ix["j", 0] = "c"

    self.assertRaises(ValueError, f)

    #   - assign a complete row (mixed values) -> exp_single_row
    df = orig.copy()
    df.ix["j", :] = ["b", 2]
    tm.assert_frame_equal(df, exp_single_row)

    #   - assign a complete row (mixed values) not in categories set
    def f():
        df = orig.copy()
        df.ix["j", :] = ["c", 2]

    self.assertRaises(ValueError, f)

    #   - assign multiple rows (mixed values) -> exp_multi_row
    df = orig.copy()
    df.ix["j":"k", :] = [["b", 2], ["b", 2]]
    tm.assert_frame_equal(df, exp_multi_row)

    def f():
        df = orig.copy()
        df.ix["j":"k", :] = [["c", 2], ["c", 2]]

    self.assertRaises(ValueError, f)

    # assign a part of a column with dtype == categorical ->
    # exp_parts_cats_col
    df = orig.copy()
    df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        # different categories -> not sure if this should fail or pass
        df = orig.copy()
        df.ix["j":"k", 0] = pd.Categorical(
            ["b", "b"], categories=["a", "b", "c"])

    with tm.assertRaises(ValueError):
        # different values
        df = orig.copy()
        df.ix["j":"k", 0] = pd.Categorical(
            ["c", "c"], categories=["a", "b", "c"])

    # assign a part of a column with dtype != categorical ->
    # exp_parts_cats_col
    df = orig.copy()
    df.ix["j":"k", 0] = ["b", "b"]
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        df.ix["j":"k", 0] = ["c", "c"]

    # iat
    df = orig.copy()
    df.iat[2, 0] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    #   - assign a single value not in the current categories set
    def f():
        df = orig.copy()
        df.iat[2, 0] = "c"

    self.assertRaises(ValueError, f)

    # at
    #   - assign a single value -> exp_single_cats_value
    df = orig.copy()
    df.at["j", "cats"] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    #   - assign a single value not in the current categories set
    def f():
        df = orig.copy()
        df.at["j", "cats"] = "c"

    self.assertRaises(ValueError, f)

    # fancy indexing
    catsf = pd.Categorical(
        ["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"])
    idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
    valuesf = [1, 1, 3, 3, 1, 1, 1]
    df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)

    exp_fancy = exp_multi_row.copy()
    exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)

    # NOTE(review): exp_fancy is prepared above but the assertion below
    # compares against exp_multi_row — looks intentional only if the two
    # are equal after assignment; confirm.
    df[df["cats"] == "c"] = ["b", 2]
    tm.assert_frame_equal(df, exp_multi_row)

    # set_value
    df = orig.copy()
    df.set_value("j", "cats", "b")
    tm.assert_frame_equal(df, exp_single_cats_value)

    def f():
        df = orig.copy()
        df.set_value("j", "cats", "c")

    self.assertRaises(ValueError, f)

    # Assigning a Category to parts of a int/... column uses the values of
    # the Catgorical
    df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
                       "b": ["a", "a", "a", "a", "a"]})
    exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
                        "b": ["a", "a", "b", "b", "a"]})
    df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
    df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
    tm.assert_frame_equal(df, exp)

    # Series
    orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
    s = orig.copy()
    s[:] = "a"
    exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
    tm.assert_series_equal(s, exp)

    s = orig.copy()
    s[1] = "a"
    exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
    tm.assert_series_equal(s, exp)

    s = orig.copy()
    s[s.index > 0] = "a"
    exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
    tm.assert_series_equal(s, exp)

    s = orig.copy()
    s[[False, True]] = "a"
    exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
    tm.assert_series_equal(s, exp)

    s = orig.copy()
    s.index = ["x", "y"]
    s["y"] = "a"
    exp = Series(
        pd.Categorical(["b", "a"],
                       categories=["a", "b"]), index=["x", "y"])
    tm.assert_series_equal(s, exp)

    # ensure that one can set something to np.nan
    s = Series(Categorical([1, 2, 3]))
    exp = Series(Categorical([1, np.nan, 3]))
    s[1] = np.nan
    tm.assert_series_equal(s, exp)
def test_comparisons(self):
    """Comparison semantics for categorical Series: ordered comparisons
    follow category order; mismatched categories and non-categorical
    operands raise TypeError; NaN compares unequal."""
    tests_data = [(list("abc"), list("cba"), list("bbb")),
                  ([1, 2, 3], [3, 2, 1], [2, 2, 2])]
    for data, reverse, base in tests_data:
        cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
                                           ordered=True))
        cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
                                                ordered=True))
        cat = pd.Series(pd.Categorical(data, ordered=True))
        cat_base = pd.Series(pd.Categorical(
            base, categories=cat.cat.categories, ordered=True))
        s = Series(base)
        a = np.array(base)

        # comparisons need to take categories ordering into account
        res_rev = cat_rev > cat_rev_base
        exp_rev = Series([True, False, False])
        tm.assert_series_equal(res_rev, exp_rev)

        res_rev = cat_rev < cat_rev_base
        exp_rev = Series([False, False, True])
        tm.assert_series_equal(res_rev, exp_rev)

        res = cat > cat_base
        exp = Series([False, False, True])
        tm.assert_series_equal(res, exp)

        # comparisons against a scalar taken from the base values
        scalar = base[1]
        res = cat > scalar
        exp = Series([False, False, True])
        exp2 = cat.values > scalar
        tm.assert_series_equal(res, exp)
        tm.assert_numpy_array_equal(res.values, exp2)
        res_rev = cat_rev > scalar
        exp_rev = Series([True, False, False])
        exp_rev2 = cat_rev.values > scalar
        tm.assert_series_equal(res_rev, exp_rev)
        tm.assert_numpy_array_equal(res_rev.values, exp_rev2)

        # Only categories with same categories can be compared
        def f():
            cat > cat_rev

        self.assertRaises(TypeError, f)

        # categorical cannot be compared to Series or numpy array, and also
        # not the other way around
        self.assertRaises(TypeError, lambda: cat > s)
        self.assertRaises(TypeError, lambda: cat_rev > s)
        self.assertRaises(TypeError, lambda: cat > a)
        self.assertRaises(TypeError, lambda: cat_rev > a)

        self.assertRaises(TypeError, lambda: s < cat)
        self.assertRaises(TypeError, lambda: s < cat_rev)

        self.assertRaises(TypeError, lambda: a < cat)
        self.assertRaises(TypeError, lambda: a < cat_rev)

    # unequal comparison should raise for unordered cats
    cat = Series(Categorical(list("abc")))

    def f():
        cat > "b"

    self.assertRaises(TypeError, f)
    cat = Series(Categorical(list("abc"), ordered=False))

    def f():
        cat > "b"

    self.assertRaises(TypeError, f)

    # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
    # and following comparisons with scalars not in categories should raise
    # for unequal comps, but not for equal/not equal
    cat = Series(Categorical(list("abc"), ordered=True))

    self.assertRaises(TypeError, lambda: cat < "d")
    self.assertRaises(TypeError, lambda: cat > "d")
    self.assertRaises(TypeError, lambda: "d" < cat)
    self.assertRaises(TypeError, lambda: "d" > cat)

    self.assert_series_equal(cat == "d", Series([False, False, False]))
    self.assert_series_equal(cat != "d", Series([True, True, True]))

    # And test NaN handling...
    cat = Series(Categorical(["a", "b", "c", np.nan]))
    exp = Series([True, True, True, False])
    res = (cat == cat)
    tm.assert_series_equal(res, exp)
def test_cat_equality(self):
    """Equality between categorical and object Series/Categoricals is
    allowed; ordering comparisons between them raise TypeError. GH 8938."""
    # GH 8938
    # allow equality comparisons
    a = Series(list('abc'), dtype="category")
    b = Series(list('abc'), dtype="object")
    c = Series(['a', 'b', 'cc'], dtype="object")
    d = Series(list('acb'), dtype="object")
    e = Categorical(list('abc'))
    f = Categorical(list('acb'))

    # vs scalar
    self.assertFalse((a == 'a').all())
    self.assertTrue(((a != 'a') == ~(a == 'a')).all())

    self.assertFalse(('a' == a).all())
    self.assertTrue((a == 'a')[0])
    self.assertTrue(('a' == a)[0])
    self.assertFalse(('a' != a)[0])

    # vs list-like
    self.assertTrue((a == a).all())
    self.assertFalse((a != a).all())

    self.assertTrue((a == list(a)).all())
    self.assertTrue((a == b).all())
    self.assertTrue((b == a).all())
    self.assertTrue(((~(a == b)) == (a != b)).all())
    self.assertTrue(((~(b == a)) == (b != a)).all())

    self.assertFalse((a == c).all())
    self.assertFalse((c == a).all())
    self.assertFalse((a == d).all())
    self.assertFalse((d == a).all())

    # vs a cat-like
    self.assertTrue((a == e).all())
    self.assertTrue((e == a).all())
    self.assertFalse((a == f).all())
    self.assertFalse((f == a).all())

    self.assertTrue(((~(a == e) == (a != e)).all()))
    self.assertTrue(((~(e == a) == (e != a)).all()))
    self.assertTrue(((~(a == f) == (a != f)).all()))
    self.assertTrue(((~(f == a) == (f != a)).all()))

    # non-equality is not comparable
    self.assertRaises(TypeError, lambda: a < b)
    self.assertRaises(TypeError, lambda: b < a)
    self.assertRaises(TypeError, lambda: a > b)
    self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
    """pd.concat of frames with categorical columns: identical categories
    concatenate (original indexes preserved), mismatched categories raise
    ValueError, and category metadata survives the concat (GH 7864)."""
    cat = pd.Categorical(["a", "b"], categories=["a", "b"])
    vals = [1, 2]
    df = pd.DataFrame({"cats": cat, "vals": vals})
    cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
    vals2 = [1, 2, 1, 2]
    exp = pd.DataFrame({"cats": cat2,
                        "vals": vals2}, index=pd.Index([0, 1, 0, 1]))
    res = pd.concat([df, df])
    tm.assert_frame_equal(exp, res)

    # Concat should raise if the two categoricals do not have the same
    # categories
    cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
    vals3 = [1, 2]
    df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})

    def f():
        pd.concat([df, df_wrong_categories])
    self.assertRaises(ValueError, f)

    # GH 7864
    # make sure category ordering is preserved across the concat
    df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
                       "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
    df["grade"] = pd.Categorical(df["raw_grade"])
    # NOTE(review): set_categories is not in-place, so this return value is
    # discarded -- confirm whether it was meant to be assigned back
    df['grade'].cat.set_categories(['e', 'a', 'b'])
    df1 = df[0:3]
    df2 = df[3:]
    self.assert_numpy_array_equal(df['grade'].cat.categories,
                                  df1['grade'].cat.categories)
    self.assert_numpy_array_equal(df['grade'].cat.categories,
                                  df2['grade'].cat.categories)
    dfx = pd.concat([df1, df2])
    dfx['grade'].cat.categories
    self.assert_numpy_array_equal(df['grade'].cat.categories,
                                  dfx['grade'].cat.categories)
def test_concat_preserve(self):
    """Series/DataFrame concat must preserve the category dtype and the
    declared category order (GH 8641)."""
    # GH 8641
    # series concat not preserving category dtype
    s = Series(list('abc'), dtype='category')
    s2 = Series(list('abd'), dtype='category')

    # different categories -> concat must raise
    def f():
        pd.concat([s, s2])
    self.assertRaises(ValueError, f)

    result = pd.concat([s, s], ignore_index=True)
    expected = Series(list('abcabc')).astype('category')
    tm.assert_series_equal(result, expected)

    # without ignore_index, original (duplicated) index is preserved
    result = pd.concat([s, s])
    expected = Series(
        list('abcabc'), index=[0, 1, 2, 0, 1, 2]).astype('category')
    tm.assert_series_equal(result, expected)

    a = Series(np.arange(6, dtype='int64'))
    b = Series(list('aabbca'))

    df2 = DataFrame({'A': a,
                     'B': b.astype('category', categories=list('cab'))})
    result = pd.concat([df2, df2])
    expected = DataFrame({'A': pd.concat([a, a]),
                          'B': pd.concat([b, b]).astype(
                              'category', categories=list('cab'))})
    tm.assert_frame_equal(result, expected)
def test_categorical_index_preserver(self):
    """concat keeps a categorical index intact; concatenating frames whose
    categorical indexes disagree on categories raises TypeError."""
    a = Series(np.arange(6, dtype='int64'))
    b = Series(list('aabbca'))

    df2 = DataFrame({'A': a,
                     'B': b.astype('category', categories=list(
                         'cab'))}).set_index('B')
    result = pd.concat([df2, df2])
    expected = DataFrame({'A': pd.concat([a, a]),
                          'B': pd.concat([b, b]).astype(
                              'category', categories=list(
                                  'cab'))}).set_index('B')
    tm.assert_frame_equal(result, expected)

    # wrong categories
    df3 = DataFrame({'A': a,
                     'B': b.astype('category', categories=list(
                         'abc'))}).set_index('B')
    self.assertRaises(TypeError, lambda: pd.concat([df2, df3]))
def test_append(self):
    """DataFrame.append behaves like concat for categorical columns: same
    categories append (indexes preserved), different categories raise."""
    cat = pd.Categorical(["a", "b"], categories=["a", "b"])
    vals = [1, 2]
    df = pd.DataFrame({"cats": cat, "vals": vals})
    cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
    vals2 = [1, 2, 1, 2]
    exp = pd.DataFrame({"cats": cat2,
                        "vals": vals2}, index=pd.Index([0, 1, 0, 1]))
    res = df.append(df)
    tm.assert_frame_equal(exp, res)

    # Append should raise if the two categoricals do not have the same
    # categories
    cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
    vals3 = [1, 2]
    df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})

    def f():
        df.append(df_wrong_categories)
    self.assertRaises(ValueError, f)
def test_merge(self):
    """Merging on object/categorical key columns must give the same result
    regardless of which side holds the categorical column (GH 9426)."""
    # GH 9426
    right = DataFrame({'c': {i: ch for i, ch in enumerate('abcde')},
                       'd': {i: 'null' for i in range(5)}})
    left = DataFrame({'a': {i: 'f' for i in range(5)},
                      'b': {i: 'g' for i in range(5)}})
    df = pd.merge(left, right, how='left', left_on='b', right_on='c')

    # the object-object merge is the reference result
    expected = df.copy()

    # object key vs categorical key
    cright = right.copy()
    cright['d'] = cright['d'].astype('category')
    result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
    tm.assert_frame_equal(result, expected)

    # categorical key vs object key
    cleft = left.copy()
    cleft['b'] = cleft['b'].astype('category')
    result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
    tm.assert_frame_equal(result, expected)

    # categorical vs categorical
    cright = right.copy()
    cright['d'] = cright['d'].astype('category')
    cleft = left.copy()
    cleft['b'] = cleft['b'].astype('category')
    result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
    tm.assert_frame_equal(result, expected)
def test_repeat(self):
    """Categorical.repeat tiles the codes while keeping the categories
    unchanged (GH10183)."""
    # GH10183
    values = pd.Categorical(["a", "b"], categories=["a", "b"])
    expected = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"])
    repeated = values.repeat(2)
    self.assert_categorical_equal(repeated, expected)
def test_numpy_repeat(self):
    """np.repeat dispatches to Categorical.repeat; the unsupported 'axis'
    keyword must raise ValueError."""
    cat = pd.Categorical(["a", "b"], categories=["a", "b"])
    exp = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"])
    self.assert_categorical_equal(np.repeat(cat, 2), exp)

    msg = "the 'axis' parameter is not supported"
    tm.assertRaisesRegexp(ValueError, msg, np.repeat, cat, 2, axis=1)
def test_numpy_reshape(self):
    """np.reshape works on a Categorical (identity reshape round-trips);
    the unsupported 'order' keyword must raise ValueError."""
    cat = pd.Categorical(["a", "b"], categories=["a", "b"])
    self.assert_categorical_equal(np.reshape(cat, cat.shape), cat)

    msg = "the 'order' parameter is not supported"
    tm.assertRaisesRegexp(ValueError, msg, np.reshape,
                          cat, cat.shape, order='F')
def test_na_actions(self):
    """fillna/dropna on frames with categorical columns: filling with an
    existing category works, filling with an unknown category raises, and
    fillna handles both missing values and NA categories."""
    cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
    vals = ["a", "b", np.nan, "d"]
    df = pd.DataFrame({"cats": cat, "vals": vals})
    cat2 = pd.Categorical([1, 2, 3, 3], categories=[1, 2, 3])
    vals2 = ["a", "b", "b", "d"]
    df_exp_fill = pd.DataFrame({"cats": cat2, "vals": vals2})
    cat3 = pd.Categorical([1, 2, 3], categories=[1, 2, 3])
    vals3 = ["a", "b", np.nan]
    df_exp_drop_cats = pd.DataFrame({"cats": cat3, "vals": vals3})
    cat4 = pd.Categorical([1, 2], categories=[1, 2, 3])
    vals4 = ["a", "b"]
    df_exp_drop_all = pd.DataFrame({"cats": cat4, "vals": vals4})

    # fillna with per-column values; 3 is an existing category
    res = df.fillna(value={"cats": 3, "vals": "b"})
    tm.assert_frame_equal(res, df_exp_fill)

    # filling with a value outside the categories must raise
    def f():
        df.fillna(value={"cats": 4, "vals": "c"})
    self.assertRaises(ValueError, f)

    # forward-fill propagates the previous valid category
    res = df.fillna(method='pad')
    tm.assert_frame_equal(res, df_exp_fill)

    res = df.dropna(subset=["cats"])
    tm.assert_frame_equal(res, df_exp_drop_cats)

    res = df.dropna()
    tm.assert_frame_equal(res, df_exp_drop_all)

    # make sure that fillna takes both missing values and NA categories
    # into account
    c = Categorical(["a", "b", np.nan])
    with tm.assert_produces_warning(FutureWarning):
        c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
    c[0] = np.nan
    df = pd.DataFrame({"cats": c, "vals": [1, 2, 3]})
    df_exp = pd.DataFrame({"cats": Categorical(["a", "b", "a"]),
                           "vals": [1, 2, 3]})
    res = df.fillna("a")
    tm.assert_frame_equal(res, df_exp)
def test_astype_to_other(self):
    """astype conversions from category dtype: to category/CategoricalDtype
    is a no-op, to str/int converts values, to float64 raises ValueError,
    and passing the Categorical class itself (not a dtype) raises
    TypeError."""
    s = self.cat['value_group']
    expected = s
    tm.assert_series_equal(s.astype('category'), expected)
    tm.assert_series_equal(s.astype(com.CategoricalDtype()), expected)
    self.assertRaises(ValueError, lambda: s.astype('float64'))

    cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
    exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
    tm.assert_series_equal(cat.astype('str'), exp)
    s2 = Series(Categorical.from_array(['1', '2', '3', '4']))
    exp2 = Series([1, 2, 3, 4]).astype(int)
    tm.assert_series_equal(s2.astype('int'), exp2)

    # object don't sort correctly, so just compare that we have the same
    # values
    def cmp(a, b):
        tm.assert_almost_equal(
            np.sort(np.unique(a)), np.sort(np.unique(b)))
    expected = Series(np.array(s.values), name='value_group')
    cmp(s.astype('object'), expected)
    cmp(s.astype(np.object_), expected)

    # array conversion
    tm.assert_almost_equal(np.array(s), np.array(s.values))

    # valid conversion
    for valid in [lambda x: x.astype('category'),
                  lambda x: x.astype(com.CategoricalDtype()),
                  lambda x: x.astype('object').astype('category'),
                  lambda x: x.astype('object').astype(
                      com.CategoricalDtype())
                  ]:
        result = valid(s)
        tm.assert_series_equal(result, s)

    # invalid conversion (these are NOT a dtype)
    for invalid in [lambda x: x.astype(pd.Categorical),
                    lambda x: x.astype('object').astype(pd.Categorical)]:
        self.assertRaises(TypeError, lambda: invalid(s))
def test_astype_categorical(self):
    """astype round-trips for a raw Categorical: 'category' is a no-op,
    'object' materializes the values, float conversion is rejected."""
    values = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
    tm.assert_categorical_equal(values, values.astype('category'))
    tm.assert_almost_equal(np.array(values), values.astype('object'))
    self.assertRaises(ValueError, lambda: values.astype(float))
def test_to_records(self):
    """Categorical columns coerce to object dtype when a frame is converted
    to a record array (GH 8626)."""
    # GH8626
    # dict creation
    df = DataFrame({'A': list('abc')}, dtype='category')
    expected = Series(list('abc'), dtype='category', name='A')
    tm.assert_series_equal(df['A'], expected)

    # list-like creation
    df = DataFrame(list('abc'), dtype='category')
    expected = Series(list('abc'), dtype='category', name=0)
    tm.assert_series_equal(df[0], expected)

    # to record array
    # this coerces the categorical column to object ('O')
    result = df.to_records()
    expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
                            dtype=[('index', '=i8'), ('0', 'O')])
    tm.assert_almost_equal(result, expected)
def test_numeric_like_ops(self):
    """Arithmetic, numeric reductions, and numeric ufuncs are invalid on
    categorical data and must raise TypeError."""
    # numeric ops should not succeed
    for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
        self.assertRaises(TypeError,
                          lambda: getattr(self.cat, op)(self.cat))

    # reduction ops should not succeed (unless specifically defined, e.g.
    # min/max)
    s = self.cat['value_group']
    for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
        self.assertRaises(TypeError,
                          lambda: getattr(s, op)(numeric_only=False))

    # mad technically works because it takes always the numeric data

    # numpy ops
    s = pd.Series(pd.Categorical([1, 2, 3, 4]))
    self.assertRaises(TypeError, lambda: np.sum(s))

    # numeric ops on a Series
    for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
        self.assertRaises(TypeError, lambda: getattr(s, op)(2))

    # invalid ufunc
    self.assertRaises(TypeError, lambda: np.log(s))
def test_cat_tab_completition(self):
    """The .cat accessor's __dir__ exposes exactly the public categorical
    API (drives tab completion).

    NOTE(review): 'completition' is a long-standing typo in the test name;
    renaming would change the test id, so it is kept.
    """
    # test the tab completion display
    ok_for_cat = ['categories', 'codes', 'ordered', 'set_categories',
                  'add_categories', 'remove_categories',
                  'rename_categories', 'reorder_categories',
                  'remove_unused_categories', 'as_ordered', 'as_unordered']

    def get_dir(s):
        # public names only, deduplicated and sorted
        results = [r for r in s.cat.__dir__() if not r.startswith('_')]
        return list(sorted(set(results)))

    s = Series(list('aabbcde')).astype('category')
    results = get_dir(s)
    tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))
def test_cat_accessor_api(self):
    """.cat is a CategoricalAccessor on categorical Series and raises a
    helpful AttributeError on non-categorical ones (GH 9322)."""
    # GH 9322
    from pandas.core.categorical import CategoricalAccessor
    self.assertIs(Series.cat, CategoricalAccessor)
    s = Series(list('aabbcde')).astype('category')
    self.assertIsInstance(s.cat, CategoricalAccessor)

    invalid = Series([1])
    with tm.assertRaisesRegexp(AttributeError, "only use .cat accessor"):
        invalid.cat
    self.assertFalse(hasattr(invalid, 'cat'))
def test_cat_accessor_no_new_attributes(self):
    """Setting unknown attributes on the .cat accessor must fail
    (GH 10673)."""
    # https://github.com/pydata/pandas/issues/10673
    cat_series = Series(list('aabbcde')).astype('category')
    expected_msg = "You cannot add any new attribute"
    with tm.assertRaisesRegexp(AttributeError, expected_msg):
        cat_series.cat.xlabel = "a"
def test_str_accessor_api_for_categorical(self):
    """Every .str method must give the same result on a categorical Series
    as on the underlying object Series; non-string categoricals must
    reject the .str accessor (GH 10661)."""
    # https://github.com/pydata/pandas/issues/10661
    from pandas.core.strings import StringMethods
    s = Series(list('aabb'))
    s = s + " " + s
    c = s.astype('category')
    self.assertIsInstance(c.str, StringMethods)

    # str functions, which need special arguments
    special_func_defs = [
        ('cat', (list("zyxw"),), {"sep": ","}),
        ('center', (10,), {}),
        ('contains', ("a",), {}),
        ('count', ("a",), {}),
        ('decode', ("UTF-8",), {}),
        ('encode', ("UTF-8",), {}),
        ('endswith', ("a",), {}),
        ('extract', ("([a-z]*) ",), {"expand": False}),
        ('extract', ("([a-z]*) ",), {"expand": True}),
        ('extractall', ("([a-z]*) ",), {}),
        ('find', ("a",), {}),
        ('findall', ("a",), {}),
        ('index', (" ",), {}),
        ('ljust', (10,), {}),
        # NOTE(review): ("a") is a plain string, not a 1-tuple -- it only
        # splats correctly because it is a single character
        ('match', ("a"), {}),  # deprecated...
        ('normalize', ("NFC",), {}),
        ('pad', (10,), {}),
        ('partition', (" ",), {"expand": False}),  # not default
        ('partition', (" ",), {"expand": True}),  # default
        ('repeat', (3,), {}),
        ('replace', ("a", "z"), {}),
        ('rfind', ("a",), {}),
        ('rindex', (" ",), {}),
        ('rjust', (10,), {}),
        ('rpartition', (" ",), {"expand": False}),  # not default
        ('rpartition', (" ",), {"expand": True}),  # default
        ('slice', (0, 1), {}),
        ('slice_replace', (0, 1, "z"), {}),
        ('split', (" ",), {"expand": False}),  # default
        ('split', (" ",), {"expand": True}),  # not default
        ('startswith', ("a",), {}),
        ('wrap', (2,), {}),
        ('zfill', (10,), {})
    ]
    _special_func_names = [f[0] for f in special_func_defs]

    # * get, join: they need a individual elements of type lists, but
    #   we can't make a categorical with lists as individual categories.
    #   -> `s.str.split(" ").astype("category")` will error!
    # * `translate` has different interfaces for py2 vs. py3
    _ignore_names = ["get", "join", "translate"]

    str_func_names = [f
                      for f in dir(s.str)
                      if not (f.startswith("_") or f in _special_func_names
                              or f in _ignore_names)]

    func_defs = [(f, (), {}) for f in str_func_names]
    func_defs.extend(special_func_defs)

    for func, args, kwargs in func_defs:
        res = getattr(c.str, func)(*args, **kwargs)
        exp = getattr(s.str, func)(*args, **kwargs)

        if isinstance(res, pd.DataFrame):
            tm.assert_frame_equal(res, exp)
        else:
            tm.assert_series_equal(res, exp)

    invalid = Series([1, 2, 3]).astype('category')
    with tm.assertRaisesRegexp(AttributeError,
                               "Can only use .str accessor with string"):
        invalid.str
    self.assertFalse(hasattr(invalid, 'str'))
def test_dt_accessor_api_for_categorical(self):
    """The .dt accessor on a categorical backed by datetime-like data must
    mirror the .dt accessor of the underlying Series, for methods as well
    as attributes; non-datetimelike categoricals must reject .dt
    (GH 10661).

    Bug fixed: the final hasattr check previously probed the 'str'
    accessor (a copy-paste from the .str test) instead of 'dt'.
    """
    # https://github.com/pydata/pandas/issues/10661
    from pandas.tseries.common import Properties
    from pandas.tseries.index import date_range, DatetimeIndex
    from pandas.tseries.period import period_range, PeriodIndex
    from pandas.tseries.tdi import timedelta_range, TimedeltaIndex

    s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
    c_dr = s_dr.astype("category")

    s_pr = Series(period_range('1/1/2015', freq='D', periods=5))
    c_pr = s_pr.astype("category")

    s_tdr = Series(timedelta_range('1 days', '10 days'))
    c_tdr = s_tdr.astype("category")

    test_data = [
        ("Datetime", DatetimeIndex._datetimelike_ops, s_dr, c_dr),
        ("Period", PeriodIndex._datetimelike_ops, s_pr, c_pr),
        ("Timedelta", TimedeltaIndex._datetimelike_ops, s_tdr, c_tdr)]

    self.assertIsInstance(c_dr.dt, Properties)

    # dt methods that need specific arguments
    special_func_defs = [
        ('strftime', ("%Y-%m-%d",), {}),
        ('tz_convert', ("EST",), {}),
        ('round', ("D",), {}),
        ('floor', ("D",), {}),
        ('ceil', ("D",), {}),
        # ('tz_localize', ("UTC",), {}),
    ]
    _special_func_names = [f[0] for f in special_func_defs]

    # the series is already localized
    _ignore_names = ['tz_localize']

    for name, attr_names, s, c in test_data:
        func_names = [f
                      for f in dir(s.dt)
                      if not (f.startswith("_") or f in attr_names or f in
                              _special_func_names or f in _ignore_names)]
        func_defs = [(f, (), {}) for f in func_names]
        for f_def in special_func_defs:
            if f_def[0] in dir(s.dt):
                func_defs.append(f_def)

        # methods: categorical .dt result must equal Series .dt result
        for func, args, kwargs in func_defs:
            res = getattr(c.dt, func)(*args, **kwargs)
            exp = getattr(s.dt, func)(*args, **kwargs)

            if isinstance(res, pd.DataFrame):
                tm.assert_frame_equal(res, exp)
            elif isinstance(res, pd.Series):
                tm.assert_series_equal(res, exp)
            else:
                tm.assert_numpy_array_equal(res, exp)

        # attributes: same comparison, with a breadcrumb on failure
        for attr in attr_names:
            try:
                res = getattr(c.dt, attr)
                exp = getattr(s.dt, attr)
            except Exception as e:
                print(name, attr)
                raise e

            if isinstance(res, pd.DataFrame):
                tm.assert_frame_equal(res, exp)
            elif isinstance(res, pd.Series):
                tm.assert_series_equal(res, exp)
            else:
                tm.assert_numpy_array_equal(res, exp)

    invalid = Series([1, 2, 3]).astype('category')
    with tm.assertRaisesRegexp(
            AttributeError, "Can only use .dt accessor with datetimelike"):
        invalid.dt
    # was: hasattr(invalid, 'str') -- this test is about the .dt accessor
    self.assertFalse(hasattr(invalid, 'dt'))
def test_pickle_v0_14_1(self):
    """Pickles written by pandas 0.14.1 (with the deprecated `name`
    argument) must still round-trip (GH 10482)."""
    # we have the name warning
    # 10482
    with tm.assert_produces_warning(UserWarning):
        cat = pd.Categorical(values=['a', 'b', 'c'],
                             categories=['a', 'b', 'c', 'd'],
                             name='foobar', ordered=False)
    pickle_path = os.path.join(tm.get_data_path(),
                               'categorical_0_14_1.pickle')
    # This code was executed once on v0.14.1 to generate the pickle:
    #
    # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
    #                   name='foobar')
    # with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
    #
    self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_v0_15_2(self):
    """Pickles written by pandas 0.15.2 must still round-trip despite the
    ordered -> _ordered attribute rename (GH 9347, GH 10482)."""
    # ordered -> _ordered
    # GH 9347
    # we have the name warning
    # 10482
    with tm.assert_produces_warning(UserWarning):
        cat = pd.Categorical(values=['a', 'b', 'c'],
                             categories=['a', 'b', 'c', 'd'],
                             name='foobar', ordered=False)
    pickle_path = os.path.join(tm.get_data_path(),
                               'categorical_0_15_2.pickle')
    # This code was executed once on v0.15.2 to generate the pickle:
    #
    # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
    #                   name='foobar')
    # with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
    #
    self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_concat_categorical(self):
    """Concatenating a frame without a categorical column with one that has
    it fills the gap with NaN while keeping the category dtype
    (GH 10177)."""
    # See GH 10177
    df1 = pd.DataFrame(
        np.arange(18, dtype='int64').reshape(6,
                                             3), columns=["a", "b", "c"])

    df2 = pd.DataFrame(
        np.arange(14, dtype='int64').reshape(7, 2), columns=["a", "c"])
    df2['h'] = pd.Series(pd.Categorical(["one", "one", "two", "one", "two",
                                         "two", "one"]))

    df_concat = pd.concat((df1, df2), axis=0).reset_index(drop=True)

    df_expected = pd.DataFrame(
        {'a': [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
         'b': [1, 4, 7, 10, 13, 16, np.nan, np.nan, np.nan, np.nan, np.nan,
               np.nan, np.nan],
         'c': [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13]})
    # rows coming from df1 get NaN in the categorical column
    df_expected['h'] = pd.Series(pd.Categorical(
        [None, None, None, None, None, None, "one", "one", "two", "one",
         "two", "two", "one"]))

    tm.assert_frame_equal(df_expected, df_concat)
if __name__ == '__main__':
    # Run this module's tests under nose, stopping at the first failure
    # and dropping into pdb on errors/failures.
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   # '--with-coverage', '--cover-package=pandas.core']
                   exit=False)
| mit |
dblalock/dig | tests/datasets.py | 1 | 17068 | #!/bin/env python
# import functools
import os
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from joblib import Memory
# joblib disk cache rooted in the working directory; used by the
# (currently commented-out) @_memory.cache decorators below
_memory = Memory('.', verbose=1)

# root directory that all dataset path classes below are relative to
DATA_DIR = os.path.expanduser('~/Desktop/datasets/nn-search')
join = os.path.join  # short alias used heavily in the path classes
class Random:
    """Names of the synthetic dataset families accepted by load_dataset()."""
    UNIFORM = 'uniform'
    GAUSS = 'gauss'
    WALK = 'walk'
    BLOBS = 'blobs'
class Gist:
    """File locations for the GIST descriptor dataset."""
    DIR = join(DATA_DIR, 'gist')
    TRAIN = join(DIR, 'gist_train.npy')  # noqa
    TEST = join(DIR, 'gist.npy')  # noqa
    TEST_100 = join(DIR, 'gist_100k.npy')  # noqa
    TEST_200 = join(DIR, 'gist_200k.npy')  # noqa
    QUERIES = join(DIR, 'gist_queries.npy')  # noqa
    TRUTH = join(DIR, 'gist_truth.npy')  # noqa
class Sift1M:
    """File locations for the SIFT1M dataset (note: 100k/200k subsets are
    stored as .txt, unlike the .npy files elsewhere)."""
    DIR = join(DATA_DIR, 'sift1m')
    TRAIN = join(DIR, 'sift_learn.npy')  # noqa
    TEST = join(DIR, 'sift_base.npy')  # noqa
    TEST_100 = join(DIR, 'sift_100k.txt')  # noqa
    TEST_200 = join(DIR, 'sift_200k.txt')  # noqa
    QUERIES = join(DIR, 'sift_queries.npy')  # noqa
    TRUTH = join(DIR, 'sift_groundtruth.npy')  # noqa
class Sift10M:
    """File locations for a 10M-vector slice of the SIFT1B (BIGANN)
    dataset; the training set currently points at the 1M subsample."""
    DIR = join(DATA_DIR, 'sift1b')
    # TRAIN = join(DIR, 'big_ann_learn_10M.npy')  # noqa
    TRAIN = join(DIR, 'big_ann_learn_1M.npy')  # noqa  # TODO use 10M?
    TRAIN_1M = join(DIR, 'big_ann_learn_1M.npy')  # noqa
    TEST = join(DIR, 'sift_10M.npy')  # noqa
    QUERIES = join(DIR, 'sift_queries.npy')  # noqa
    TRUTH = join(DIR, 'true_nn_idxs_10M.npy')  # noqa
class Deep1M:
    """256D PCA of convnet activations; see OTQ paper supporting
    webiste, http://sites.skoltech.ru/compvision/projects/aqtq/"""
    DIR = join(DATA_DIR, 'deep1m')  # noqa
    TRAIN = join(DIR, 'deep1M_learn.npy')  # noqa
    TEST = join(DIR, 'deep1M_base.npy')  # noqa
    TEST_100 = join(DIR, 'deep1M_test_100k.npy')  # noqa
    QUERIES = join(DIR, 'deep1M_queries.npy')  # noqa
    TRUTH_TRAIN = join(DIR, 'deep1M_truth_train.npy')  # noqa
    TRUTH = join(DIR, 'deep1M_groundtruth.npy')  # noqa
class Convnet1M:
    """File locations for the 1M convnet-activation dataset."""
    DIR = join(DATA_DIR, 'convnet1m')  # noqa
    TRAIN = join(DIR, 'convnet_train.npy')  # noqa
    TEST = join(DIR, 'convnet_test.npy')  # noqa
    TEST_100 = join(DIR, 'convnet_test_100k.npy')  # noqa
    QUERIES = join(DIR, 'convnet_queries.npy')  # noqa
    TRUTH_TRAIN = join(DIR, 'truth_train.npy')  # noqa
    TRUTH = join(DIR, 'truth_test.npy')  # noqa
class Mnist:
    # following other papers (eg, "revisiting additive quantization"),
    # use mnist test set as queries and training set as database
    """MNIST as a nearest-neighbor benchmark: train split is the database,
    test split provides the queries."""
    DIR = join(DATA_DIR, 'mnist')  # noqa
    TEST = join(DIR, 'X_train.npy')  # noqa
    QUERIES = join(DIR, 'X_test.npy')  # noqa
    TRUTH = join(DIR, 'truth_Q=test_X=train.npy')  # noqa
class LabelMe:
    """File locations for the LabelMe GIST dataset.

    NOTE(review): TRAIN and TEST both point at labelme_train.npy -- the
    training set doubles as the database; confirm this is intentional.
    """
    DIR = join(DATA_DIR, 'labelme')  # noqa
    TRAIN = join(DIR, 'labelme_train.npy')  # noqa
    TEST = join(DIR, 'labelme_train.npy')  # noqa
    QUERIES = join(DIR, 'labelme_test.npy')  # noqa
    TRUTH = join(DIR, 'labelme_truth.npy')  # noqa
class Glove:
    """File locations for the GloVe word-embedding dataset (no separate
    training set; 100k/200k subsets stored as .txt)."""
    DIR = join(DATA_DIR, 'glove')  # noqa
    TEST = join(DIR, 'glove_test.npy')  # noqa
    TEST_100 = join(DIR, 'glove_100k.txt')  # noqa
    TEST_200 = join(DIR, 'glove_200k.txt')  # noqa
    QUERIES = join(DIR, 'glove_queries.npy')  # noqa
    TRUTH = join(DIR, 'glove_truth.npy')  # noqa
# note that we've only run the real experiments on the ones reported
# in the paper (i.e., no cherrypicking)
# datasets with files on disk, as opposed to the synthetic Random.* ones
ALL_REAL_DATASETS = [
    Gist, Sift1M, Sift10M, Deep1M, Convnet1M, Mnist, LabelMe, Glove]
# @_memory.cache  # cache this more efficiently than as text
def cached_load_txt(*args, **kwargs):
    """Thin wrapper around np.loadtxt (the memo-cache is currently off)."""
    loaded = np.loadtxt(*args, **kwargs)
    return loaded
def load_file(fname, *args, **kwargs):
    """Load an array from `fname`, dispatching on the extension:
    '.txt' goes through np.loadtxt, everything else through np.load."""
    extension = fname.split('.')[-1]
    if extension == 'txt':
        return np.loadtxt(fname, *args, **kwargs)
    return np.load(fname, *args, **kwargs)
def extract_random_rows(X, how_many, remove_from_X=True):
    """Randomly pick a contiguous block of `how_many` rows from X.

    Returns (X_remaining, rows). When remove_from_X is True, the selected
    block is removed from a copy of X; otherwise X is returned unchanged.
    Raises ValueError if X has fewer than `how_many` rows.
    """
    if how_many > len(X):
        raise ValueError("cannot extract {} rows from array of length {}"
                         .format(how_many, len(X)))
    # any start in [0, len(X) - how_many] yields a valid window; the old
    # bound np.random.randint(len(X) - how_many - 1) could never select the
    # final window and crashed outright when len(X) == how_many
    split_start = np.random.randint(0, len(X) - how_many + 1)
    split_end = split_start + how_many
    rows = np.copy(X[split_start:split_end])
    if remove_from_X:
        return np.vstack((X[:split_start], X[split_end:])), rows
    return X, rows
# @_memory.cache
def _load_complete_dataset(which_dataset, num_queries=10):
    """Load (X_train, Q, X_test, true_nn) for one of the dataset classes.

    Falls back when a dataset class lacks optional attributes: without
    TRAIN the test set doubles as the training set; without TRUTH,
    true_nn is None. The QUERIES fallback (carving queries out of the
    training set) is currently disabled via `assert False`.
    """
    X_test = np.load(which_dataset.TEST)
    try:
        X_train = np.load(which_dataset.TRAIN)
        print "using separate test set!"
    except AttributeError:
        print "No training set found for dataset {}".format(str(which_dataset))
        X_train = np.copy(X_test)
    try:
        Q = np.load(which_dataset.QUERIES)
    except AttributeError:
        # NOTE(review): deliberately disabled fallback -- confirm before
        # removing the assert and re-enabling random query extraction
        assert False  # TODO rm
        assert num_queries > 1
        X_train, Q = extract_random_rows(X_train, how_many=num_queries)
    try:
        true_nn = np.load(which_dataset.TRUTH)
    except AttributeError:
        true_nn = None
    return X_train, Q, X_test, true_nn
def _ground_truth_for_dataset(which_dataset):
return None # TODO
# XXX: not clear whether this function is correct in general, but works for
# 784D with the nzeros we get for 32 and 64 codebooks
def _insert_zeros(X, nzeros):
    """Return a copy of X with `nzeros` all-zero columns interleaved at
    roughly even intervals.

    The first and last columns of X are preserved in place (checked by the
    asserts at the end). Each loop iteration copies one `step`-wide slab of
    X into X_new, shifted right by the number of zero columns inserted so
    far, then re-copies the remaining tail so the array is always complete.
    """
    N, D = X.shape
    D_new = D + nzeros
    X_new = np.zeros((N, D_new), dtype=X.dtype)
    # width of each slab between inserted zero columns
    step = int(D / (nzeros + 1)) - 1

    for i in range(nzeros):
        in_start = step * i
        in_end = in_start + step
        # +i accounts for zero columns already inserted; +1 skips the new one
        out_start = in_start + i + 1
        out_end = out_start + step
        X_new[:, out_start:out_end] = X[:, in_start:in_end]

        # copy the (shrinking) tail so X_new is complete after every pass
        remaining_len = D - in_end
        out_remaining_len = D_new - out_end
        assert remaining_len == out_remaining_len
        assert remaining_len >= 0
        if remaining_len:
            X_new[:, out_end:out_end+remaining_len] = X[:, in_end:D]

    # check that we copied both the beginning and end properly
    assert np.array_equal(X[:, 0], X_new[:, 0])
    assert np.array_equal(X[:, -1], X_new[:, -1])
    return X_new
def ensure_num_cols_multiple_of(X, multiple_of):
    """Zero-pad X's columns (via _insert_zeros) so their count is a
    multiple of `multiple_of`; returns X unchanged when already aligned."""
    leftover = X.shape[1] % multiple_of
    if leftover == 0:
        return X
    return _insert_zeros(X, multiple_of - leftover)
# @_memory.cache
def load_dataset(which_dataset, N=-1, D=-1, norm_mean=False, norm_len=False,
                 num_queries=10, Ntrain=-1, D_multiple_of=-1):
    """Return (X_train, Q, X_test, true_nn) for a synthetic or real dataset.

    Parameters
    ----------
    which_dataset : a Random.* constant, a file path (str), or one of the
        classes in ALL_REAL_DATASETS.
    N, D : caps on rows / columns used (ignored if < 1 for real datasets).
    norm_mean : subtract the training-set column means from X/Q.
    norm_len : normalize rows to unit length (currently disabled).
    num_queries : number of query rows when queries must be generated.
    Ntrain : if > 0, generate a separate training set of this many rows.
    D_multiple_of : if > 1, zero-pad columns to a multiple of this value.
    """
    true_nn = None

    # --- randomly generated datasets
    if which_dataset == Random.UNIFORM:
        X_test = np.random.rand(N, D)
        X_train = np.random.rand(Ntrain, D) if Ntrain > 0 else X_test
        Q = np.random.rand(num_queries, D)
    elif which_dataset == Random.GAUSS:
        X_test = np.random.randn(N, D)
        X_train = np.random.randn(Ntrain, D) if Ntrain > 0 else X_test
        Q = np.random.randn(num_queries, D)
    elif which_dataset == Random.WALK:
        X_test = np.random.randn(N, D)
        X_test = np.cumsum(X_test, axis=1)
        X_train = np.copy(X_test)
        if Ntrain > 0:
            X_train = np.random.randn(Ntrain, D)
            # BUG FIX: np.cumsum without axis=1 flattened X_train to 1-D;
            # accumulate along rows, consistent with X_test and Q
            X_train = np.cumsum(X_train, axis=1)
        Q = np.random.randn(num_queries, D)
        Q = np.cumsum(Q, axis=-1)
    elif which_dataset == Random.BLOBS:
        # centers is D x D, and centers[i, j] = (i + j)
        centers = np.arange(D)
        centers = np.sum(np.meshgrid(centers, centers), axis=0)
        X_test, _ = make_blobs(n_samples=N, centers=centers)
        X_train = np.copy(X_test)
        if Ntrain > 0:
            X_train, _ = make_blobs(n_samples=Ntrain, centers=centers)
        Q, true_nn = make_blobs(n_samples=num_queries, centers=centers)

    # --- datasets that are just one block of a "real" dataset
    elif isinstance(which_dataset, str):
        X_test = load_file(which_dataset)
        X_test, Q = extract_random_rows(X_test, how_many=num_queries)
        X_train = np.copy(X_test)
        true_nn = _ground_truth_for_dataset(which_dataset)

    # --- "real" datasets with predefined train, test, queries, truth
    elif which_dataset in ALL_REAL_DATASETS:
        X_train, Q, X_test, true_nn = _load_complete_dataset(
            which_dataset, num_queries=num_queries)
    else:
        raise ValueError("unrecognized dataset {}".format(which_dataset))

    # apply row / column caps
    N = X_test.shape[0] if N < 1 else N
    D = X_test.shape[1] if D < 1 else D
    X_test, X_train = np.copy(X_test)[:N, :D], X_train[:N, :D]
    Q = Q[:, :D] if len(Q.shape) > 1 else Q[:D]

    # warn when train and test are the same data (aliased or equal prefix)
    train_is_test = X_train.base is X_test or X_test.base is X_train
    train_is_test = train_is_test or np.array_equal(X_train[:100], X_test[:100])
    if train_is_test:
        print("WARNING: Training data is also the test data!")

    if norm_mean:
        # center everything using the *training* statistics
        means = np.mean(X_train, axis=0)
        X_train -= means
        X_test -= means
        Q -= means
    if norm_len:
        # NOTE(review): deliberately disabled -- confirm before enabling
        assert False  # TODO rm
        X_test /= np.linalg.norm(X_test, axis=1, keepdims=True)
        X_train /= np.linalg.norm(X_train, axis=1, keepdims=True)
        Q /= np.linalg.norm(Q, axis=-1, keepdims=True)

    # TODO don't convert datasets that are originally uint8s to floats
    X_train = X_train.astype(np.float32)
    X_test = X_test.astype(np.float32)
    Q = Q.astype(np.float32)

    if D_multiple_of > 1:
        X_train = ensure_num_cols_multiple_of(X_train, D_multiple_of)
        X_test = ensure_num_cols_multiple_of(X_test, D_multiple_of)
        Q = ensure_num_cols_multiple_of(Q, D_multiple_of)

    return X_train, Q, X_test, true_nn
def read_yael_vecs(path, c_contiguous=True, limit_rows=-1, dtype=None):
    """Parse a Yael-format .fvecs/.ivecs/.bvecs file into an (nrows, dim)
    array.

    Each stored row is a 4-byte int32 dimension header followed by `dim`
    elements; the header columns are validated against the first row's
    dimension and then stripped. If `dtype` is None it is inferred from
    the file extension; `limit_rows` > 0 caps the number of rows read.
    """
    # first int32 of the file is the per-row dimension
    dim = np.fromfile(path, dtype=np.int32, count=2)[0]
    print "vector length = {}".format(dim)

    if dtype is None:
        if 'fvecs' in path:
            dtype = np.float32
        elif 'ivecs' in path:
            dtype = np.int32
        elif 'bvecs' in path:
            dtype = np.uint8
        else:
            raise ValueError("couldn't infer dtype from path {}".format(path))
    itemsize = np.dtype(dtype).itemsize

    assert dim > 0
    assert itemsize in (1, 2, 4)
    # the 4-byte dim header occupies this many leading elements per row
    cols_for_dim = 4 // itemsize
    row_size_bytes = 4 + dim * itemsize
    row_size_elems = row_size_bytes // itemsize
    limit = int(limit_rows) * row_size_elems if limit_rows > 0 else -1

    fv = np.fromfile(path, dtype=dtype, count=limit)
    fv = fv.reshape((-1, row_size_elems))

    # every row's header must repeat the same dimension
    if not all(fv.view(np.int32)[:, 0] == dim):
        raise IOError("Non-uniform vector sizes in " + path)

    fv = fv[:, cols_for_dim:]  # drop the header columns
    if c_contiguous:
        fv = fv.copy()
    return fv
if __name__ == '__main__':
    # No CLI behavior; the commented-out recipes below were one-off
    # dataset-conversion scripts kept for reference.
    pass
# ------------------------ clean up sift1b (bigann)
# data_dir = '/Volumes/MacHDD/datasets/sift1b/'
# out_dir = '/Volumes/MacSSD_OS/Users/davis/Desktop/datasets/sift1b/'
# path = data_dir + 'bigann_learn.bvecs'
# path = data_dir + 'bigann_base.bvecs'
# path = data_dir + 'queries.bvecs'
# out_path = out_dir + 'big_ann_learn_1M.npy'
# out_path = out_dir + 'big_ann_learn_10M.npy'
# out_path = out_dir + 'sift_10M.npy'
# out_path = out_dir + 'sift_queries.npy'
# limit_rows = int(1e6)
# limit_rows = int(10e6)
# X = read_yael_vecs(path, limit_rows=limit_rows)
# X = read_yael_vecs(path)
# print X.shape
# np.save(out_path, X)
# truth_dir = data_dir + 'gnd/'
# # truth_idxs_files = ['idx_1M', 'idx_10M', 'idx_100M']
# truth_idxs_files = ['idx_1000M']
# for f in truth_idxs_files:
# path = truth_dir + f + '.ivecs'
# out_path = out_dir + f + '.npy'
# print "unpacking {} to {}".format(path, out_path)
# X = read_yael_vecs(path)
# print X.shape
# np.save(out_path, X)
# ------------------------ clean up sift1m
# data_dir = '/Volumes/MacHDD/datasets/sift1m/'
# out_dir = '/Volumes/MacSSD_OS/Users/davis/Desktop/datasets/sift1m/'
# for fname in os.listdir(data_dir):
# in_path = data_dir + fname
# out_path = out_dir + fname.split('.')[0] + '.npy'
# print "unpacking {} to {}".format(in_path, out_path)
# X = read_yael_vecs(in_path)
# print X.shape
# np.save(out_path, X)
# # ------------------------ clean up Deep1M
# data_dir = os.path.expanduser('~/Desktop/datasets/nn-search/deep1M-raw/')
# out_dir = os.path.expanduser('~/Desktop/datasets/nn-search/deep1M/')
# print "in dir, out dir:", data_dir, out_dir
# for fname in os.listdir(data_dir):
# in_path = data_dir + fname
# out_path = out_dir + fname.split('.')[0] + '.npy'
# print "unpacking {} to {}".format(in_path, out_path)
# X = read_yael_vecs(in_path)
# print X.shape
# np.save(out_path, X)
# # ------------------------ clean up Convnet1M
# >>> import numpy as np
# >>> from scipy.io import loadmat
# >>> d = loadmat('features_m_128.mat')
# >>> contig = np.ascontiguousarray
# >>> savedir = '../convnet1m/'
# >>> np.save(savedir + 'convnet_train.npy', contig(d['feats_m_128_train']))
# >>> np.save(savedir + 'convnet_test.npy', contig(d['feats_m_128_test']))
# >>> np.save(savedir + 'convnet_base.npy', contig(d['feats_m_128_base']))
# ------------------------ clean up deep1b
# data_dir = '/Volumes/MacHDD/datasets/deep1b/'
# out_dir = '/Volumes/MacSSD_OS/Users/davis/Desktop/datasets/deep1b/'
# # expected_cols = 96
# # equivalent_elements_in_first_1M = int(1e6) * (1 + expected_cols)
# arrays = []
# # arrays.append(('deep1B_queries.fvecs', 'deep_queries.npy', -1))
# # arrays.append(('deep1B_groundtruth.ivecs', 'deep_true_nn_idxs.npy', -1))
# # arrays.append(('deep10M.fvecs', 'deep_1M.npy', 1e6))
# arrays.append(('deep10M.fvecs', 'deep_10M.npy', -1))
# for in_file, out_file, limit in arrays:
# in_path = data_dir + in_file
# out_path = out_dir + out_file
# X = read_yael_vecs(in_path, limit_rows=limit)
# print "unpacking {} to {}".format(in_path, out_path)
# print X.shape
# np.save(out_path, X)
# ------------------------ clean up LabelMe
# >>> from scipy.io import loadmat
# >>> d = loadmat('LabelMe_gist.mat')
# >>> for k, v in d.iteritems():
# ... try:
# ... print k, v.shape
# ... except:
# ... pass
# ...
# gist (22019, 512)
# img (32, 32, 3, 22019)
# nmat (1000, 1000, 20)
# __header__ param (1, 1)
# __globals__ seg (32, 32, 22019)
# names (1, 3597)
# DistLM (22019, 22019)
# __version__ ndxtrain (1, 20019)
# ndxtest (1, 2000)
#
# okay, no idea what most of these are even with the readme...
#
# >>> np.save('labelme_train_idxs', d['ndxtrain']) # training data idxs
# >>> np.save('labelme_test_idxs', d['ndxtest']) # test data idxs
# >>> np.save('labelme_all_gists', d['gist']) # actual gist descriptors
| mit |
jseabold/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
#
# Build two 4-feature datasets X and Y that share two latent variables:
# l1 drives the first two columns, l2 the last two, plus iid Gaussian noise.
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)

latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))

# Use floor division: under Python 3, `n / 2` is a float and floats are not
# valid slice indices (TypeError).
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]

print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
# Fit a 2-component symmetric PLS model on the training half, then project
# both halves of X and Y onto the shared latent components.
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
# Diagonal panels (221, 224): X scores vs Y scores for the same component --
# high correlation is expected here, since PLS maximizes shared covariance.
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
          np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
          np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
# Off-diagonal panels (222, 223): component 1 vs component 2 within the same
# dataset -- low correlation is expected (components are near-orthogonal).
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
          % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
          % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
#
# Regression problem with a 3-dimensional response Y = XB + noise, where only
# the first two features of X carry signal (coefficients 1 and 2).
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
#
# Same setting with a scalar response y driven by the first two features.
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
#
# Fit CCA on the train split of the first dataset and project both splits.
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
# Bug fix: project with the fitted CCA model -- the original code mistakenly
# reused the earlier PLSCanonical instance (`plsca`), leaving `cca` unused.
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
samuel1208/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 75 | 34122 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
# Toy corpora used throughout the tests: a "junk food" corpus and a
# "healthy food" corpus that share common high-frequency terms
# ("the", "copyright") acting as stop words.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)
# Concatenation of both corpora, used when a single larger corpus is needed.
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
    """Test preprocessor: strip accents, then convert to upper case."""
    without_accents = strip_accents_unicode(s)
    return without_accents.upper()
def strip_eacute(s):
    """Test preprocessor: replace every e-acute character with a plain 'e'."""
    return 'e'.join(s.split('\xe9'))
def split_tokenize(s):
    """Test tokenizer: split the document on runs of whitespace."""
    return s.split(None)
def lazy_analyze(s):
    """Test analyzer: ignore the input and emit one fixed feature."""
    del s  # intentionally unused
    return ['the_ultimate_feature']
def test_strip_accents():
    """strip_accents_unicode maps accented characters to their base form."""
    cases = [
        # classical latin accentuated symbols
        ('\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb', 'aaaaaaceeee'),
        ('\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd',
         'iiiinooooouuuuy'),
        # arabic: halef with a hamza below -> simple halef
        ('\u0625', '\u0627'),
        # mix of accentuated and plain letters
        ("this is \xe0 test", 'this is a test'),
    ]
    for raw, expected in cases:
        assert_equal(strip_accents_unicode(raw), expected)
def test_to_ascii():
    """strip_accents_ascii transliterates to ASCII, dropping the rest."""
    cases = [
        # classical latin accentuated symbols
        ('\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb', 'aaaaaaceeee'),
        ('\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd',
         'iiiinooooouuuuy'),
        # arabic: halef with a hamza below has no direct ascii match
        ('\u0625', ''),
        # mix of accentuated and plain letters
        ("this is \xe0 test", 'this is a test'),
    ]
    for raw, expected in cases:
        assert_equal(strip_accents_ascii(raw), expected)
def test_word_analyzer_unigrams():
    """Word analyzers must lowercase, strip accents and drop 1-char tokens,
    for both the counting and the hashing vectorizer, including file input
    and custom preprocessor/tokenizer overrides."""
    for Vectorizer in (CountVectorizer, HashingVectorizer):
        wa = Vectorizer(strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                    'etait', 'pas', 'tres', 'bon']
        assert_equal(wa(text), expected)
        text = "This is a test, really.\n\n I met Harry yesterday."
        expected = ['this', 'is', 'test', 'really', 'met', 'harry',
                    'yesterday']
        assert_equal(wa(text), expected)
        # input='file': the analyzer must read from a file-like object
        wa = Vectorizer(input='file').build_analyzer()
        text = StringIO("This is a test with a file-like object!")
        expected = ['this', 'is', 'test', 'with', 'file', 'like',
                    'object']
        assert_equal(wa(text), expected)
        # with custom preprocessor
        wa = Vectorizer(preprocessor=uppercase).build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                " c'\xe9tait pas tr\xeas bon.")
        expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
                    'ETAIT', 'PAS', 'TRES', 'BON']
        assert_equal(wa(text), expected)
        # with custom tokenizer (whitespace split keeps punctuation attached)
        wa = Vectorizer(tokenizer=split_tokenize,
                        strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
                    "c'etait", 'pas', 'tres', 'bon.']
        assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
    """ngram_range=(1, 2) must emit all unigrams followed by all bigrams."""
    wa = CountVectorizer(analyzer="word", strip_accents='unicode',
                         ngram_range=(1, 2)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
                'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
                'etait pas', 'pas tres', 'tres bon']
    assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
    """A vocabulary mapping two terms to the same index must be rejected."""
    vocab = {"pizza": 0, "beer": 0}
    vect = CountVectorizer(vocabulary=vocab)
    # Vocabulary validation happens lazily at fit time (see
    # test_non_unique_vocab), so fit must be called to trigger the error.
    # The original test only invoked the constructor and therefore passed
    # vacuously even if no ValueError was ever raised.
    try:
        vect.fit(["pasta_siziliana"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
    """A vocabulary whose indices leave gaps in range(len) must be rejected."""
    vocab = {"pizza": 1, "beer": 2}
    vect = CountVectorizer(vocabulary=vocab)
    # Validation is fit-time (see test_non_unique_vocab), and the test must
    # fail explicitly if no error is raised -- the original version passed
    # vacuously when CountVectorizer accepted the bad vocabulary.
    try:
        vect.fit(["pasta_verdura"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
    """get_stop_words must resolve 'english', reject unknown strings and
    pass explicit lists through unchanged."""
    cv = CountVectorizer()
    cv.set_params(stop_words='english')
    assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
    # unknown stop-word identifiers must raise, whether str or unicode
    cv.set_params(stop_words='_bad_str_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    cv.set_params(stop_words='_bad_unicode_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    stoplist = ['some', 'other', 'words']
    cv.set_params(stop_words=stoplist)
    assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
    """Fitting must raise ValueError whenever the vocabulary ends up empty."""
    try:
        # explicitly empty user-provided vocabulary
        vect = CountVectorizer(vocabulary=[])
        vect.fit(["foo"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
    try:
        v = CountVectorizer(max_df=1.0, stop_words="english")
        # fit on stopwords only
        v.fit(["to be or not to be", "and me too", "and so do you"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
    """Refitting on a different corpus must rebuild the vocabulary."""
    vectorizer = CountVectorizer()
    counts_first = vectorizer.fit_transform(ALL_FOOD_DOCS[:5])
    counts_second = vectorizer.fit_transform(ALL_FOOD_DOCS[5:])
    # The two sub-corpora use different word sets, so the feature dimension
    # must differ between the two fits.
    assert_not_equal(counts_first.shape[1], counts_second.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
    """End-to-end checks of CountVectorizer / TfidfTransformer /
    TfidfVectorizer on the food corpus: vocabulary building, stop-word
    pruning, tf-idf weighting, and error handling for unfitted or
    invalid configurations."""
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1
    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizer give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()
        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)
        # stop word from the fixed list
        assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer DF thresholding
        # words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words of extraction
        # artifacts)
        assert_false("copyright" in vocabulary)
        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)
    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert_equal(t2.idf_, None)
    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)
    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)
    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')
    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert_false(tv.fixed_vocabulary_)
    assert_array_almost_equal(tfidf, tfidf2)
    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)
    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError, v3.transform, train_data)
    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)
    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)
    # error with bad analyzer type. Bug fix: the original code did
    # `v3.set_params = '_invalid_analyzer_type_'`, which clobbered the
    # set_params method instead of configuring the invalid analyzer (the
    # assertion then passed only because of the leftover bad strip_accents).
    v3.set_params(analyzer='_invalid_analyzer_type_')
    assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
    """HashingVectorizer: output shape/dtype, sign and normalization of the
    hashed features, and the non_negative + ngram variant."""
    v = HashingVectorizer()
    X = v.transform(ALL_FOOD_DOCS)
    token_nnz = X.nnz
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # By default the hashed values receive a random sign and l2 normalization
    # makes the feature values bounded
    assert_true(np.min(X.data) > -1)
    assert_true(np.min(X.data) < 0)
    assert_true(np.max(X.data) > 0)
    assert_true(np.max(X.data) < 1)
    # Check that the rows are normalized. Bug fix: index row i, not row 0 --
    # the original loop checked the first row over and over.
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
    # Check vectorization with some non-default parameters
    v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
    X = v.transform(ALL_FOOD_DOCS)
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # ngrams generate more non zeros
    ngrams_nnz = X.nnz
    assert_true(ngrams_nnz > token_nnz)
    assert_true(ngrams_nnz < 2 * token_nnz)
    # makes the feature values bounded
    assert_true(np.min(X.data) > 0)
    assert_true(np.max(X.data) < 1)
    # Check that the rows are L1-normalized (same row-index fix as above)
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
    """max_features must keep only the most frequent terms and record the
    pruned ones in the stop_words_ attribute."""
    vec_factories = (
        CountVectorizer,
        TfidfVectorizer,
    )
    expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
    expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
                               u'sparkling', u'water', u'the'])
    for vec_factory in vec_factories:
        # test bounded number of extracted features
        vectorizer = vec_factory(max_df=0.6, max_features=4)
        vectorizer.fit(ALL_FOOD_DOCS)
        assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
        assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
    # A fitted TfidfTransformer round-tripped through pickle must keep its
    # class and produce identical output.
    counts = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    transformer = TfidfTransformer().fit(counts)
    unpickled = pickle.loads(pickle.dumps(transformer))
    assert_equal(type(unpickled), transformer.__class__)
    assert_array_equal(
        unpickled.fit_transform(counts).toarray(),
        transformer.fit_transform(counts).toarray())
def test_non_unique_vocab():
    # A user-supplied vocabulary containing duplicates must be rejected
    # with a ValueError at fit time.
    duplicated_vocab = ['a', 'b', 'c', 'a', 'a']
    vectorizer = CountVectorizer(vocabulary=duplicated_vocab)
    assert_raises(ValueError, vectorizer.fit, [])
def test_hashingvectorizer_nan_in_docs():
    # np.nan can appear when using pandas to load text fields from a csv file
    # with missing values.  The vectorizer must fail fast with a clear,
    # specific error message rather than a confusing downstream failure.
    message = "np.nan is an invalid document, expected byte or unicode string."
    exception = ValueError

    def func():
        hv = HashingVectorizer()
        hv.fit_transform(['hello world', np.nan, 'hello hello'])

    assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
    # Non-regression test: TfidfVectorizer used to ignore its "binary" param.
    v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
    assert_true(v.binary)

    # With binary=True (and idf/norm disabled) counts are clipped to {0, 1},
    # so the repeated 'hello' in the second document still yields 1.
    X = v.fit_transform(['hello world', 'hello hello']).toarray()
    assert_array_equal(X.ravel(), [1, 1, 1, 0])
    X2 = v.transform(['hello world', 'hello hello']).toarray()
    assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
    # The public idf_ property must mirror the fitted internal transformer.
    vectorizer = TfidfVectorizer(use_idf=True).fit(JUNK_FOOD_DOCS)
    assert_array_almost_equal(vectorizer.idf_, vectorizer._tfidf.idf_)
def test_vectorizer_vocab_clone():
    # Cloning a vectorizer built from an explicit vocabulary must yield an
    # independent estimator that learns the same vocabulary_ mapping.
    vect_vocab = TfidfVectorizer(vocabulary=["the"])
    vect_vocab_clone = clone(vect_vocab)
    vect_vocab.fit(ALL_FOOD_DOCS)
    vect_vocab_clone.fit(ALL_FOOD_DOCS)
    assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
eickenberg/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
    """
    Custom linear kernel weighting the first feature twice as heavily:

                    (2 0)
        k(x, y) = x (   ) y.T
                    (0 1)
    """
    weights = np.array([[2.0, 0.0], [0.0, 1.0]])
    return x.dot(weights).dot(y.T)
h = .02  # step size in the mesh

# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
# The mesh extends one unit beyond the data range on each side.
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Flatten the grid into (n_points, 2) samples for prediction.
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
          ' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
stainbank/simulocloud | simulocloud/visualise.py | 1 | 5885 | """ visualise.py
See, plot and visually explore pointclouds
"""
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
import itertools
import simulocloud.exceptions
# Mapping of dimension to index in bounds
_IDIM = {'x': 0, 'y': 1, 'z': 2}
def scatter(pcs, dims, bounds=None, highlight=None, n=10000,
            colours=None, labels=None, title=None, figsize=(6, 6)):
    """Create a scatter plot of one or more point clouds in 2D or 3D.

    Arguments:
    ----------
    pcs: iterable of PointCloud instances
        point clouds to plot
    dims: str
        dimensions to plot on x, y (and optionally z) axes
        e.g. 'xz' -> x vs z (i.e. 2D cross-section); 'xyz' -> x vs y vs z (3D)
    bounds: pointcloud.Bounds namedtuple (optional)
        (minx, miny, minz, maxx, maxy, maxz) bounds to crop pointclouds to
    highlight: tuple or pointcloud.Bounds nametuple (optional)
        (minx, miny, minz, maxx, maxy, maxz) bounds of area to highlight
    n: int (default: 1e4)
        max number of points to plot per point cloud
    colours: iterable of valid matplotlib color arguments (optional)
        colours to use for each pointcloud
    labels: iterable of str (optional)
        labels for each pointcloud
        a, b, c etc. used by default
    title: str (optional)
        figure title
    figsize: tuple (default: (6, 6))
        (width, height) figure dimensions in inches

    Returns:
    --------
    matplotlib.figure.Figure instance
    """
    # Parse dims as 2D or 3D; the dict lookups reject anything but 2 or 3
    # dims, and the AttributeError path rejects non-str input.
    try:
        dims = dims.lower()
        ndims = len(dims)
        projection = {2: None, 3: '3d'}[ndims]
        # trace maps bounds -> iterable of outlines: a single rectangle in
        # 2D, six cuboid faces in 3D.
        trace = {2: lambda x0y0x1y1: (_trace_rectangle(*x0y0x1y1),),
                 3: _trace_cuboid}[ndims]
    except(AttributeError):
        raise simulocloud.exceptions.BadDims('dims must be str (not {})'.format(type(dims)))
    except(KeyError):
        raise simulocloud.exceptions.WrongNDims('dims must have either 2 or 3 dims (had {})'.format(ndims))

    # Set up figure
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111, projection=projection)
    ax.set_aspect('equal')

    # Draw plots (cropping/downsampling is lazy, one cloud at a time)
    pcs = _crop_and_sample_pointclouds(pcs, bounds, n)
    for arrs, kwargs in _iter_scatter_args(pcs, dims, colours, labels):
        ax.scatter(s=2, edgecolors='none', *arrs, **kwargs)

    # Highlight area by outlining it in a colour unlikely to clash
    if highlight is not None:
        hbounds = _reorient_bounds(highlight, dims)
        rects = trace(hbounds)
        for rect in rects:
            ax.plot(*rect, c='fuchsia')

    # Annotate figure
    ax.legend()
    ax.set_xlabel(dims[0].upper())
    ax.set_ylabel(dims[1].upper())
    if ndims == 3:
        ax.set_zlabel(dims[2].upper())
    if title is not None:
        ax.set_title(title)

    return fig
def _iter_scatter_args(pcs, dims, colours, labels):
    """Yield plotting arrays and matplotlib scatter kwargs per pointcloud.

    colours/labels default to infinite streams of None, which lets
    matplotlib pick its own colours and leaves points unlabelled.
    NOTE: uses itertools.izip, i.e. this module targets Python 2.
    """
    # Generate defaults
    if colours is None:
        colours = _iternones()
    if labels is None:
        # labels = _iteralphabet()
        labels = _iternones()

    for pc, colour, label in itertools.izip(pcs, colours, labels):
        arrs = (getattr(pc, dim.lower()) for dim in dims)  # extract coordinates
        kwargs = {'c': colour,
                  'label': label}
        yield arrs, kwargs
def _crop_and_sample_pointclouds(pcs, bounds, n):
"""Return generator of cropped point clouds with maximum n points."""
if bounds is not None:
pcs = (pc.crop(bounds) for pc in pcs)
return (pc.downsample(n) if len(pc)>n else pc for pc in pcs)
def _iternones():
"""Return infinite generator yielding None."""
while True:
yield None
def _iteralphabet():
    """Return infinite generator yielding str in sequence a..z, aa..zz, etc.

    NOTE: uses xrange, i.e. Python 2 only.  The `else` clause on the inner
    for-loop always runs (the loop never breaks), so each pass through the
    alphabet simply increments the repetition count n.
    """
    n = 1
    while True:
        for i in xrange(26):
            yield chr(97 + i) * n  # 97 == ord('a')
        else:
            n += 1
def _trace_rectangle(x0, y0, x1, y1):
"""Generate 2D coordinates of points outlining rectangle clockwise.
Arguments
---------
x0, y0: float
coordinates of lowerleftmost rectangle vertex
x1, y1: float
coordinates of upperrightmost rectangle vertex
Returns
-------
ndarray (shape: (2, 5))
x and y coordinates of vertices ABCDA
"""
return np.array([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)]).T
def _trace_cuboid(bounds):
    """Generate 3D coordinates of points outlining cuboid faces.

    Arguments
    ---------
    bounds: tuple or Bounds namedtuple
        (minx, miny, minz, maxx, maxy, maxz) defining cuboid

    Returns
    -------
    ndarray (shape: (6, 3, 5))
        x, y and z coordinates of vertices ABCDA for each cuboid face
    """
    cuboid = np.empty((6, 3, 5))
    # Each (ip, ix, iy) triple picks one axis (ip) to hold constant and the
    # two remaining axes (ix, iy) to trace a rectangle in; bounds[i+3] is the
    # max counterpart of min bound i.
    for ip, ix, iy in ((0, 1, 2), (1, 0, 2), (2, 0, 1)):
        rect = _trace_rectangle(bounds[ix], bounds[iy], bounds[ix+3], bounds[iy+3])
        # Write the same rectangle onto the opposing pair of faces (min and
        # max value of the constant axis ip).
        for i in (ip, ip+3):
            cuboid[i, ix], cuboid[i, iy] = rect
            cuboid[i, ip] = np.repeat(bounds[i], 5)
    return cuboid
def _reorient_bounds(bounds, dims):
    """Reorder bounds to the specified 2D or 3D spatial orientation.

    Arguments
    ---------
    bounds: tuple or bounds namedtuple
        (minx, miny, minz, maxx, maxy, maxz)
    dims: str
        dims to reorient bounds to (e.g. 'yz' or 'xzy')

    Returns
    -------
    tuple (len 2*len(dims))
        mins and maxs in order of dims

    Usage
    -----
    >>> bounds = Bounds(minx=10., miny=35., minz=6.,
    ...                 maxx=20., maxy=55., maxz=9.)
    >>> _reorient_bounds(bounds, 'xzy')  # 3D
    (10.0, 6.0, 35.0, 20.0, 9.0, 55.0)
    >>> _reorient_bounds(bounds, 'zx')  # 2D
    (6.0, 10.0, 9.0, 20.0)
    """
    # First all mins (offset 0) then all maxs (offset 3), in dims order.
    reordered = []
    for offset in (0, 3):
        for dim in dims:
            reordered.append(bounds[_IDIM[dim] + offset])
    return tuple(reordered)
| mit |
tejasapatil/spark | python/pyspark/sql/utils.py | 6 | 6334 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
class CapturedException(Exception):
    """Base class for exceptions captured from the JVM side.

    Carries the JVM exception description and its Java stack trace so the
    Python caller can see where the failure originated.
    """
    def __init__(self, desc, stackTrace):
        self.desc = desc            # human-readable error description
        self.stackTrace = stackTrace  # joined Java stack trace string

    def __str__(self):
        # Only the description is shown; the stack trace stays on the
        # attribute for callers that want it.
        return repr(self.desc)
# Concrete CapturedException subclasses, one per JVM exception family that
# capture_sql_exception() recognises.

class AnalysisException(CapturedException):
    """
    Failed to analyze a SQL query plan.
    """


class ParseException(CapturedException):
    """
    Failed to parse a SQL command.
    """


class IllegalArgumentException(CapturedException):
    """
    Passed an illegal or inappropriate argument.
    """


class StreamingQueryException(CapturedException):
    """
    Exception that stopped a :class:`StreamingQuery`.
    """


class QueryExecutionException(CapturedException):
    """
    Failed to execute a query.
    """
def capture_sql_exception(f):
    """Wrap f so Py4J JVM errors are re-raised as CapturedException subclasses.

    The JVM exception's string form is inspected by class-name prefix; any
    unrecognised exception is re-raised unchanged.
    """
    def deco(*a, **kw):
        try:
            return f(*a, **kw)
        except py4j.protocol.Py4JJavaError as e:
            s = e.java_exception.toString()
            stackTrace = '\n\t at '.join(map(lambda x: x.toString(),
                                             e.java_exception.getStackTrace()))
            # NOTE: two prefixes map to AnalysisException -- the exception
            # class itself and anything from the catalyst.analysis package.
            if s.startswith('org.apache.spark.sql.AnalysisException: '):
                raise AnalysisException(s.split(': ', 1)[1], stackTrace)
            if s.startswith('org.apache.spark.sql.catalyst.analysis'):
                raise AnalysisException(s.split(': ', 1)[1], stackTrace)
            if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
                raise ParseException(s.split(': ', 1)[1], stackTrace)
            if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
                raise StreamingQueryException(s.split(': ', 1)[1], stackTrace)
            if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
                raise QueryExecutionException(s.split(': ', 1)[1], stackTrace)
            if s.startswith('java.lang.IllegalArgumentException: '):
                raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
            # Unknown JVM exception: propagate the original Py4JJavaError.
            raise
    return deco
def install_exception_handler():
    """
    Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.

    When calling Java API, it will call `get_return_value` to parse the returned object.
    If any exception happened in JVM, the result will be Java exception object, it raise
    py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
    could capture the Java exception and throw a Python one (with the same error message).

    It's idempotent, could be called multiple times.
    """
    original = py4j.protocol.get_return_value
    # The original `get_return_value` is not patched, it's idempotent.
    patched = capture_sql_exception(original)
    # only patch the one used in py4j.java_gateway (call Java API);
    # py4j.protocol.get_return_value itself is left untouched.
    py4j.java_gateway.get_return_value = patched
def toJArray(gateway, jtype, arr):
    """
    Convert python list to java type array

    :param gateway: Py4j Gateway
    :param jtype: java type of element in array
    :param arr: python type list
    """
    jarr = gateway.new_array(jtype, len(arr))
    for index, element in enumerate(arr):
        jarr[index] = element
    return jarr
def require_minimum_pandas_version():
    """ Raise ImportError if minimum version of Pandas is not installed
    """
    # TODO(HyukjinKwon): Relocate and deduplicate the version specification.
    minimum_pandas_version = "0.19.2"

    from distutils.version import LooseVersion
    try:
        import pandas
        have_pandas = True
    except ImportError:
        have_pandas = False
    # Two distinct failure modes with distinct messages: not installed at
    # all vs. installed but too old.
    if not have_pandas:
        raise ImportError("Pandas >= %s must be installed; however, "
                          "it was not found." % minimum_pandas_version)
    if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
        raise ImportError("Pandas >= %s must be installed; however, "
                          "your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
    """ Raise ImportError if minimum version of pyarrow is not installed
    """
    # TODO(HyukjinKwon): Relocate and deduplicate the version specification.
    minimum_pyarrow_version = "0.8.0"

    from distutils.version import LooseVersion
    try:
        import pyarrow
        have_arrow = True
    except ImportError:
        have_arrow = False
    # Mirrors require_minimum_pandas_version: missing vs. too-old messages.
    if not have_arrow:
        raise ImportError("PyArrow >= %s must be installed; however, "
                          "it was not found." % minimum_pyarrow_version)
    if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
        raise ImportError("PyArrow >= %s must be installed; however, "
                          "your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
class ForeachBatchFunction(object):
    """
    This is the Python implementation of Java interface 'ForeachBatchFunction'. This wraps
    the user-defined 'foreachBatch' function such that it can be called from the JVM when
    the query is active.
    """

    def __init__(self, sql_ctx, func):
        self.sql_ctx = sql_ctx  # SQLContext used to wrap incoming Java DataFrames
        self.func = func        # user-supplied foreachBatch callback

    def call(self, jdf, batch_id):
        # Invoked from the JVM: wrap the Java DataFrame and delegate to the
        # user function.
        from pyspark.sql.dataframe import DataFrame
        try:
            self.func(DataFrame(jdf, self.sql_ctx), batch_id)
        except Exception as e:
            # Keep the error on the instance for inspection, then propagate
            # back through Py4J.
            self.error = e
            raise e

    class Java:
        # Declares to Py4J which Java interface this object implements.
        implements = ['org.apache.spark.sql.execution.streaming.sources.PythonForeachBatchFunction']
| apache-2.0 |
THEdavehogue/punxsutawney_phil_predictor | neural_net.py | 1 | 3350 | import theano
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, RMSprop, Adagrad, Adam
import pandas as pd
import numpy as np
import cPickle as pickle
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score
def load_split_data(filename):
    """Load the pickled groundhog dataset and split into train/test sets.

    Drops the one-hot weather-condition columns, separates the integer
    'prediction' target, and performs a stratified 80/20 split.

    Returns (df, X_train, X_test, y_train, y_test); note df has already had
    the 'prediction' column popped out.
    """
    df = pd.read_pickle(filename)
    df['prediction'] = df['prediction'].astype(int)
    # Drop weather-condition indicator columns not used as features.
    df = df.drop(['Mostly Cloudy', 'Clear', 'Partly Cloudy', 'Flurries', \
                  'Light Snow', 'Foggy', 'Snow', 'Rain'], axis=1)
    y = df.pop('prediction').values
    X = df.values
    # stratify=y keeps the class balance identical in train and test splits.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y)
    return df, X_train, X_test, y_train, y_test
def define_nn_mlp_model(X_train, y_train):
    ''' Defines a multi-layer-perceptron neural network.

    Two hidden sigmoid layers of 64 units each feed a single sigmoid output
    unit for binary classification, trained with binary cross-entropy.

    :param X_train: training feature matrix; only .shape[1] is used to size
                    the input layer
    :param y_train: unused; kept for interface compatibility with callers
    :return: compiled keras Sequential model
    '''
    model = Sequential()
    model.add(Dense(64, input_dim=X_train.shape[1],
                    init='normal',
                    activation='sigmoid'))
    model.add(Dense(64, init='normal', activation='sigmoid'))
    model.add(Dense(input_dim=64,
                    output_dim=1,
                    init='normal',
                    activation='sigmoid'))
    # Alternative optimizers kept for experimentation:
    # sgd = SGD(lr=0.001, decay=1e-7, momentum=0.9, nesterov=True)
    # rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
    adam = Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    # BUG FIX: previously the string 'adam' was passed to compile(), which
    # silently used Keras's default Adam (lr=0.001) and ignored the
    # configured optimizer object above.  Pass the object itself.
    model.compile(loss='binary_crossentropy', optimizer=adam, metrics=["accuracy"])
    return model
def print_output(model, y_train, y_test, rng_seed):
    '''prints model accuracy results

    NOTE(review): this function reads X_train and X_test from module scope
    (they are not parameters) -- it only works when called after the
    module-level split has been created.  Python 2 print statements.
    '''
    y_train_pred = model.predict_classes(X_train, verbose=0).squeeze()
    y_test_pred = model.predict_classes(X_test, verbose=0).squeeze()
    # y_train_pred.resize(1, y_train_pred.shape[0])
    print '\nRandom number generator seed: ', rng_seed
    print '\nFirst 20 labels:      ', y_train[:20]
    print 'First 20 predictions: ', y_train_pred[:20]
    # Training-set metrics
    train_acc = accuracy_score(y_train, y_train_pred)
    train_prec = precision_score(y_train, y_train_pred)
    train_rec = recall_score(y_train, y_train_pred)
    print '\nTraining accuracy: %.2f%%' % (train_acc * 100), \
        '\nTraining precision: %.2f%%' % (train_prec * 100), \
        '\nTraining recall: %.2f%%' % (train_rec * 100)
    # Held-out test-set metrics
    test_acc = accuracy_score(y_test, y_test_pred)
    test_prec = precision_score(y_test, y_test_pred)
    test_rec = recall_score(y_test, y_test_pred)
    print 'Test accuracy: %.2f%%' % (test_acc * 100), \
        '\nTest precision: %.2f%%' % (test_prec * 100), \
        '\nTest recall: %.2f%%' % (test_rec * 100)
    if test_acc < 0.94:
        print '\nMan, your test accuracy is bad!'
    else:
        print "\nYou've made some improvements, I see..."
def pickle_it(model, filename):
    """Serialize model to data/<filename> with pickle."""
    path = 'data/{}'.format(filename)
    with open(path, 'wb') as out_file:
        pickle.dump(model, out_file)
if __name__ == '__main__':
    # Fixed seed recorded in the report output (note: not applied to numpy
    # or keras here -- TODO confirm whether seeding was intended).
    rng_seed = 42
    df, X_train, X_test, y_train, y_test = load_split_data('data/groundhog_hourly_scrubbed.pkl')
    model = define_nn_mlp_model(X_train, y_train)
    # 10% of the training data is held out for per-epoch validation.
    model.fit(X_train, y_train, nb_epoch=35, batch_size=2, verbose=1, validation_split=0.1)
    # print_output reads X_train/X_test from this module scope.
    print_output(model, y_train, y_test, rng_seed)
    pickle_it(model, 'nn_model.pkl')
| gpl-3.0 |
LLNL/spack | var/spack/repos/builtin/packages/py-pandas/package.py | 1 | 4929 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPandas(PythonPackage):
    """pandas is a fast, powerful, flexible and easy to use open source
    data analysis and manipulation tool, built on top of the Python
    programming language."""

    homepage = "https://pandas.pydata.org/"
    url = "https://pypi.io/packages/source/p/pandas/pandas-1.0.5.tar.gz"

    maintainers = ['adamjstewart']

    # Modules probed by Spack's post-install import test.
    import_modules = [
        'pandas', 'pandas.compat', 'pandas.core', 'pandas.util', 'pandas.io',
        'pandas.tseries', 'pandas._libs', 'pandas.plotting', 'pandas.arrays',
        'pandas.api', 'pandas.errors', 'pandas._config', 'pandas.compat.numpy',
        'pandas.core.reshape', 'pandas.core.tools', 'pandas.core.util',
        'pandas.core.dtypes', 'pandas.core.groupby', 'pandas.core.internals',
        'pandas.core.computation', 'pandas.core.arrays', 'pandas.core.ops',
        'pandas.core.sparse', 'pandas.core.indexes', 'pandas.io.msgpack',
        'pandas.io.formats', 'pandas.io.excel', 'pandas.io.json',
        'pandas.io.sas', 'pandas.io.clipboard', 'pandas._libs.tslibs',
        'pandas.plotting._matplotlib', 'pandas.api.types',
        'pandas.api.extensions'
    ]

    # sha256 checksums of the release tarballs, newest first.
    version('1.0.5', sha256='69c5d920a0b2a9838e677f78f4dde506b95ea8e4d30da25859db6469ded84fa8')
    version('1.0.4', sha256='b35d625282baa7b51e82e52622c300a1ca9f786711b2af7cbe64f1e6831f4126')
    version('1.0.3', sha256='32f42e322fb903d0e189a4c10b75ba70d90958cc4f66a1781ed027f1a1d14586')
    version('1.0.2', sha256='76334ba36aa42f93b6b47b79cbc32187d3a178a4ab1c3a478c8f4198bcd93a73')
    version('1.0.1', sha256='3c07765308f091d81b6735d4f2242bb43c332cc3461cae60543df6b10967fe27')
    version('1.0.0', sha256='3ea6cc86931f57f18b1240572216f09922d91b19ab8a01cf24734394a3db3bec')
    version('0.25.3', sha256='52da74df8a9c9a103af0a72c9d5fdc8e0183a90884278db7f386b5692a2220a4')
    version('0.25.2', sha256='ca91a19d1f0a280874a24dca44aadce42da7f3a7edb7e9ab7c7baad8febee2be')
    version('0.25.1', sha256='cb2e197b7b0687becb026b84d3c242482f20cbb29a9981e43604eb67576da9f6')
    version('0.25.0', sha256='914341ad2d5b1ea522798efa4016430b66107d05781dbfe7cf05eba8f37df995')
    version('0.24.2', sha256='4f919f409c433577a501e023943e582c57355d50a724c589e78bc1d551a535a2')
    version('0.24.1', sha256='435821cb2501eabbcee7e83614bd710940dc0cf28b5afbc4bdb816c31cec71af')
    version('0.23.4', sha256='5b24ca47acf69222e82530e89111dd9d14f9b970ab2cd3a1c2c78f0c4fbba4f4')
    version('0.21.1', sha256='c5f5cba88bf0659554c41c909e1f78139f6fce8fa9315a29a23692b38ff9788a')
    version('0.20.0', sha256='54f7a2bb2a7832c0446ad51d779806f07ec4ea2bb7c9aea4b83669fa97e778c4')
    version('0.19.2', sha256='6f0f4f598c2b16746803c8bafef7c721c57e4844da752d36240c0acf97658014')
    version('0.19.0', sha256='4697606cdf023c6b7fcb74e48aaf25cf282a1a00e339d2d274cf1b663748805b')
    version('0.18.0', sha256='c975710ce8154b50f39a46aa3ea88d95b680191d1d9d4b5dd91eae7215e01814')
    version('0.16.1', sha256='570d243f8cb068bf780461b9225d2e7bef7c90aa10d43cf908fe541fc92df8b6')
    version('0.16.0', sha256='4013de6f8796ca9d2871218861823bd9878a8dfacd26e08ccf9afdd01bbad9f1')

    # Required dependencies
    # https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html#dependencies
    depends_on('python@3.6.1:', type=('build', 'run'), when='@1:')
    depends_on('python@3.5.3:', type=('build', 'run'), when='@0.25:')
    # https://pandas.pydata.org/docs/whatsnew/v1.0.0.html#build-changes
    depends_on('py-cython@0.29.13:', type='build', when='@1:')
    depends_on('py-setuptools@24.2.0:', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-numpy@1.13.3:', type=('build', 'run'), when='@0.25:')
    depends_on('py-python-dateutil', type=('build', 'run'))
    depends_on('py-python-dateutil@2.6.1:', type=('build', 'run'), when='@0.25:')
    depends_on('py-pytz@2017.2:', type=('build', 'run'))

    # Recommended dependencies
    # https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html#recommended-dependencies
    depends_on('py-numexpr', type=('build', 'run'))
    depends_on('py-numexpr@2.6.2:', type=('build', 'run'), when='@0.25:')
    depends_on('py-bottleneck', type=('build', 'run'))
    depends_on('py-bottleneck@1.2.1:', type=('build', 'run'), when='@0.25:')

    # Optional dependencies
    # https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html#optional-dependencies

    # Test dependencies
    # https://pandas.pydata.org/pandas-docs/stable/development/contributing.html#running-the-test-suite
    depends_on('py-pytest@4.0.2:', type='test')
    depends_on('py-pytest-xdist', type='test')
    depends_on('py-hypothesis@3.58:', type='test')
    depends_on('py-pyarrow@0.10.0:', type='test')
| lgpl-2.1 |
wagdav/talk-python-in-fusion-2015 | demo_working_environment.py | 1 | 1799 | """
1. Present working environment
* matplotlib axes are good primitives to pass around (not figures)
* separate content and apperance
- see more Grammar of Graphics, ggplot http://ggplot.yhathq.com/
* re-use 'plots' on other figures
2. Implement highlight_y and make_bw_friendly functions
"""
import numpy as np
import matplotlib.pyplot as plt
def plot_sin(ax):
    """ Plot sine waves of frequencies 1, 2 and 4 onto *ax* """
    t = np.linspace(0, 2 * np.pi, 100)
    for freq in (1, 2, 4):
        ax.plot(t, np.sin(freq * t))
    ax.set_xlabel('time [s]')
    ax.set_ylabel('amplitude [m]')
def plot_sinc(ax):
    """ Plot the sinc function """
    positions = np.linspace(-4, 4, 100)
    ax.plot(positions, np.sinc(positions))
    ax.set_xlabel('position [m]')
    ax.set_ylabel('amplitude [m]')
def make_bw_friendly(ax):
    """ Make the axis understandable on black and white print.

    Every line becomes thick and black; linestyles are cycled so the
    curves stay distinguishable without colour.
    """
    from itertools import cycle

    linewidth = 2
    color = 'black'
    # BUG FIX: '.' is a marker, not a linestyle -- Line2D.set_linestyle('.')
    # raises ValueError.  The valid dotted linestyle is ':'.
    styles = ['-', '--', '-.', ':']
    for line, style in zip(ax.lines, cycle(styles)):
        line.set_linewidth(linewidth)
        line.set_color(color)
        line.set_linestyle(style)
def highlight_x(ax, limits, **kwargs):
    """ Highlight parts of the plot on the x-axis between range """
    lower, upper = limits
    # Copy kwargs so the caller's dict is never mutated, then apply the
    # default overlay opacity only when the caller gave none.
    span_kwargs = dict(kwargs)
    span_kwargs.setdefault('alpha', 0.5)
    ax.axvspan(lower, upper, **span_kwargs)
if __name__ == '__main__':
    # Reuse figure 1 across interactive runs: clear it if it already exists.
    if plt.fignum_exists(1):
        plt.figure(1).clf()
    fig, axes = plt.subplots(2, num=1)

    # Build content first...
    plot_sin(axes[0])
    plot_sinc(axes[1])

    plt.tight_layout()
    plt.draw()

    # ...then adjust appearance separately (content vs. appearance split).
    highlight_x(axes[1], (0.2, 0.5), color='red')
    make_bw_friendly(axes[0])
    plt.show()
| mit |
rhyolight/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wxagg.py | 70 | 9051 | from __future__ import division
"""
backend_wxagg.py
A wxPython backend for Agg. This uses the GUI widgets written by
Jeremy O'Donoghue (jeremy@o-donoghue.com) and the Agg backend by John
Hunter (jdhunter@ace.bsd.uchicago.edu)
Copyright (C) 2003-5 Jeremy O'Donoghue, John Hunter, Illinois Institute of
Technology
License: This work is licensed under the matplotlib license( PSF
compatible). A copy should be included with this source code.
"""
import wx
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
class FigureFrameWxAgg(FigureFrameWx):
    """FigureFrameWx variant whose canvas renders through Agg."""

    def get_canvas(self, fig):
        # Use the Agg-backed canvas instead of the plain wx one.
        return FigureCanvasWxAgg(self, -1, fig)

    def _get_toolbar(self, statbar):
        # Toolbar flavour is chosen from rcParams; None disables it.
        if matplotlib.rcParams['toolbar']=='classic':
            # NOTE(review): NavigationToolbarWx is not among this module's
            # visible imports -- this branch looks like it would raise
            # NameError; confirm against backend_wx.
            toolbar = NavigationToolbarWx(self.canvas, True)
        elif matplotlib.rcParams['toolbar']=='toolbar2':
            toolbar = NavigationToolbar2WxAgg(self.canvas)
            toolbar.set_status_bar(statbar)
        else:
            toolbar = None
        return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
    """
    The FigureCanvas contains the figure and does event handling.

    In the wxPython backend, it is derived from wxPanel, and (usually)
    lives inside a frame instantiated by a FigureManagerWx. The parent
    window probably implements a wxSizer to control the displayed
    control size - but we give a hint as to our preferred minimum
    size.
    """

    def draw(self, drawDC=None):
        """
        Render the figure using agg, convert the Agg buffer to a wx.Bitmap
        and blit it to the screen.
        """
        DEBUG_MSG("draw()", 1, self)
        FigureCanvasAgg.draw(self)

        self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
        self._isDrawn = True
        self.gui_repaint(drawDC=drawDC)

    def blit(self, bbox=None):
        """
        Transfer the region of the agg buffer defined by bbox to the display.
        If bbox is None, the entire buffer is transferred.
        """
        if bbox is None:
            self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
            self.gui_repaint()
            return

        # Copy only the bbox sub-region from a fresh conversion of the Agg
        # buffer into the existing bitmap.
        l, b, w, h = bbox.bounds
        r = l + w
        t = b + h
        # wx bitmaps have the origin at the top-left; flip the y coordinate.
        x = int(l)
        y = int(self.bitmap.GetHeight() - t)

        srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
        srcDC = wx.MemoryDC()
        srcDC.SelectObject(srcBmp)

        destDC = wx.MemoryDC()
        destDC.SelectObject(self.bitmap)

        destDC.BeginDrawing()
        destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
        destDC.EndDrawing()

        # Deselect bitmaps before they go out of scope (wx requirement).
        destDC.SelectObject(wx.NullBitmap)
        srcDC.SelectObject(wx.NullBitmap)

        self.gui_repaint()

    filetypes = FigureCanvasAgg.filetypes

    def print_figure(self, filename, *args, **kwargs):
        # Use pure Agg renderer to draw
        FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
        # Restore the current view; this is needed because the
        # artist contains methods rely on particular attributes
        # of the rendered figure for determining things like
        # bounding boxes.
        if self._isDrawn:
            self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
    """Toolbar2 variant that creates an Agg-backed canvas."""
    def get_canvas(self, frame, fig):
        return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    # in order to expose the Figure constructor to the pylab
    # interface we need to create the figure here
    DEBUG_MSG("new_figure_manager()", 3, None)
    # Make sure a wx.App exists before creating any wx windows.
    backend_wx._create_wx_app()

    # Allow callers to substitute their own Figure subclass.
    FigureClass = kwargs.pop('FigureClass', Figure)
    fig = FigureClass(*args, **kwargs)
    frame = FigureFrameWxAgg(num, fig)
    figmgr = frame.get_figure_manager()
    # In interactive mode the window is shown immediately.
    if matplotlib.is_interactive():
        figmgr.frame.Show()
    return figmgr
#
# agg/wxPython image conversion functions (wxPython <= 2.6)
#
def _py_convert_agg_to_wx_image(agg, bbox):
    """
    Convert the region of the agg buffer bounded by bbox to a wx.Image.  If
    bbox is None, the entire buffer is converted.

    Note: agg must be a backend_agg.RendererAgg instance.
    """
    image = wx.EmptyImage(int(agg.width), int(agg.height))
    image.SetData(agg.tostring_rgb())

    if bbox is None:
        # agg => rgb -> image
        return image
    else:
        # agg => rgb -> image => bitmap => clipped bitmap => image
        # (clipping must go through a bitmap because wx.Image has no
        # sub-region blit of its own in this code path)
        return wx.ImageFromBitmap(_clipped_image_as_bitmap(image, bbox))


def _py_convert_agg_to_wx_bitmap(agg, bbox):
    """
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.  If
    bbox is None, the entire buffer is converted.

    Note: agg must be a backend_agg.RendererAgg instance.
    """
    if bbox is None:
        # agg => rgb -> image => bitmap
        return wx.BitmapFromImage(_py_convert_agg_to_wx_image(agg, None))
    else:
        # agg => rgb -> image => bitmap => clipped bitmap
        return _clipped_image_as_bitmap(
            _py_convert_agg_to_wx_image(agg, None),
            bbox)


def _clipped_image_as_bitmap(image, bbox):
    """
    Convert the region of a wx.Image bounded by bbox to a wx.Bitmap.
    """
    l, b, width, height = bbox.get_bounds()
    r = l + width
    t = b + height

    srcBmp = wx.BitmapFromImage(image)
    srcDC = wx.MemoryDC()
    srcDC.SelectObject(srcBmp)

    destBmp = wx.EmptyBitmap(int(width), int(height))
    destDC = wx.MemoryDC()
    destDC.SelectObject(destBmp)

    destDC.BeginDrawing()
    # Flip y: the Agg buffer's origin is bottom-left, wx's is top-left.
    x = int(l)
    y = int(image.GetHeight() - t)
    destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
    destDC.EndDrawing()

    # Deselect bitmaps before they go out of scope (wx requirement).
    srcDC.SelectObject(wx.NullBitmap)
    destDC.SelectObject(wx.NullBitmap)

    return destBmp
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _py_WX28_convert_agg_to_wx_image(agg, bbox):
    """
    Convert the region of the agg buffer bounded by bbox to a wx.Image.  If
    bbox is None, the entire buffer is converted.

    Note: agg must be a backend_agg.RendererAgg instance.
    """
    if bbox is None:
        # agg => rgb -> image
        image = wx.EmptyImage(int(agg.width), int(agg.height))
        image.SetData(agg.tostring_rgb())
        return image
    else:
        # agg => rgba buffer -> bitmap => clipped bitmap => image
        return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))


def _py_WX28_convert_agg_to_wx_bitmap(agg, bbox):
    """
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.  If
    bbox is None, the entire buffer is converted.

    Note: agg must be a backend_agg.RendererAgg instance.
    """
    if bbox is None:
        # agg => rgba buffer -> bitmap (wx >= 2.8 can build a bitmap
        # directly from the RGBA buffer, skipping the wx.Image step)
        return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
            agg.buffer_rgba(0, 0))
    else:
        # agg => rgba buffer -> bitmap => clipped bitmap
        return _WX28_clipped_agg_as_bitmap(agg, bbox)


def _WX28_clipped_agg_as_bitmap(agg, bbox):
    """
    Convert the region of a the agg buffer bounded by bbox to a wx.Bitmap.

    Note: agg must be a backend_agg.RendererAgg instance.
    """
    l, b, width, height = bbox.get_bounds()
    r = l + width
    t = b + height

    srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
        agg.buffer_rgba(0, 0))
    srcDC = wx.MemoryDC()
    srcDC.SelectObject(srcBmp)

    destBmp = wx.EmptyBitmap(int(width), int(height))
    destDC = wx.MemoryDC()
    destDC.SelectObject(destBmp)

    destDC.BeginDrawing()
    # Flip y: the Agg buffer's origin is bottom-left, wx's is top-left.
    x = int(l)
    y = int(int(agg.height) - t)
    destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
    destDC.EndDrawing()

    # Deselect bitmaps before they go out of scope (wx requirement).
    srcDC.SelectObject(wx.NullBitmap)
    destDC.SelectObject(wx.NullBitmap)

    return destBmp
def _use_accelerator(state):
    """
    Enable or disable the WXAgg accelerator, if it is present and is also
    compatible with whatever version of wxPython is in use.

    Rebinds the module-level conversion functions to either the C++
    accelerator (_wxagg), the pure-Python wx<=2.6 routines, or the
    wx>=2.8 buffer-based routines.
    """
    global _convert_agg_to_wx_image
    global _convert_agg_to_wx_bitmap

    if getattr(wx, '__version__', '0.0')[0:3] < '2.8':
        # wxPython < 2.8, so use the C++ accelerator or the Python routines
        if state and _wxagg is not None:
            _convert_agg_to_wx_image  = _wxagg.convert_agg_to_wx_image
            _convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmap
        else:
            _convert_agg_to_wx_image  = _py_convert_agg_to_wx_image
            _convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmap
    else:
        # wxPython >= 2.8, so use the accelerated Python routines
        # (the C++ accelerator predates 2.8 and is not used here)
        _convert_agg_to_wx_image  = _py_WX28_convert_agg_to_wx_image
        _convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap
# try to load the WXAgg accelerator; it is an optional compiled extension,
# so absence is expected and handled gracefully
try:
    import _wxagg
except ImportError:
    _wxagg = None

# if it's present, use it (this also selects the right pure-Python
# fallback for the installed wxPython version)
_use_accelerator(True)
| agpl-3.0 |
drodarie/nest-simulator | pynest/examples/hh_phaseplane.py | 9 | 4973 | # -*- coding: utf-8 -*-
#
# hh_phaseplane.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
hh_phaseplane makes a numerical phase-plane analysis of the Hodgkin-Huxley
neuron (``hh_psc_alpha``). Dynamics is investigated in the V-n space (see remark
below). A constant DC can be specified and its influence on the nullclines
can be studied.
REMARK
To make the two-dimensional analysis possible, the (four-dimensional)
Hodgkin-Huxley formalism needs to be artificially reduced to two dimensions,
in this case by 'clamping' the two other variables, m an h, to
constant values (m_eq and h_eq).
'''
import nest
from matplotlib import pyplot as plt
amplitude = 100. # Set externally applied current amplitude in pA
dt = 0.1 # simulation step length [ms]

# Fresh kernel with the chosen integration resolution.
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
nest.SetKernelStatus({'resolution': dt})

neuron = nest.Create('hh_psc_alpha')

# Numerically obtain equilibrium state
# (1 s of simulation without input lets the neuron settle at rest).
nest.Simulate(1000)

# Resting values of the m and h gating variables; these are the values
# the analysis below 'clamps' them to, reducing the system to (V, n).
m_eq = nest.GetStatus(neuron)[0]['Act_m']
h_eq = nest.GetStatus(neuron)[0]['Act_h']

nest.SetStatus(neuron, {'I_e': amplitude}) # Apply external current
# Scan state space
# For every grid point (V, n): clamp m and h at rest, integrate one step
# of length dt, and record the displacement (dV, dn) as one arrow of the
# vector field.
print('Scanning phase space')
V_new_vec = []
n_new_vec = []
# x will contain the phase-plane data as a vector field
x = []
count = 0
for V in range(-100, 42, 2):
    n_V = []
    n_n = []
    for n in range(10, 81):
        # Set V_m and n (n is stored in percent, hence /100)
        nest.SetStatus(neuron, {'V_m': V*1.0, 'Inact_n': n/100.0,
                                'Act_m': m_eq, 'Act_h': h_eq})
        # Find state
        V_m = nest.GetStatus(neuron)[0]['V_m']
        Inact_n = nest.GetStatus(neuron)[0]['Inact_n']
        # Simulate a short while
        nest.Simulate(dt)
        # Find difference between new state and old state
        V_m_new = nest.GetStatus(neuron)[0]['V_m'] - V*1.0
        Inact_n_new = nest.GetStatus(neuron)[0]['Inact_n'] - n/100.0
        # Store in vector for later analysis
        # (magnitudes are used below to locate the nullclines)
        n_V.append(abs(V_m_new))
        n_n.append(abs(Inact_n_new))
        x.append([V_m, Inact_n, V_m_new, Inact_n_new])
        if count % 10 == 0:
            # Write updated state next to old state
            print('')
            print('Vm: \t', V_m)
            print('new Vm:\t', V_m_new)
            print('Inact_n:', Inact_n)
            print('new Inact_n:', Inact_n_new)
        count += 1
    # Store in vector for later analysis
    V_new_vec.append(n_V)
    n_new_vec.append(n_n)

# Set state for AP generation (super-threshold initial condition)
nest.SetStatus(neuron, {'V_m': -34., 'Inact_n': 0.2,
                        'Act_m': m_eq, 'Act_h': h_eq})
print('')
print('AP-trajectory')
# ap will contain the trace of a single action potential as one possible
# numerical solution in the vector field
ap = []
# Integrate 1000 steps of length dt, re-clamping m and h before each step
# so the trajectory stays inside the reduced (V, n) system.
for i in range(1, 1001):
    # Find state
    V_m = nest.GetStatus(neuron)[0]['V_m']
    Inact_n = nest.GetStatus(neuron)[0]['Inact_n']
    if i % 10 == 0:
        # Write new state next to old state
        print('Vm: \t', V_m)
        print('Inact_n:', Inact_n)
    ap.append([V_m, Inact_n])
    # Simulate again
    nest.SetStatus(neuron, {'Act_m': m_eq, 'Act_h': h_eq})
    nest.Simulate(dt)
# Make analysis
print('')
print('Plot analysis')

# Transpose so that V_matrix/n_matrix are indexed [n_index][V_index].
V_matrix = [list(x) for x in zip(*V_new_vec)]
n_matrix = [list(x) for x in zip(*n_new_vec)]
n_vec = [x/100. for x in range(10, 81)]
V_vec = [x*1. for x in range(-100, 42, 2)]

nullcline_V = []
nullcline_n = []

print('Searching nullclines')
for i in range(0, len(V_vec)):
    # BUG FIX: the original used V_matrix[:][i], but list[:] is a no-op
    # copy, so that expression is just V_matrix[i] -- a *row* (fixed n),
    # not the column of |dV| values for the i-th V. Extract the column
    # explicitly so the minimum is taken over n, as intended.
    V_column = [row[i] for row in V_matrix]
    index = V_column.index(min(V_column))
    if index != 0 and index != len(n_vec):
        nullcline_V.append([V_vec[i], n_vec[index]])

    n_column = [row[i] for row in n_matrix]
    index = n_column.index(min(n_column))
    if index != 0 and index != len(n_vec):
        nullcline_n.append([V_vec[i], n_vec[index]])

print('Plotting vector field')
factor = 0.1
for i in range(0, count, 3):
    plt.plot([x[i][0], x[i][0] + factor*x[i][2]],
             [x[i][1], x[i][1] + factor*x[i][3]], color=[0.6, 0.6, 0.6])

# BUG FIX: nullcline_V[:][0] is just nullcline_V[0] (the first [V, n]
# point), so the original plotted a single point's coordinates against
# each other. Unzip the point lists into x- and y-coordinate sequences.
if nullcline_V:
    plt.plot([p[0] for p in nullcline_V], [p[1] for p in nullcline_V],
             linewidth=2.0)
if nullcline_n:
    plt.plot([p[0] for p in nullcline_n], [p[1] for p in nullcline_n],
             linewidth=2.0)

plt.xlim([V_vec[0], V_vec[-1]])
plt.ylim([n_vec[0], n_vec[-1]])

# Same fix for the action-potential trace collected above.
plt.plot([p[0] for p in ap], [p[1] for p in ap],
         color='black', linewidth=1.0)

plt.xlabel('Membrane potential V [mV]')
plt.ylabel('Inactivation variable n')
plt.title('Phase space of the Hodgkin-Huxley Neuron')
plt.show()
| gpl-2.0 |
probml/pyprobml | scripts/prior_post_pred_binom_pymc3.py | 1 | 1898 | # prior and posterior predictive for beta binomial
# fig 1.6 of 'Bayesian Modeling and Computation'
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
from scipy import stats
from scipy.stats import entropy
from scipy.optimize import minimize
import pyprobml_utils as pml
np.random.seed(0)

# Observed data: 20 Bernoulli(0.7) draws.
Y = stats.bernoulli(0.7).rvs(20)

with pm.Model() as model:
    # Beta(1, 1) == uniform prior on the success probability.
    θ = pm.Beta("θ", 1, 1)
    y_obs = pm.Binomial("y_obs",n=1, p=θ, observed=Y)
    trace = pm.sample(1000, cores=1, return_inferencedata=False)
    idata = az.from_pymc3(trace)

# Draw predictive samples before (prior) and after (posterior) seeing Y.
pred_dists = (pm.sample_prior_predictive(1000, model)["y_obs"],
              pm.sample_posterior_predictive(idata, 1000, model)["y_obs"])

dist=pred_dists[0]
print(dist.shape)
# Summing over the 20 trials gives the predicted number of successes
# per predictive sample.
num_success = dist.sum(1)
print(num_success.shape)

# Prior predictive distribution of the success count.
fig, ax = plt.subplots()
az.plot_dist(pred_dists[0].sum(1), hist_kwargs={"color":"0.5", "bins":range(0, 22)})
ax.set_title(f"Prior predictive distribution",fontweight='bold')
ax.set_xlim(-1, 21)
ax.set_ylim(0, 0.15)
ax.set_xlabel("number of success")

# Posterior predictive distribution of the success count.
fig, ax = plt.subplots()
az.plot_dist(pred_dists[1].sum(1), hist_kwargs={"color":"0.5", "bins":range(0, 22)})
ax.set_title(f"Posterior predictive distribution",fontweight='bold')
ax.set_xlim(-1, 21)
ax.set_ylim(0, 0.15)
ax.set_xlabel("number of success")

# Prior density of θ (sampled directly from the prior distribution).
fig, ax = plt.subplots()
az.plot_dist(θ.distribution.random(size=1000), plot_kwargs={"color":"0.5"},
             fill_kwargs={'alpha':1})
ax.set_title("Prior distribution", fontweight='bold')
ax.set_xlim(0, 1)
ax.set_ylim(0, 4)
ax.tick_params(axis='both', pad=7)
ax.set_xlabel("θ")

# Posterior density of θ from the MCMC trace.
fig, ax = plt.subplots()
az.plot_dist(idata.posterior["θ"], plot_kwargs={"color":"0.5"},
             fill_kwargs={'alpha':1})
ax.set_title("Posterior distribution", fontweight='bold')
ax.set_xlim(0, 1)
ax.set_ylim(0, 4)
ax.tick_params(axis='both', pad=7)
ax.set_xlabel("θ")
| mit |
mehdidc/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 28 | 10792 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    # Loading the reference classification file yields the expected sparse
    # shape, cell values (zero and non-zero), mutability, and labels.
    X, y = load_svmlight_file(datafile)

    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)

    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                      (1, 5, 1.0), (1, 12, -3),
                      (2, 20, 27)):
        assert_equal(X[i, j], val)

    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)

    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)

    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    # Loading via an OS-level file descriptor must produce the same data
    # as loading via the file path.
    X_path, y_path = load_svmlight_file(datafile)

    descriptor = os.open(datafile, os.O_RDONLY)
    try:
        X_fd, y_fd = load_svmlight_file(descriptor)
        assert_array_equal(X_path.data, X_fd.data)
        assert_array_equal(y_path, y_fd)
    finally:
        os.close(descriptor)
def test_load_svmlight_file_multilabel():
    # In multilabel mode each sample's targets come back as a tuple.
    _, labels = load_svmlight_file(multifile, multilabel=True)
    assert_equal(labels, [(0, 1), (2,), (1, 2)])
def test_load_svmlight_files():
    # Loading the same file repeatedly via load_svmlight_files gives
    # identical data, and the requested dtype is honoured for every output.
    X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
                                                           dtype=np.float32)
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_equal(y_train, y_test)
    assert_equal(X_train.dtype, np.float32)
    assert_equal(X_test.dtype, np.float32)

    X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
                                                 dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    # n_features may widen the matrix beyond what the file contains,
    # but requesting fewer features than present must raise.
    X, y = load_svmlight_file(datafile, n_features=22)

    # test X'shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 22)

    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
                      (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[i, j], val)

    # 21 features in file
    assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    """Check that gzip- and bzip2-compressed copies load identically
    to the plain text file."""
    X, y = load_svmlight_file(datafile)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            # BUG FIX: the gzip handle was previously created inline and
            # never closed, leaking the file object and risking a
            # truncated archive on interpreters without refcount-based
            # finalization (PyPy).
            fout = gzip.open(tmp.name, "wb")
            try:
                shutil.copyfileobj(f, fout)
            finally:
                fout.close()
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xgz.toarray())
    assert_array_equal(y, ygz)

    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            # Same leak fix for the bzip2 writer.
            fout = BZ2File(tmp.name, "wb")
            try:
                shutil.copyfileobj(f, fout)
            finally:
                fout.close()
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xbz.toarray())
    assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
    # A malformed svmlight file must raise ValueError.
    load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
    # Feature indices out of ascending order must raise ValueError.
    load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
    # A feature index of 0 is invalid when one-based indexing is forced.
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
    # With zero_based="auto", indexing is inferred: a lone one-based-looking
    # file is shifted, but loading files together keeps them consistent.
    data1 = b("-1 1:1 2:2 3:3\n")
    data2 = b("-1 0:0 1:1\n")

    f1 = BytesIO(data1)
    X, y = load_svmlight_file(f1, zero_based="auto")
    assert_equal(X.shape, (1, 3))

    f1 = BytesIO(data1)
    f2 = BytesIO(data2)
    X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
    # load svmfile with qid attribute: qid tokens are skipped when
    # query_id=False and returned as a third array when query_id=True.
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
    # One bad file in a batch must fail the whole load.
    load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors)
    load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
    # A non-existent path must raise IOError.
    load_svmlight_file("trou pic nic douille")
def test_dump():
    # Round-trip: dump sparse/dense/sliced matrices at several dtypes and
    # both index bases, then reload and compare (with dtype-appropriate
    # rounding tolerance). Also checks the version/indexing comments.
    Xs, y = load_svmlight_file(datafile)
    Xd = Xs.toarray()

    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    Xsliced = Xs[np.arange(Xs.shape[0])]

    for X in (Xs, Xd, Xsliced):
        for zero_based in (True, False):
            for dtype in [np.float32, np.float64, np.int32]:
                f = BytesIO()
                # we need to pass a comment to get the version info in;
                # LibSVM doesn't grok comments so they're not put in by
                # default anymore.
                dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                   zero_based=zero_based)
                f.seek(0)

                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass

                assert_in("scikit-learn %s" % sklearn.__version__, comment)

                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass

                assert_in(["one", "zero"][zero_based] + "-based", comment)

                X2, y2 = load_svmlight_file(f, dtype=dtype,
                                            zero_based=zero_based)
                assert_equal(X2.dtype, dtype)
                assert_array_equal(X2.sorted_indices().indices, X2.indices)
                if dtype == np.float32:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 4)
                else:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 15)
                assert_array_equal(y, y2)
def test_dump_concise():
    # The dumper must emit the most concise decimal representation that
    # still round-trips (e.g. "1" not "1.0", "2e+18" not full digits).
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [[one, two, three, exact, almost],
         [1e9, 2e18, 3e27, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert_equal(f.readline(),
                 b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
    assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
    assert_equal(f.readline(), b("3.01 \n"))
    assert_equal(f.readline(), b("1.000000000000001 \n"))
    assert_equal(f.readline(), b("1 \n"))
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
def test_dump_comment():
    # Comments round-trip for ASCII and unicode text; raw UTF-8 bytes and
    # embedded NUL characters are rejected.
    X, y = load_svmlight_file(datafile)
    X = X.toarray()

    f = BytesIO()
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)

    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)

    # XXX we have to update this to support Python 3.x
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)

    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)

    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)

    f = BytesIO()
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    # Labels of the wrong shape or length must be rejected by the dumper.
    X, y = load_svmlight_file(datafile)

    # 2-d label array is invalid
    f = BytesIO()
    y2d = [y]
    assert_raises(ValueError, dump_svmlight_file, X, y2d, f)

    # label vector shorter than the number of samples is invalid
    f = BytesIO()
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
    # test dumping a file with query_id: the qid column must survive a
    # dump/load round trip alongside X and y.
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    query_id = np.arange(X.shape[0]) // 2
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)

    f.seek(0)
    X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
JeanKossaifi/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np

from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve

# Fit an RBF SVM on the digits data over a log-spaced range of gamma
# values, collecting train/validation accuracy from 10-fold CV.
digits = load_digits()
X, y = digits.data, digits.target

param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name="gamma", param_range=param_range,
    cv=10, scoring="accuracy", n_jobs=1)

# Mean +/- one standard deviation across CV folds, per gamma value.
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)

plt.title("Validation Curve with SVM")
# BUG FIX: use a raw string for the TeX label -- "\g" in a plain string
# literal is an invalid escape sequence (DeprecationWarning on Python
# >= 3.6, SyntaxError in the future). The rendered label is unchanged.
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
             color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
                 test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/io/formats/test_style.py | 3 | 36182 | import copy
import textwrap
import pytest
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas.util.testing as tm
jinja2 = pytest.importorskip('jinja2')
from pandas.io.formats.style import Styler, _get_level_lengths # noqa
class TestStyler(object):
    def setup_method(self, method):
        # Shared fixtures: a seeded random frame, identity styling funcs,
        # and a ready-made Styler plus attribute frame used across tests.
        np.random.seed(24)
        self.s = DataFrame({'A': np.random.permutation(range(6))})
        self.df = DataFrame({'A': [0, 1], 'B': np.random.randn(2)})
        self.f = lambda x: x
        self.g = lambda x: x

        def h(x, foo='bar'):
            # Styles every cell of the column/frame with 'color: <foo>'.
            return pd.Series(['color: %s' % foo], index=x.index, name=x.name)

        self.h = h
        self.styler = Styler(self.df)
        self.attrs = pd.DataFrame({'A': ['color: red', 'color: blue']})
        self.dataframes = [
            self.df,
            pd.DataFrame({'f': [1., 2.], 'o': ['a', 'b'],
                          'c': pd.Categorical(['a', 'b'])})
        ]
    def test_init_non_pandas(self):
        # A plain list is not valid Styler input.
        with pytest.raises(TypeError):
            Styler([1, 2, 3])
    def test_init_series(self):
        # A Series is accepted and promoted to 2-d data.
        result = Styler(pd.Series([1, 2]))
        assert result.data.ndim == 2
    def test_repr_html_ok(self):
        # Smoke test: the notebook repr renders without raising.
        self.styler._repr_html_()
    def test_update_ctx(self):
        # Per-cell CSS strings land in ctx keyed by (row, col).
        self.styler._update_ctx(self.attrs)
        expected = {(0, 0): ['color: red'],
                    (1, 0): ['color: blue']}
        assert self.styler.ctx == expected
    def test_update_ctx_flatten_multi(self):
        # Semicolon-separated declarations are split into list entries.
        attrs = DataFrame({"A": ['color: red; foo: bar',
                                 'color: blue; foo: baz']})
        self.styler._update_ctx(attrs)
        expected = {(0, 0): ['color: red', ' foo: bar'],
                    (1, 0): ['color: blue', ' foo: baz']}
        assert self.styler.ctx == expected
    def test_update_ctx_flatten_multi_traliing_semi(self):
        # A trailing semicolon must not produce an empty declaration.
        # (Name typo "traliing" kept: renaming would change the test id.)
        attrs = DataFrame({"A": ['color: red; foo: bar;',
                                 'color: blue; foo: baz;']})
        self.styler._update_ctx(attrs)
        expected = {(0, 0): ['color: red', ' foo: bar'],
                    (1, 0): ['color: blue', ' foo: baz']}
        assert self.styler.ctx == expected
    def test_copy(self):
        # Shallow copy shares ctx/_todo, so later styling mutates both.
        s2 = copy.copy(self.styler)
        assert self.styler is not s2
        assert self.styler.ctx is s2.ctx  # shallow
        assert self.styler._todo is s2._todo

        self.styler._update_ctx(self.attrs)
        self.styler.highlight_max()
        assert self.styler.ctx == s2.ctx
        assert self.styler._todo == s2._todo
    def test_deepcopy(self):
        # Deep copy detaches ctx/_todo, so later styling is independent.
        s2 = copy.deepcopy(self.styler)
        assert self.styler is not s2
        assert self.styler.ctx is not s2.ctx
        assert self.styler._todo is not s2._todo

        self.styler._update_ctx(self.attrs)
        self.styler.highlight_max()
        assert self.styler.ctx != s2.ctx
        assert s2._todo == []
        assert self.styler._todo != s2._todo
    def test_clear(self):
        # clear() empties both the computed ctx and the pending _todo list.
        s = self.df.style.highlight_max()._compute()
        assert len(s.ctx) > 0
        assert len(s._todo) > 0
        s.clear()
        assert len(s.ctx) == 0
        assert len(s._todo) == 0
    def test_render(self):
        # Smoke test: applying a row-wise style function renders cleanly.
        df = pd.DataFrame({"A": [0, 1]})
        style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
        s = Styler(df, uuid='AB').apply(style)
        s.render()
        # it worked?
    def test_render_double(self):
        # Smoke test: two CSS declarations per cell also render cleanly.
        df = pd.DataFrame({"A": [0, 1]})
        style = lambda x: pd.Series(["color: red; border: 1px",
                                     "color: blue; border: 2px"], name=x.name)
        s = Styler(df, uuid='AB').apply(style)
        s.render()
        # it worked?
    def test_set_properties(self):
        # set_properties broadcasts keyword CSS to every cell.
        df = pd.DataFrame({"A": [0, 1]})
        result = df.style.set_properties(color='white',
                                         size='10px')._compute().ctx
        # order is deterministic
        v = ["color: white", "size: 10px"]
        expected = {(0, 0): v, (1, 0): v}
        assert result.keys() == expected.keys()
        for v1, v2 in zip(result.values(), expected.values()):
            assert sorted(v1) == sorted(v2)
    def test_set_properties_subset(self):
        # A subset IndexSlice restricts the styled cells.
        df = pd.DataFrame({'A': [0, 1]})
        result = df.style.set_properties(subset=pd.IndexSlice[0, 'A'],
                                         color='white')._compute().ctx
        expected = {(0, 0): ['color: white']}
        assert result == expected
    def test_empty_index_name_doesnt_display(self):
        # An unnamed index contributes only a blank header cell.
        # https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
        df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
        result = df.style._translate()

        expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
                      'is_visible': True, 'display_value': ''},
                     {'class': 'col_heading level0 col0',
                      'display_value': 'A',
                      'type': 'th',
                      'value': 'A',
                      'is_visible': True,
                      },
                     {'class': 'col_heading level0 col1',
                      'display_value': 'B',
                      'type': 'th',
                      'value': 'B',
                      'is_visible': True,
                      },
                     {'class': 'col_heading level0 col2',
                      'display_value': 'C',
                      'type': 'th',
                      'value': 'C',
                      'is_visible': True,
                      }]]

        assert result['head'] == expected
    def test_index_name(self):
        # A named index adds an index_name header row.
        # https://github.com/pandas-dev/pandas/issues/11655
        df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
        result = df.set_index('A').style._translate()

        expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
                      'display_value': '', 'is_visible': True},
                     {'class': 'col_heading level0 col0', 'type': 'th',
                      'value': 'B', 'display_value': 'B', 'is_visible': True},
                     {'class': 'col_heading level0 col1', 'type': 'th',
                      'value': 'C', 'display_value': 'C', 'is_visible': True}],
                    [{'class': 'index_name level0', 'type': 'th',
                      'value': 'A'},
                     {'class': 'blank', 'type': 'th', 'value': ''},
                     {'class': 'blank', 'type': 'th', 'value': ''}]]

        assert result['head'] == expected
    def test_multiindex_name(self):
        # A named MultiIndex yields one index_name cell per level.
        # https://github.com/pandas-dev/pandas/issues/11655
        df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
        result = df.set_index(['A', 'B']).style._translate()

        expected = [[
            {'class': 'blank', 'type': 'th', 'value': '',
             'display_value': '', 'is_visible': True},
            {'class': 'blank level0', 'type': 'th', 'value': '',
             'display_value': '', 'is_visible': True},
            {'class': 'col_heading level0 col0', 'type': 'th',
             'value': 'C', 'display_value': 'C', 'is_visible': True}],
            [{'class': 'index_name level0', 'type': 'th',
              'value': 'A'},
             {'class': 'index_name level1', 'type': 'th',
              'value': 'B'},
             {'class': 'blank', 'type': 'th', 'value': ''}]]

        assert result['head'] == expected
    def test_numeric_columns(self):
        # https://github.com/pandas-dev/pandas/issues/12125
        # smoke test for _translate
        df = pd.DataFrame({0: [1, 2, 3]})
        df.style._translate()
    def test_apply_axis(self):
        # apply() works row-wise (axis=1), column-wise (axis=0, the
        # default), and computes lazily until _compute() is called.
        df = pd.DataFrame({'A': [0, 0], 'B': [1, 1]})
        f = lambda x: ['val: %s' % x.max() for v in x]
        result = df.style.apply(f, axis=1)
        assert len(result._todo) == 1
        assert len(result.ctx) == 0
        result._compute()
        expected = {(0, 0): ['val: 1'], (0, 1): ['val: 1'],
                    (1, 0): ['val: 1'], (1, 1): ['val: 1']}
        assert result.ctx == expected

        result = df.style.apply(f, axis=0)
        expected = {(0, 0): ['val: 0'], (0, 1): ['val: 1'],
                    (1, 0): ['val: 0'], (1, 1): ['val: 1']}
        result._compute()
        assert result.ctx == expected
        result = df.style.apply(f)  # default
        result._compute()
        assert result.ctx == expected
    def test_apply_subset(self):
        # For every axis/IndexSlice combination, only cells inside the
        # subset receive the style from self.h.
        axes = [0, 1]
        slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
                  pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
                  pd.IndexSlice[:2, ['A', 'B']]]
        for ax in axes:
            for slice_ in slices:
                result = self.df.style.apply(self.h, axis=ax, subset=slice_,
                                             foo='baz')._compute().ctx
                expected = dict(((r, c), ['color: baz'])
                                for r, row in enumerate(self.df.index)
                                for c, col in enumerate(self.df.columns)
                                if row in self.df.loc[slice_].index and
                                col in self.df.loc[slice_].columns)
                assert result == expected
    def test_applymap_subset(self):
        # Element-wise applymap honours subset slices the same way.
        def f(x):
            return 'foo: bar'

        slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
                  pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
                  pd.IndexSlice[:2, ['A', 'B']]]

        for slice_ in slices:
            result = self.df.style.applymap(f, subset=slice_)._compute().ctx
            expected = dict(((r, c), ['foo: bar'])
                            for r, row in enumerate(self.df.index)
                            for c, col in enumerate(self.df.columns)
                            if row in self.df.loc[slice_].index and
                            col in self.df.loc[slice_].columns)
            assert result == expected
    def test_empty(self):
        # Empty style strings still produce a (blank) cellstyle entry.
        df = pd.DataFrame({'A': [1, 0]})
        s = df.style
        s.ctx = {(0, 0): ['color: red'],
                 (1, 0): ['']}

        result = s._translate()['cellstyle']
        expected = [{'props': [['color', ' red']], 'selector': 'row0_col0'},
                    {'props': [['', '']], 'selector': 'row1_col0'}]
        assert result == expected
    def test_bar_align_left(self):
        # Left-aligned bars scale linearly with the cell value; color and
        # width are configurable, and non-numeric columns are ignored.
        df = pd.DataFrame({'A': [0, 1, 2]})
        result = df.style.bar()._compute().ctx
        expected = {
            (0, 0): ['width: 10em', ' height: 80%'],
            (1, 0): ['width: 10em', ' height: 80%',
                     'background: linear-gradient('
                     '90deg,#d65f5f 50.0%, transparent 0%)'],
            (2, 0): ['width: 10em', ' height: 80%',
                     'background: linear-gradient('
                     '90deg,#d65f5f 100.0%, transparent 0%)']
        }
        assert result == expected

        result = df.style.bar(color='red', width=50)._compute().ctx
        expected = {
            (0, 0): ['width: 10em', ' height: 80%'],
            (1, 0): ['width: 10em', ' height: 80%',
                     'background: linear-gradient('
                     '90deg,red 25.0%, transparent 0%)'],
            (2, 0): ['width: 10em', ' height: 80%',
                     'background: linear-gradient('
                     '90deg,red 50.0%, transparent 0%)']
        }
        assert result == expected

        df['C'] = ['a'] * len(df)
        result = df.style.bar(color='red', width=50)._compute().ctx
        assert result == expected
        df['C'] = df['C'].astype('category')
        result = df.style.bar(color='red', width=50)._compute().ctx
        assert result == expected
    def test_bar_align_left_0points(self):
        # With a 0 minimum per column (axis=0) or per row (axis=1), the
        # smallest entry gets no gradient and the rest scale from it.
        df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        result = df.style.bar()._compute().ctx
        expected = {(0, 0): ['width: 10em', ' height: 80%'],
                    (0, 1): ['width: 10em', ' height: 80%'],
                    (0, 2): ['width: 10em', ' height: 80%'],
                    (1, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%,'
                             ' transparent 0%)'],
                    (1, 1): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%,'
                             ' transparent 0%)'],
                    (1, 2): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%,'
                             ' transparent 0%)'],
                    (2, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)'],
                    (2, 1): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)'],
                    (2, 2): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)']}
        assert result == expected

        result = df.style.bar(axis=1)._compute().ctx
        expected = {(0, 0): ['width: 10em', ' height: 80%'],
                    (0, 1): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%,'
                             ' transparent 0%)'],
                    (0, 2): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)'],
                    (1, 0): ['width: 10em', ' height: 80%'],
                    (1, 1): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%'
                             ', transparent 0%)'],
                    (1, 2): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)'],
                    (2, 0): ['width: 10em', ' height: 80%'],
                    (2, 1): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 50.0%'
                             ', transparent 0%)'],
                    (2, 2): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg,#d65f5f 100.0%'
                             ', transparent 0%)']}
        assert result == expected
    def test_bar_align_mid_pos_and_neg(self):
        # align='mid' with mixed signs: negatives use the first color and
        # grow left of the pivot, positives use the second and grow right.
        df = pd.DataFrame({'A': [-10, 0, 20, 90]})

        result = df.style.bar(align='mid', color=[
                              '#d65f5f', '#5fba7d'])._compute().ctx

        expected = {(0, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, #d65f5f 0.0%, '
                             '#d65f5f 10.0%, transparent 10.0%)'],
                    (1, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 10.0%, '
                             '#d65f5f 10.0%, #d65f5f 10.0%, '
                             'transparent 10.0%)'],
                    (2, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 10.0%, #5fba7d 10.0%'
                             ', #5fba7d 30.0%, transparent 30.0%)'],
                    (3, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 10.0%, '
                             '#5fba7d 10.0%, #5fba7d 100.0%, '
                             'transparent 100.0%)']}

        assert result == expected
    def test_bar_align_mid_all_pos(self):
        # align='mid' with only positive values: all bars start at 0 and
        # use the positive (second) color.
        df = pd.DataFrame({'A': [10, 20, 50, 100]})

        result = df.style.bar(align='mid', color=[
                              '#d65f5f', '#5fba7d'])._compute().ctx

        expected = {(0, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
                             '#5fba7d 10.0%, transparent 10.0%)'],
                    (1, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
                             '#5fba7d 20.0%, transparent 20.0%)'],
                    (2, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
                             '#5fba7d 50.0%, transparent 50.0%)'],
                    (3, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
                             '#5fba7d 100.0%, transparent 100.0%)']}

        assert result == expected
    def test_bar_align_mid_all_neg(self):
        # align='mid' with only negative values: bars grow leftwards and
        # use the negative (first) color.
        df = pd.DataFrame({'A': [-100, -60, -30, -20]})

        result = df.style.bar(align='mid', color=[
                              '#d65f5f', '#5fba7d'])._compute().ctx

        expected = {(0, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 0.0%, '
                             '#d65f5f 0.0%, #d65f5f 100.0%, '
                             'transparent 100.0%)'],
                    (1, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 40.0%, '
                             '#d65f5f 40.0%, #d65f5f 100.0%, '
                             'transparent 100.0%)'],
                    (2, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 70.0%, '
                             '#d65f5f 70.0%, #d65f5f 100.0%, '
                             'transparent 100.0%)'],
                    (3, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 80.0%, '
                             '#d65f5f 80.0%, #d65f5f 100.0%, '
                             'transparent 100.0%)']}
        assert result == expected
    def test_bar_align_zero_pos_and_neg(self):
        # align='zero' pins the pivot at the cell midpoint (50%), with a
        # custom width shrinking the usable span.
        # See https://github.com/pandas-dev/pandas/pull/14757
        df = pd.DataFrame({'A': [-10, 0, 20, 90]})

        result = df.style.bar(align='zero', color=[
                              '#d65f5f', '#5fba7d'], width=90)._compute().ctx

        expected = {(0, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 45.0%, '
                             '#d65f5f 45.0%, #d65f5f 50%, '
                             'transparent 50%)'],
                    (1, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 50%, '
                             '#5fba7d 50%, #5fba7d 50.0%, '
                             'transparent 50.0%)'],
                    (2, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 50%, #5fba7d 50%, '
                             '#5fba7d 60.0%, transparent 60.0%)'],
                    (3, 0): ['width: 10em', ' height: 80%',
                             'background: linear-gradient(90deg, '
                             'transparent 0%, transparent 50%, #5fba7d 50%, '
                             '#5fba7d 95.0%, transparent 95.0%)']}
        assert result == expected
    def test_bar_bad_align_raises(self):
        # An unknown align keyword must raise ValueError.
        df = pd.DataFrame({'A': [-100, -60, -30, -20]})
        with pytest.raises(ValueError):
            df.style.bar(align='poorly', color=['#d65f5f', '#5fba7d'])
    def test_highlight_null(self, null_color='red'):
        # NOTE(review): the null_color parameter is never used in the body;
        # pytest always calls with the default. Candidate for removal.
        df = pd.DataFrame({'A': [0, np.nan]})
        result = df.style.highlight_null()._compute().ctx
        expected = {(0, 0): [''],
                    (1, 0): ['background-color: red']}
        assert result == expected
    def test_nonunique_raises(self):
        # Duplicate column labels are rejected by both entry points.
        df = pd.DataFrame([[1, 2]], columns=['A', 'A'])
        with pytest.raises(ValueError):
            df.style

        with pytest.raises(ValueError):
            Styler(df)
    def test_caption(self):
        # Captions render into the HTML and set_caption chains (returns self).
        styler = Styler(self.df, caption='foo')
        result = styler.render()
        assert all(['caption' in result, 'foo' in result])

        styler = self.df.style
        result = styler.set_caption('baz')
        assert styler is result
        assert styler.caption == 'baz'
    def test_uuid(self):
        # The uuid appears in rendered output and set_uuid chains.
        styler = Styler(self.df, uuid='abc123')
        result = styler.render()
        assert 'abc123' in result

        styler = self.df.style
        result = styler.set_uuid('aaa')
        assert result is styler
        assert result.uuid == 'aaa'
    def test_table_styles(self):
        # table_styles render as CSS rules and set_table_styles chains.
        style = [{'selector': 'th', 'props': [('foo', 'bar')]}]
        styler = Styler(self.df, table_styles=style)
        result = ' '.join(styler.render().split())
        assert 'th { foo: bar; }' in result

        styler = self.df.style
        result = styler.set_table_styles(style)
        assert styler is result
        assert styler.table_styles == style
    def test_table_attributes(self):
        # Raw attribute strings are injected into the <table> tag.
        attributes = 'class="foo" data-bar'
        styler = Styler(self.df, table_attributes=attributes)
        result = styler.render()
        assert 'class="foo" data-bar' in result

        result = self.df.style.set_table_attributes(attributes).render()
        assert 'class="foo" data-bar' in result
def test_precision(self):
with pd.option_context('display.precision', 10):
s = Styler(self.df)
assert s.precision == 10
s = Styler(self.df, precision=2)
assert s.precision == 2
s2 = s.set_precision(4)
assert s is s2
assert s.precision == 4
def test_apply_none(self):
def f(x):
return pd.DataFrame(np.where(x == x.max(), 'color: red', ''),
index=x.index, columns=x.columns)
result = (pd.DataFrame([[1, 2], [3, 4]])
.style.apply(f, axis=None)._compute().ctx)
assert result[(1, 1)] == ['color: red']
def test_trim(self):
result = self.df.style.render() # trim=True
assert result.count('#') == 0
result = self.df.style.highlight_max().render()
assert result.count('#') == len(self.df.columns)
    def test_highlight_max(self):
        """Exercise highlight_max/highlight_min over axes and subsets."""
        df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
        # max(df) = min(-df)
        for max_ in [True, False]:
            if max_:
                attr = 'highlight_max'
            else:
                # Negate once so the same cell positions are extreme for
                # highlight_min on the second pass.
                df = -df
                attr = 'highlight_min'
            result = getattr(df.style, attr)()._compute().ctx
            assert result[(1, 1)] == ['background-color: yellow']
            result = getattr(df.style, attr)(color='green')._compute().ctx
            assert result[(1, 1)] == ['background-color: green']
            result = getattr(df.style, attr)(subset='A')._compute().ctx
            assert result[(1, 0)] == ['background-color: yellow']
            result = getattr(df.style, attr)(axis=0)._compute().ctx
            expected = {(1, 0): ['background-color: yellow'],
                        (1, 1): ['background-color: yellow'],
                        (0, 1): [''], (0, 0): ['']}
            assert result == expected
            result = getattr(df.style, attr)(axis=1)._compute().ctx
            expected = {(0, 1): ['background-color: yellow'],
                        (1, 1): ['background-color: yellow'],
                        (0, 0): [''], (1, 0): ['']}
            assert result == expected
        # separate since we cant negate the strs
        df['C'] = ['a', 'b']
        # NOTE(review): the four statements below build `result`/`expected`
        # but never assert on them, so the mixed-dtype behaviour is not
        # actually verified. The `expected` values also look inconsistent
        # with the (already negated) frame above -- confirm the intended
        # values before adding the missing asserts.
        result = df.style.highlight_max()._compute().ctx
        expected = {(1, 1): ['background-color: yellow']}
        result = df.style.highlight_min()._compute().ctx
        expected = {(0, 0): ['background-color: yellow']}
    def test_export(self):
        """export() snapshots the queued styles so use() can replay them."""
        f = lambda x: 'color: red' if x > 0 else 'color: blue'
        # NOTE(review): due to operator precedence, `% z` only applies in the
        # else-branch; the truthy branch returns the literal 'color: %s'.
        # Harmless here (only the `_todo` bookkeeping is compared, never the
        # produced CSS), but probably not what was intended.
        g = lambda x, y, z: 'color: %s' if x > 0 else 'color: %s' % z
        style1 = self.styler
        style1.applymap(f)\
            .applymap(g, y='a', z='b')\
            .highlight_max()
        result = style1.export()
        style2 = self.df.style
        style2.use(result)
        assert style1._todo == style2._todo
        # Rendering must not raise with the replayed styles.
        style2.render()
def test_display_format(self):
df = pd.DataFrame(np.random.random(size=(2, 2)))
ctx = df.style.format("{:0.1f}")._translate()
assert all(['display_value' in c for c in row] for row in ctx['body'])
assert (all([len(c['display_value']) <= 3 for c in row[1:]]
for row in ctx['body']))
assert len(ctx['body'][0][1]['display_value'].lstrip('-')) <= 3
def test_display_format_raises(self):
df = pd.DataFrame(np.random.randn(2, 2))
with pytest.raises(TypeError):
df.style.format(5)
with pytest.raises(TypeError):
df.style.format(True)
    def test_display_subset(self):
        """format() with ``subset`` only reformats the selected cells."""
        df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
                          columns=['a', 'b'])
        ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"},
                              subset=pd.IndexSlice[0, :])._translate()
        expected = '0.1'
        assert ctx['body'][0][1]['display_value'] == expected
        # Row 1 is outside the subset, so it keeps the raw repr.
        assert ctx['body'][1][1]['display_value'] == '1.1234'
        assert ctx['body'][0][2]['display_value'] == '12.34%'
        raw_11 = '1.1234'
        ctx = df.style.format("{:0.1f}",
                              subset=pd.IndexSlice[0, :])._translate()
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][1][1]['display_value'] == raw_11
        # NOTE(review): this stanza duplicates the one above verbatim --
        # possibly a different subset was intended here; confirm upstream.
        ctx = df.style.format("{:0.1f}",
                              subset=pd.IndexSlice[0, :])._translate()
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][1][1]['display_value'] == raw_11
        # Column-only subset: column 'b' stays unformatted.
        ctx = df.style.format("{:0.1f}",
                              subset=pd.IndexSlice['a'])._translate()
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][0][2]['display_value'] == '0.1234'
        # Single-cell subset.
        ctx = df.style.format("{:0.1f}",
                              subset=pd.IndexSlice[0, 'a'])._translate()
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][1][1]['display_value'] == raw_11
        # List-based row/column subset formats both rows of 'a' only.
        ctx = df.style.format("{:0.1f}",
                              subset=pd.IndexSlice[[0, 1], ['a']])._translate()
        assert ctx['body'][0][1]['display_value'] == expected
        assert ctx['body'][1][1]['display_value'] == '1.1'
        assert ctx['body'][0][2]['display_value'] == '0.1234'
        assert ctx['body'][1][2]['display_value'] == '1.1234'
def test_display_dict(self):
df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
columns=['a', 'b'])
ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"})._translate()
assert ctx['body'][0][1]['display_value'] == '0.1'
assert ctx['body'][0][2]['display_value'] == '12.34%'
df['c'] = ['aaa', 'bbb']
ctx = df.style.format({"a": "{:0.1f}", "c": str.upper})._translate()
assert ctx['body'][0][1]['display_value'] == '0.1'
assert ctx['body'][0][3]['display_value'] == 'AAA'
def test_bad_apply_shape(self):
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(ValueError):
df.style._apply(lambda x: 'x', subset=pd.IndexSlice[[0, 1], :])
with pytest.raises(ValueError):
df.style._apply(lambda x: [''], subset=pd.IndexSlice[[0, 1], :])
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', '', ''])
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', ''], subset=1)
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', ''], axis=1)
def test_apply_bad_return(self):
def f(x):
return ''
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(TypeError):
df.style._apply(f, axis=None)
def test_apply_bad_labels(self):
def f(x):
return pd.DataFrame(index=[1, 2], columns=['a', 'b'])
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(ValueError):
df.style._apply(f, axis=None)
def test_get_level_lengths(self):
index = pd.MultiIndex.from_product([['a', 'b'], [0, 1, 2]])
expected = {(0, 0): 3, (0, 3): 3, (1, 0): 1, (1, 1): 1, (1, 2): 1,
(1, 3): 1, (1, 4): 1, (1, 5): 1}
result = _get_level_lengths(index)
tm.assert_dict_equal(result, expected)
def test_get_level_lengths_un_sorted(self):
index = pd.MultiIndex.from_arrays([
[1, 1, 2, 1],
['a', 'b', 'b', 'd']
])
expected = {(0, 0): 2, (0, 2): 1, (0, 3): 1,
(1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1}
result = _get_level_lengths(index)
tm.assert_dict_equal(result, expected)
    def test_mi_sparse(self):
        """Sparsified MultiIndex rows: repeated level values render once."""
        df = pd.DataFrame({'A': [1, 2]},
                          index=pd.MultiIndex.from_arrays([['a', 'a'],
                                                           [0, 1]]))
        result = df.style._translate()
        body_0 = result['body'][0][0]
        # First row carries the shared level-0 label with a rowspan.
        expected_0 = {
            "value": "a", "display_value": "a", "is_visible": True,
            "type": "th", "attributes": ["rowspan=2"],
            "class": "row_heading level0 row0",
        }
        tm.assert_dict_equal(body_0, expected_0)
        body_1 = result['body'][0][1]
        expected_1 = {
            "value": 0, "display_value": 0, "is_visible": True,
            "type": "th", "class": "row_heading level1 row0",
        }
        tm.assert_dict_equal(body_1, expected_1)
        body_10 = result['body'][1][0]
        # Second row's level-0 cell is hidden, covered by the rowspan above.
        expected_10 = {
            "value": 'a', "display_value": 'a', "is_visible": False,
            "type": "th", "class": "row_heading level0 row1",
        }
        tm.assert_dict_equal(body_10, expected_10)
        head = result['head'][0]
        # Two blank header cells (one per index level) then the column head.
        expected = [
            {'type': 'th', 'class': 'blank', 'value': '',
             'is_visible': True, "display_value": ''},
            {'type': 'th', 'class': 'blank level0', 'value': '',
             'is_visible': True, 'display_value': ''},
            {'type': 'th', 'class': 'col_heading level0 col0', 'value': 'A',
             'is_visible': True, 'display_value': 'A'}]
        assert head == expected
def test_mi_sparse_disabled(self):
with pd.option_context('display.multi_sparse', False):
df = pd.DataFrame({'A': [1, 2]},
index=pd.MultiIndex.from_arrays([['a', 'a'],
[0, 1]]))
result = df.style._translate()
body = result['body']
for row in body:
assert 'attributes' not in row[0]
def test_mi_sparse_index_names(self):
df = pd.DataFrame({'A': [1, 2]}, index=pd.MultiIndex.from_arrays(
[['a', 'a'], [0, 1]],
names=['idx_level_0', 'idx_level_1'])
)
result = df.style._translate()
head = result['head'][1]
expected = [{
'class': 'index_name level0', 'value': 'idx_level_0',
'type': 'th'},
{'class': 'index_name level1', 'value': 'idx_level_1',
'type': 'th'},
{'class': 'blank', 'value': '', 'type': 'th'}]
assert head == expected
    def test_mi_sparse_column_names(self):
        """Named column levels: the name cell precedes the level-1 heads."""
        df = pd.DataFrame(
            np.arange(16).reshape(4, 4),
            index=pd.MultiIndex.from_arrays(
                [['a', 'a', 'b', 'a'], [0, 1, 1, 2]],
                names=['idx_level_0', 'idx_level_1']),
            columns=pd.MultiIndex.from_arrays(
                [['C1', 'C1', 'C2', 'C2'], [1, 0, 1, 0]],
                names=['col_0', 'col_1']
            )
        )
        result = df.style._translate()
        # Second head row: blank corner, the level-1 column name, then the
        # four level-1 column values in order.
        head = result['head'][1]
        expected = [
            {'class': 'blank', 'value': '', 'display_value': '',
             'type': 'th', 'is_visible': True},
            {'class': 'index_name level1', 'value': 'col_1',
             'display_value': 'col_1', 'is_visible': True, 'type': 'th'},
            {'class': 'col_heading level1 col0',
             'display_value': 1,
             'is_visible': True,
             'type': 'th',
             'value': 1},
            {'class': 'col_heading level1 col1',
             'display_value': 0,
             'is_visible': True,
             'type': 'th',
             'value': 0},
            {'class': 'col_heading level1 col2',
             'display_value': 1,
             'is_visible': True,
             'type': 'th',
             'value': 1},
            {'class': 'col_heading level1 col3',
             'display_value': 0,
             'is_visible': True,
             'type': 'th',
             'value': 0},
        ]
        assert head == expected
class TestStylerMatplotlibDep(object):
    """Styler features that require matplotlib (colormaps)."""

    def test_background_gradient(self):
        tm._skip_if_no_mpl()
        frame = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
        # Default and named colormaps must both yield hex colors; this
        # frame's ranks are equal within each row, so the colors match
        # row-wise.
        for cmap in (None, 'YlOrRd'):
            ctx = frame.style.background_gradient(cmap=cmap)._compute().ctx
            assert all('#' in cell[0] for cell in ctx.values())
            assert ctx[(0, 0)] == ctx[(0, 1)]
            assert ctx[(1, 0)] == ctx[(1, 1)]
        # A single-cell subset is colored with the colormap's low end.
        ctx = frame.style.background_gradient(
            subset=pd.IndexSlice[1, 'A'])._compute().ctx
        assert ctx[(1, 0)] == ['background-color: #fff7fb']
def test_block_names():
    """Guard against accidental removal of a Jinja template block."""
    expected = {
        'before_style', 'style', 'table_styles', 'before_cellstyle',
        'cellstyle', 'before_table', 'table', 'caption', 'thead', 'tbody',
        'after_table', 'before_head_rows', 'head_tr', 'after_head_rows',
        'before_rows', 'tr', 'after_rows',
    }
    assert set(Styler.template.blocks) == expected
def test_from_custom_template(tmpdir):
    """A subclass built from a custom template gets its own env/template."""
    template_dir = tmpdir.mkdir("templates")
    template_dir.join("myhtml.tpl").write(textwrap.dedent("""\
        {% extends "html.tpl" %}
        {% block table %}
        <h1>{{ table_title|default("My Table") }}</h1>
        {{ super() }}
        {% endblock table %}"""))
    custom = Styler.from_custom_template(str(tmpdir.join('templates')),
                                         'myhtml.tpl')
    assert issubclass(custom, Styler)
    # The subclass must not share environment/template with the base class.
    assert custom.env is not Styler.env
    assert custom.template is not Styler.template
    # And it must still be able to render a frame.
    assert custom(pd.DataFrame({"A": [1, 2]})).render()
def test_shim():
    """The deprecated ``pandas.formats.style`` import path warns but works."""
    # https://github.com/pandas-dev/pandas/pull/16059
    # Remove in 0.21
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        from pandas.formats.style import Styler as _styler  # noqa
| mit |
vermouthmjl/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 38 | 3869 | """
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000  # number of newsgroup posts drawn from the corpus
n_features = 1000  # vocabulary size kept by both vectorizers
n_topics = 10  # number of topics fit by both NMF and LDA
n_top_words = 20  # words printed per topic
def print_top_words(model, feature_names, n_top_words):
    """Print the ``n_top_words`` highest-weighted terms of each topic.

    ``model`` must expose ``components_`` (topics x terms weight matrix);
    ``feature_names`` maps term indices back to vocabulary strings.
    """
    for topic_idx, topic in enumerate(model.components_):
        print("Topic #%d:" % topic_idx)
        # argsort ascending, then take the last n_top_words in reverse.
        top_indices = topic.argsort()[:-n_top_words - 1:-1]
        print(" ".join(feature_names[i] for i in top_indices))
    print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
# NOTE: downloads/caches the corpus on first run (network access required).
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
                             remove=('headers', 'footers', 'quotes'))
# Keep only the first n_samples posts to keep the runtime small.
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
# NMF works best with tf-idf weights, while LDA expects raw term counts,
# hence the two separate vectorizers below with identical pruning settings.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
                                   max_features=n_features,
                                   stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
                                max_features=n_features,
                                stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf features, "
      "n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
t0 = time()
# alpha/l1_ratio add a mixed L1/L2 penalty to keep the factors sparse.
nmf = NMF(n_components=n_topics, random_state=1,
          alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, "
      "n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
# Online variational Bayes scales better than batch for larger corpora.
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
                                learning_method='online',
                                learning_offset=50.,
                                random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
kfogel/batman | batman/plots.py | 1 | 2325 | # The batman package: fast computation of exoplanet transit light curves
# Copyright (C) 2015 Laura Kreidberg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import numpy as np
import math
import matplotlib.pyplot as plt
from .transitmodel import *
import timeit
def wrapper(func, *args, **kwargs):
    """Freeze ``func``'s arguments, returning a no-arg thunk.

    Handy for ``timeit.timeit``, which only accepts zero-argument callables.
    """
    def _thunk():
        return func(*args, **kwargs)
    return _thunk
def make_plots():
    # NOTE(review): the entire body below is dead code. Both triple-quoted
    # strings are bare expression statements (the first doubles as the
    # docstring) and contain Python 2 syntax (`print i, t`), so calling
    # make_plots() is currently a no-op. Kept verbatim pending a decision
    # to port the snippets to Python 3 or delete them.
    """zs = np.linspace(0., 1., 1000)
    rp = 0.1
    u = [0., 0.7, 0.0, -0.3]
    f = occultnl.occultnl(zs, rp, u[0], u[1], u[2], u[3], 1.0e-2, 4)
    fhi = occultnl.occultnl(zs, rp, u[0], u[1], u[2], u[3], 1.0e-4, 4)
    fquad = occultquad.occultquad(zs, rp, 0.1, 0.3, 4)
    #for i in range(len(f)): print "z, fnl, fquad", zs[i], f[i], fquad[i]
    for i in range(1,16):
        wrapped = wrapper(occultquad.occultquad, zs, rp, 0.1, 0.3, i)
        t = timeit.timeit(wrapped,number=1)
        print i, t
    plt.plot(zs, (f - fhi)*1.0e6)
    plt.plot(zs, (fhi - fquad)*1.0e6, color='r')
    plt.axvline(0.9)
    plt.show()"""
    #generates Figure FIXME: max err as a function of function call time
    """zs = np.linspace(0., 1., 1000)
    rp = 0.1
    u = [0., 0.7, 0.0, -0.3]
    n = 20
    ts = []
    errs = []
    f_ref = occultnl.occultnl(zs, rp, u[0], u[1], u[2], u[3], 1.0e-4, 4)
    fac = np.logspace(-3, -1, n)
    for i in range(n):
        wrapped = wrapper(occultnl.occultnl, zs, rp, u[0], u[1], u[2], u[3], fac[i], 12)
        t = timeit.timeit(wrapped,number=10)/10.
        ts.append(t)
        print t
        f= occultnl.occultnl(zs, rp, u[0], u[1], u[2], u[3], fac[i], 12)
        err = np.max(np.abs(f - f_ref))
        errs.append(err)
    plt.plot(np.array(ts), np.array(errs)*1.0e6)
    plt.yscale('log')
    plt.xscale('log')
    plt.xlabel("Time (s)")
    plt.ylabel("Max Err (ppm)")
    plt.show()"""
| gpl-3.0 |
bsautermeister/tensorflow-handwriting-demo | tensorflow/training.py | 1 | 10237 | """ Trains a model on handwriting data. """
from __future__ import absolute_import, division, print_function
import os
import sys
import time
import math
import argparse
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.core.protobuf import saver_pb2
import utils.ui
import utils.tensor
import utils.embedding
import models
import datasets
FLAGS = None
# disable TensorFlow C++ warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def main(_):
    """Build the graph selected by FLAGS, train, validate, save and plot.

    Executed only if run as a script (via ``tf.app.run``); ``_`` receives
    the leftover argv entries, which are unused.
    """
    # Pick the dataset implementation from the --dataset flag.
    if FLAGS.dataset == 'mnist':
        dataset = datasets.MnistDataset()
    elif FLAGS.dataset == 'hw-local':
        dataset = datasets.HandwritingDataset('http://localhost:64303')
    elif FLAGS.dataset == 'hw-production':
        dataset = datasets.HandwritingDataset('http://bsautermeister.de/handwriting-service')
    else:
        raise Exception('Unknown dataset.')
    dataset.show_info()
    if FLAGS.dataset_check:
        # Only verify the dataset is loadable, then quit.
        exit()
    with tf.name_scope('placeholders'):
        x_ph = tf.placeholder(tf.float32, shape=[None] + list(dataset.data_shape))
        y_ph = tf.placeholder(tf.int32, shape=[None, 1])
        dropout_ph = tf.placeholder(tf.float32)
        # Defaults to False so inference graphs skip augmentation.
        augment_ph = tf.placeholder_with_default(tf.constant(False, tf.bool), shape=[])
        # Stored in collections so the exported meta-graph can be re-fed.
        tf.add_to_collection('x_ph', x_ph)
        tf.add_to_collection('y_ph', y_ph)
        tf.add_to_collection('dropout_ph', dropout_ph)
        tf.add_to_collection('augment_ph', augment_ph)
    with tf.name_scope('data_augmentation'):
        def augment_data(input_data, angle, shift):
            """Randomly rotate (+/-angle deg) and translate (+/-shift px) a batch.

            NOTE(review): presumably input_data is an NHWC image batch --
            confirm against dataset.data_shape.
            """
            num_images_ = tf.shape(input_data)[0]
            # random rotate
            processed_data = tf.contrib.image.rotate(input_data,
                                                     tf.random_uniform([num_images_],
                                                                       maxval=math.pi / 180 * angle,
                                                                       minval=math.pi / 180 * -angle))
            # random shift
            # Rows are flattened 2x4 affine transform matrices; the mask
            # selects the two translation entries for randomization.
            base_row = tf.constant([1, 0, 0, 0, 1, 0, 0, 0], shape=[1, 8], dtype=tf.float32)
            base_ = tf.tile(base_row, [num_images_, 1])
            mask_row = tf.constant([0, 0, 1, 0, 0, 1, 0, 0], shape=[1, 8], dtype=tf.float32)
            mask_ = tf.tile(mask_row, [num_images_, 1])
            random_shift_ = tf.random_uniform([num_images_, 8], minval=-shift, maxval=shift, dtype=tf.float32)
            transforms_ = base_ + random_shift_ * mask_
            processed_data = tf.contrib.image.transform(images=processed_data,
                                                        transforms=transforms_)
            return processed_data
        preprocessed = tf.cond(augment_ph, lambda: augment_data(x_ph, angle=5.0, shift=2.49), lambda: x_ph)
    with tf.name_scope('model'):
        model_y = emb_layer = None
        if FLAGS.model == 'neural_net':
            model_y, emb_layer = models.neural_net(preprocessed, [128, 128, dataset.num_classes],
                                                   dropout_ph, FLAGS.weight_decay)
        elif FLAGS.model == 'conv_net':
            model_y, emb_layer = models.conv_net(preprocessed, [(16, 5), (32, 3)], [128, dataset.num_classes],
                                                 dropout_ph, FLAGS.weight_decay)
        else:
            raise Exception('Unknown network model type.')
        tf.add_to_collection('model_y', tf.nn.softmax(model_y))
    with tf.name_scope('loss'):
        y_one_hot = tf.one_hot(indices=y_ph, depth=dataset.num_classes, on_value=1.0, off_value=0.0, axis=-1)
        y_one_hot = tf.reshape(y_one_hot, [-1, dataset.num_classes])
        loss_ = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model_y, labels=y_one_hot))
        regularization_list = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # Summary records the pure cross-entropy, before weight decay is added.
        tf.summary.scalar('xe-loss', loss_)
        if len(regularization_list) > 0:
            loss_ += tf.add_n(regularization_list)
        train_ = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss_)
    with tf.name_scope('metrics'):
        model_out = tf.nn.softmax(model_y)
        reshaped_y_ph = tf.reshape(y_ph, [-1])
        _, accuracy_ = tf.metrics.accuracy(labels=reshaped_y_ph, predictions=tf.argmax(model_out, axis=1))
        tf.summary.scalar('accuracy', accuracy_)
    saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V1)
    summary_ = tf.summary.merge_all()
    with tf.Session() as sess:
        # delete old summaries
        summary_dir = 'summary'
        if tf.gfile.IsDirectory(summary_dir):
            tf.gfile.DeleteRecursively(summary_dir)
        train_writer = tf.summary.FileWriter(os.path.join(summary_dir, 'training'), sess.graph)
        valid_writer = tf.summary.FileWriter(os.path.join(summary_dir, 'validation'))
        sess.run(tf.global_variables_initializer())
        print('\nModel with {} trainable parameters.'.format(utils.tensor.get_num_trainable_params()))
        time.sleep(3)
        print('\nTraining...')
        f, ax = plt.subplots(2, 1)
        train_losses = {'step': [], 'value': []}
        valid_losses = {'step': [], 'value': []}
        valid_accuracy = {'step': [], 'value': []}
        step = 1
        loss_sum = 0.0
        loss_n = 0
        for epoch in range(FLAGS.train_epochs):
            print('\nStarting epoch {} / {}...'.format(epoch + 1, FLAGS.train_epochs))
            # Reset streaming-metric accumulators (tf.metrics) per epoch.
            sess.run(tf.local_variables_initializer())
            num_batches = int(dataset.train_size / FLAGS.batch_size)
            for b in range(num_batches):
                batch_x, batch_y = dataset.train_batch(FLAGS.batch_size)
                _, loss, summary = sess.run([train_, loss_, summary_],
                                            feed_dict={x_ph: batch_x,
                                                       y_ph: batch_y,
                                                       dropout_ph: FLAGS.dropout,
                                                       augment_ph: FLAGS.augmentation})
                loss_sum += loss
                loss_n += 1
                if step % 10 == 0:
                    # Log the running-average loss every 10 steps.
                    loss_avg = loss_sum / loss_n
                    train_losses['step'].append(step)
                    train_losses['value'].append(loss_avg)
                    print('Step {:3d} with loss: {:.5f}'.format(step, loss_avg))
                    loss_sum = 0.0
                    loss_n = 0
                    train_writer.add_summary(summary, step)
                    train_writer.flush()
                step += 1
            # Full validation pass at the end of every epoch (dropout off).
            valid_x, valid_y = dataset.valid()
            loss, accuracy, summary = sess.run([loss_, accuracy_, summary_],
                                               feed_dict={x_ph: valid_x,
                                                          y_ph: valid_y,
                                                          dropout_ph: 1.0})
            valid_losses['step'].append(step)
            valid_losses['value'].append(loss)
            valid_accuracy['step'].append(step)
            valid_accuracy['value'].append(accuracy)
            print('VALIDATION > Step {:3d} with loss: {:.5f}, accuracy: {:.4f}'.format(step, loss, accuracy))
            valid_writer.add_summary(summary, step)
            valid_writer.flush()
        if FLAGS.save_checkpoint:
            checkpoint_dir = 'checkpoint'
            if not os.path.isdir(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            # save checkpoint
            print('Saving checkpoint...')
            save_path = saver.save(sess, os.path.join(checkpoint_dir, 'model.ckpt'))
            print('Model saved in file: {}'.format(save_path))
        if FLAGS.save_embedding:
            log_dir = 'summary/validation'
            if not os.path.isdir(log_dir):
                os.makedirs(log_dir)
            valid_x, valid_y = dataset.valid()
            print('Saving embedding...')
            embvis = utils.embedding.EmbeddingVisualizer(sess, valid_x, valid_y, x_ph, emb_layer)
            embvis.write(log_dir, alphabetical=FLAGS.dataset != 'mnist')
        print('Showing plot...')
        ax[0].plot(train_losses['step'], train_losses['value'], label='Train loss')
        ax[0].plot(valid_losses['step'], valid_losses['value'], label='Valid loss')
        ax[0].legend(loc='upper right')
        ax[1].plot(valid_accuracy['step'], valid_accuracy['value'], label='Valid accuracy')
        ax[1].legend(loc='lower right')
        plt.show()
    print('DONE')
if __name__ == '__main__':
    # Command-line interface; parsed flags land in the module-level FLAGS.
    # NOTE(review): argparse's ``type=bool`` is a known footgun -- any
    # non-empty string (including "False") parses as True. Consider
    # ``action='store_true'`` flags in a follow-up; left as-is here to keep
    # the CLI backward compatible.
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('--batch_size', type=int, default=64, # large batch size (>>100) gives much better results
                        help='The batch size.')
    PARSER.add_argument('--learning_rate', type=float, default=0.001,
                        help='The initial learning rate.')
    PARSER.add_argument('--train_epochs', type=int, default=5,
                        help='The number of training epochs.')
    PARSER.add_argument('--dropout', type=float, default=0.5,
                        help='The keep probability of the dropout layer.')
    PARSER.add_argument('--weight_decay', type=float, default=0.001,
                        help='The lambda koefficient for weight decay regularization.')
    PARSER.add_argument('--model', type=str, default='neural_net',
                        help='The network model to use.')
    PARSER.add_argument('--save_checkpoint', type=bool, default=True,
                        help='Whether we save a checkpoint or not.')
    PARSER.add_argument('--save_embedding', type=bool, default=True,
                        help='Whether we save the embedding.')
    PARSER.add_argument('--dataset', type=str, default='mnist',
                        help='The dataset to use.')
    PARSER.add_argument('--augmentation', type=bool, default=False,
                        help='Whether data augmentation (rotate/shift) is used or not.')
    PARSER.add_argument('--dataset_check', type=bool, default=False,
                        help='Whether the dataset should be checked only.')
    FLAGS, UNPARSED = PARSER.parse_known_args()
    # Fixed the '--model' help text typo ('no use' -> 'to use').
    tf.app.run(main=main, argv=[sys.argv[0]] + UNPARSED)
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/units/evans_test.py | 9 | 2325 | """
A mockup "Foo" units class which supports
conversion and different tick formatting depending on the "unit".
Here the "unit" is just a scalar conversion factor, but this example shows mpl is
entirely agnostic to what kind of units client packages use
"""
from matplotlib.cbook import iterable
import matplotlib.units as units
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
class Foo:
    """A toy quantity: a raw value tagged with a scalar unit factor."""

    def __init__(self, val, unit=1.0):
        self.unit = unit
        # Store internally in "canonical" units (val scaled by unit).
        self._val = val * unit

    def value(self, unit):
        """Return the value expressed in ``unit`` (None -> native unit)."""
        target = self.unit if unit is None else unit
        return self._val / target
class FooConverter:
    """Teach matplotlib's units framework how to handle Foo objects."""

    @staticmethod
    def axisinfo(unit, axis):
        'return the Foo AxisInfo'
        # Only the two demo unit factors get custom tick handling.
        if unit not in (1.0, 2.0):
            return None
        return units.AxisInfo(
            majloc=ticker.IndexLocator(8, 0),
            majfmt=ticker.FormatStrFormatter("VAL: %s"),
            label='foo',
        )

    @staticmethod
    def convert(obj, unit, axis):
        """
        convert obj using unit. If obj is a sequence, return the
        converted sequence
        """
        # Plain numbers pass straight through untouched.
        if units.ConversionInterface.is_numlike(obj):
            return obj
        if iterable(obj):
            return [item.value(unit) for item in obj]
        return obj.value(unit)

    @staticmethod
    def default_units(x, axis):
        'return the default unit for x or None'
        if not iterable(x):
            return x.unit
        # First element's unit wins; empty sequences fall through to None.
        for item in x:
            return item.unit
# Register the converter so matplotlib transparently handles Foo values.
units.registry[Foo] = FooConverter()
# create some Foos
x = []
for val in range( 0, 50, 2 ):
    x.append( Foo( val, 1.0 ) )
# and some arbitrary y data
y = [i for i in range( len(x) ) ]
# plot specifying units
fig = plt.figure()
fig.suptitle("Custom units")
fig.subplots_adjust(bottom=0.2)
ax = fig.add_subplot(1,2,2)
# xunits=2.0 forces conversion through FooConverter.convert with unit=2.0.
ax.plot( x, y, 'o', xunits=2.0 )
for label in ax.get_xticklabels():
    label.set_rotation(30)
    label.set_ha('right')
ax.set_title("xunits = 2.0")
# plot without specifying units; will use the None branch for axisinfo
ax = fig.add_subplot(1,2,1)
ax.plot( x, y ) # uses default units
ax.set_title('default units')
for label in ax.get_xticklabels():
    label.set_rotation(30)
    label.set_ha('right')
plt.show()
| mit |
ekansa/open-context-py | opencontext_py/apps/imports/kobotoolbox/utilities.py | 1 | 9698 | from time import sleep
import uuid as GenUUID
import os, sys, shutil
import numpy as np
import pandas as pd
import xlrd
from django.conf import settings
from opencontext_py.apps.ocitems.manifest.models import Manifest
# Known spelling variants of label substrings, used to widen label lookups.
LABEL_ALTERNATIVE_PARTS = {
    # Keyed by project_uuid
    'DF043419-F23B-41DA-7E4D-EE52AF22F92F': {
        'PC': ['PC', 'PC '],
        'VDM': ['VDM', 'VdM', 'VdM ']
    }
}
# Kobo form column-name prefixes whose boolean sub-columns encode
# multi-select answers (one sub-column per option).
MULTI_VALUE_COL_PREFIXES = [
    'Preliminary Phasing/',
    'Trench Supervisor/',
    'Decorative Techniques and Motifs/Decorative Technique/',
    'Decorative Techniques and Motifs/Motif/',
    'Fabric Category/',
    'Vessel Part Present/',
    'Modification/',
    'Type of Composition Subject/',
]
# Provenance markers recording where an item's UUID was minted/found.
UUID_SOURCE_KOBOTOOLBOX = 'kobotoolbox' # UUID minted by kobotoolbox
UUID_SOURCE_OC_KOBO_ETL = 'oc-kobo-etl' # UUID minted by this ETL process
UUID_SOURCE_OC_LOOKUP = 'open-context' # UUID existing in Open Context
# Column naming the relationship type in linking-relation sheets.
LINK_RELATION_TYPE_COL = 'Relation_type'
def make_directory_files_df(attachments_path):
    """Walk a Kobo attachments directory and tabulate every file found.

    Returns a dataframe with columns 'path' (full file path), 'path-uuid'
    (the file's immediate parent directory name, which Kobo names after the
    submission uuid) and 'filename'.
    """
    records = []
    for dirpath, _dirnames, filenames in os.walk(attachments_path):
        parent_dir = os.path.split(os.path.abspath(dirpath))[-1]
        for filename in filenames:
            records.append((
                os.path.join(dirpath, filename),
                parent_dir,
                filename,
            ))
    return pd.DataFrame(records, columns=['path', 'path-uuid', 'filename'])
def list_excel_files(excel_dirpath):
    """Return full paths of the Excel (.xls/.xlsx) files directly inside a directory.

    Subdirectories are skipped; the walk is not recursive.
    """
    excel_paths = []
    for entry in os.listdir(excel_dirpath):
        entry_path = os.path.join(excel_dirpath, entry)
        if not os.path.isfile(entry_path):
            continue
        if entry.endswith(('.xlsx', '.xls')):
            excel_paths.append(entry_path)
    return excel_paths
def read_excel_to_dataframes(excel_filepath):
    """Load every sheet of an Excel workbook into a dict of dataframes.

    The dict is keyed by sheet name; progress is printed per sheet.
    """
    workbook = xlrd.open_workbook(excel_filepath)
    sheet_dfs = {}
    for sheet_name in workbook.sheet_names():
        print('Reading sheet ' + sheet_name)
        # This probably needs an upgraded pandas
        # sheet_dfs[sheet_name] = pd.read_excel(workbook, sheet_name=sheet_name, engine='xlrd')
        sheet_dfs[sheet_name] = pd.read_excel(workbook, sheet_name, engine='xlrd')
    return sheet_dfs
def reorder_first_columns(df, first_columns):
    """Return df with ``first_columns`` moved to the front (other order kept)."""
    ordered_cols = move_to_prefix(list(df.columns), first_columns)
    return df[ordered_cols]
def move_to_prefix(all_list, prefix_list):
    """Return a copy of ``all_list`` with ``prefix_list``'s elements first.

    Relative order of the remaining elements is preserved; the input is
    not mutated. Raises ValueError if a prefix element is absent.
    """
    remainder = list(all_list)  # copy so the caller's sequence is untouched
    for element in prefix_list:
        remainder.remove(element)
    return prefix_list + remainder
def drop_empty_cols(df):
    """Drop columns containing only empty strings / NaN.

    Note: empty strings are normalized to NaN *in place* on the input
    frame (preserved from the original behaviour); the returned frame is
    a copy with its index reset.
    """
    for column in df.columns:
        # Treat empty strings as missing so dropna can see them.
        df[column].replace('', np.nan, inplace=True)
    cleaned = df.dropna(axis=1, how='all').copy()
    cleaned.reset_index(drop=True, inplace=True)
    return cleaned
def update_multivalue_col_vals(df, multi_col_prefix):
    """Updates the values of multi-value nominal columns

    Kobo exports a multi-select question as one boolean sub-column per
    option ('<prefix><option>' holding 1/0). This rewrites each truthy
    cell to the option text, blanks the rest, drops sub-columns that were
    never selected, and renames survivors '<prefix>1', '<prefix>2', ...
    Mutates ``df`` in place and returns the cleaned frame.
    """
    multi_cols = [c for c in df.columns.tolist() if c.startswith(multi_col_prefix)]
    drop_cols = []
    for col in multi_cols:
        # Cast to str so 1, 1.0 and True all compare uniformly below.
        df[col] = df[col].astype(str)
        val_index = ((df[col] == '1')|(df[col] == '1.0')|(df[col] == 'True'))
        if df[val_index].empty:
            drop_cols.append(col)
            continue
        # Set rows to the column's value if "True" (1).
        df.loc[val_index, col] = col.split(
            multi_col_prefix
        )[-1].strip()
        # Set rows to blank if the column is not True (1).
        df.loc[~val_index, col] = np.nan
    # Drop the columns that where not actually used.
    df.drop(drop_cols, axis=1, inplace=True, errors='ignore')
    rename_cols = {}
    i = 0
    for col in multi_cols:
        if col in drop_cols:
            continue
        i += 1
        rename_cols[col] = multi_col_prefix + str(i)
    # Rename the columns that were used.
    df.rename(columns=rename_cols, inplace=True)
    return drop_empty_cols(df)
def update_multivalue_columns(df, multival_col_prefixes=None):
    """Collapse every configured multi-value prefix group in ``df``.

    Falls back to the module-level MULTI_VALUE_COL_PREFIXES when no
    explicit prefix list is supplied.
    """
    prefixes = (MULTI_VALUE_COL_PREFIXES if multival_col_prefixes is None
                else multival_col_prefixes)
    for prefix in prefixes:
        df = update_multivalue_col_vals(df, prefix)
    return df
def clean_up_multivalue_cols(df, skip_cols=[], delim='::'):
    """Cleans up multivalue columns where one column has values that concatenate values from other columns

    For each column ``col``, finds '/'-containing columns prefixed by
    ``col`` that carry exactly one non-null value, and strips that value
    (and, for delimited values, its leading hierarchy segment) out of
    ``col``'s concatenated strings. Mutates ``df`` in place and returns
    the frame with newly-empty columns dropped.

    NOTE(review): ``skip_cols=[]`` is a mutable default argument; harmless
    here because it is only read, but worth switching to ``None``/tuple.
    """
    poss_multi_value_cols = {}
    cols = df.columns.tolist()
    sub_cols = []
    for col in cols:
        if col in skip_cols:
            continue
        for other_col in cols:
            if other_col == col:
                # Same column, skip it.
                continue
            if not other_col.startswith(col) or not '/' in other_col:
                # This other column is not prefixed by col
                continue
            if other_col in sub_cols:
                # We want to avoid a situation where something/1 is considered to be a
                # parent of something/10
                continue
            other_col_vals = df[df[other_col].notnull()][other_col].unique().tolist()
            if len(other_col_vals) > 1:
                # This is not a column with a single value, so skip.
                continue
            sub_cols.append(other_col)
            if col not in poss_multi_value_cols:
                poss_multi_value_cols[col] = []
            # Add a tuple of the other column name, and it's unique value.
            poss_multi_value_cols[col].append((other_col, other_col_vals[0],))
    for col, rel_cols_vals in poss_multi_value_cols.items():
        for act_rel_col, act_val in rel_cols_vals:
            # Update the col by filtering for non null values for col,
            # and for where the act_rel_col has it's act_val.
            print('Remove the column {} value "{}" in the column {}'.format(act_rel_col, act_val, col))
            rep_indx = (df[col].notnull() & (df[act_rel_col] == act_val))
            # Remove the string we don't want, from col, which concatenates multiple
            # values.
            df.loc[rep_indx, col] = df[col].str.replace(act_val, '')
            if delim in act_val:
                # Remove the first part of a hiearchy delimited value from a likely parent column.
                df.loc[rep_indx, col] = df[col].str.replace(
                    act_val.split(delim)[0],
                    ''
                )
            # Now do final cleanup
            df.loc[rep_indx, col] = df[col].str.strip()
    # Now do a file cleanup, removing anything that's no longer present.
    df = drop_empty_cols(df)
    return df
def get_alternate_labels(label, project_uuid, config=None):
    """Returns a list of a label and alternative versions based on project config

    :param label: the label to look up (coerced to str).
    :param str project_uuid: key into config for project-specific substitutions.
    :param dict config: maps project_uuid -> {label_part: [alternatives]}.
        Defaults to LABEL_ALTERNATIVE_PARTS.
    :return list: the original label followed by de-duplicated variants.
    """
    label = str(label)
    if config is None:
        config = LABEL_ALTERNATIVE_PARTS
    if project_uuid not in config:
        return [label]
    # Always include the original label first so exact-match lookups
    # still succeed (the old code dropped it whenever a part matched,
    # and appended duplicates for every non-matching part).
    label_variations = [label]
    for label_part, label_alts in config[project_uuid].items():
        label_part = str(label_part)
        if label_part not in label:
            # No substitution is possible for this part.
            continue
        for label_alt in label_alts:
            variant = label.replace(label_part, label_alt)
            if variant not in label_variations:
                label_variations.append(variant)
    return label_variations
def parse_opencontext_url(s):
    """Returns a tuple of the item_type and uuid for an Open Context url"""
    is_oc_url = (
        s.startswith('https://opencontext.org')
        or s.startswith('http://opencontext.org')
    )
    if not is_oc_url:
        return None, None
    # Everything after the host is "<item_type>/<uuid>[/...]".
    path_part = s.split('opencontext.org/')[-1]
    path_segments = path_part.split('/')
    if len(path_segments) < 2:
        # Not enough segments for an item_type and a uuid.
        return None, None
    return path_segments[0], path_segments[1]
def parse_opencontext_uuid(s):
    """Returns an Open Context UUID from an OC URL"""
    # The uuid is the second element of the (item_type, uuid) tuple.
    return parse_opencontext_url(s)[1]
def parse_opencontext_type(s):
    """Returns the Open Context item type from an OC URL"""
    # The item_type is the first element of the (item_type, uuid) tuple.
    return parse_opencontext_url(s)[0]
def lookup_manifest_obj(
    label,
    project_uuid,
    item_type,
    label_alt_configs=None,
    class_uris=None
):
    """Returns a manifest object based on label variations"""
    # Build the candidate labels (original plus configured variants).
    candidate_labels = get_alternate_labels(
        label,
        project_uuid,
        config=label_alt_configs
    )
    query = Manifest.objects.filter(
        label__in=candidate_labels,
        item_type=item_type,
        project_uuid=project_uuid
    )
    if class_uris is not None:
        # Further filter if we have class_uris
        query = query.filter(class_uri__in=class_uris)
    return query.first()
def lookup_manifest_uuid(
    label,
    project_uuid,
    item_type,
    label_alt_configs=None,
    class_uris=None
):
    """Returns a manifest object uuid on label variations"""
    man_obj = lookup_manifest_obj(
        label,
        project_uuid,
        item_type,
        label_alt_configs=label_alt_configs,
        class_uris=class_uris
    )
    # getattr mirrors the original's "None if no match" behavior.
    return getattr(man_obj, 'uuid', None)
tgsmith61591/pyramid | pmdarima/utils/tests/test_array.py | 1 | 6931 |
from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, \
check_exog
from pmdarima.utils import get_callable
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pytest
import pandas as pd
import numpy as np
# Shared fixtures for the tests below.
# 1-d vector input for diff/diff_inv.
x = np.arange(5)
# 3x3 integer matrix (column-major fill via .T).
m = np.array([10, 5, 12, 23, 18, 3, 2, 0, 12]).reshape(3, 3).T
# Deterministic 4x4 exogenous frame (seeded RandomState).
X = pd.DataFrame.from_records(
    np.random.RandomState(2).rand(4, 4),
    columns=['a', 'b', 'c', 'd']
)
# need some infinite values in X for testing check_exog
X_nan = X.copy()
X_nan.loc[0, 'a'] = np.nan
X_inf = X.copy()
X_inf.loc[0, 'a'] = np.inf
# for diffinv
x_mat = (np.arange(9) + 1).reshape(3, 3).T
def test_diff():
    # Vector cases: (lag, differences, expected result).
    for lag, differences, expected in [
        (1, 1, np.ones(4)),
        (1, 2, np.zeros(3)),
        (2, 1, np.ones(3) * 2),
        (2, 2, np.zeros(1)),
    ]:
        assert_array_equal(diff(x, lag=lag, differences=differences), expected)

    # Matrix cases: (lag, differences, expected result).
    for lag, differences, expected in [
        (1, 1, np.array([[-5, -5, -2], [7, -15, 12]])),
        (1, 2, np.array([[12, -10, 14]])),
        (2, 1, np.array([[2, -20, 10]])),
    ]:
        assert_array_equal(diff(m, lag=lag, differences=differences), expected)

    # lag=2 with differences=2 consumes every row, leaving an empty result.
    assert diff(m, lag=2, differences=2).shape[0] == 0
@pytest.mark.parametrize(
    'arr,lag,differences,xi,expected', [
        # VECTORS -------------------------------------------------------------
        # > x = c(0, 1, 2, 3, 4)
        # > diffinv(x, lag=1, differences=1)
        # [1] 0 0 1 3 6 10
        pytest.param(x, 1, 1, None, [0, 0, 1, 3, 6, 10]),

        # > diffinv(x, lag=1, differences=2)
        # [1] 0 0 0 1 4 10 20
        pytest.param(x, 1, 2, None, [0, 0, 0, 1, 4, 10, 20]),

        # > diffinv(x, lag=2, differences=1)
        # [1] 0 0 0 1 2 4 6
        pytest.param(x, 2, 1, None, [0, 0, 0, 1, 2, 4, 6]),

        # > diffinv(x, lag=2, differences=2)
        # [1] 0 0 0 0 0 1 2 5 8
        pytest.param(x, 2, 2, None, [0, 0, 0, 0, 0, 1, 2, 5, 8]),

        # This is a test of the intermediate stage when x == [1, 0, 3, 2]
        pytest.param([1, 0, 3, 2], 1, 1, [0], [0, 1, 1, 4, 6]),

        # This is an intermediate stage when x == [0, 1, 2, 3, 4]
        pytest.param(x, 1, 1, [0], [0, 0, 1, 3, 6, 10]),

        # MATRICES ------------------------------------------------------------
        # > matrix(data=c(1, 2, 3, 4, 5, 6, 7, 8, 9), nrow=3, ncol=3)
        #      [,1] [,2] [,3]
        # [1,]    1    4    7
        # [2,]    2    5    8
        # [3,]    3    6    9

        # > diffinv(X, 1, 1)
        #      [,1] [,2] [,3]
        # [1,]    0    0    0
        # [2,]    1    4    7
        # [3,]    3    9   15
        # [4,]    6   15   24
        pytest.param(x_mat, 1, 1, None,
                     [[0, 0, 0],
                      [1, 4, 7],
                      [3, 9, 15],
                      [6, 15, 24]]),

        # > diffinv(X, 1, 2)
        #      [,1] [,2] [,3]
        # [1,]    0    0    0
        # [2,]    0    0    0
        # [3,]    1    4    7
        # [4,]    4   13   22
        # [5,]   10   28   46
        pytest.param(x_mat, 1, 2, None,
                     [[0, 0, 0],
                      [0, 0, 0],
                      [1, 4, 7],
                      [4, 13, 22],
                      [10, 28, 46]]),

        # > diffinv(X, 2, 1)
        #      [,1] [,2] [,3]
        # [1,]    0    0    0
        # [2,]    0    0    0
        # [3,]    1    4    7
        # [4,]    2    5    8
        # [5,]    4   10   16
        pytest.param(x_mat, 2, 1, None,
                     [[0, 0, 0],
                      [0, 0, 0],
                      [1, 4, 7],
                      [2, 5, 8],
                      [4, 10, 16]]),

        # > diffinv(X, 2, 2)
        #      [,1] [,2] [,3]
        # [1,]    0    0    0
        # [2,]    0    0    0
        # [3,]    0    0    0
        # [4,]    0    0    0
        # [5,]    1    4    7
        # [6,]    2    5    8
        # [7,]    5   14   23
        pytest.param(x_mat, 2, 2, None,
                     [[0, 0, 0],
                      [0, 0, 0],
                      [0, 0, 0],
                      [0, 0, 0],
                      [1, 4, 7],
                      [2, 5, 8],
                      [5, 14, 23]]),
    ]
)
def test_diff_inv(arr, lag, differences, xi, expected):
    """Cross-check diff_inv against R's diffinv for vectors and matrices."""
    res = diff_inv(arr, lag=lag, differences=differences, xi=xi)
    # np.float was removed in NumPy >= 1.24; the builtin float is the
    # equivalent (float64) dtype specifier.
    expected = np.array(expected, dtype=float)
    assert_array_equal(expected, res)
def test_concatenate():
    # Mixed scalar/array inputs flatten to one float vector.
    flattened = np.array([1.0, 0.0, 0.0, 0.0])
    assert_array_equal(c(1, np.zeros(3)), flattened)
    assert_array_equal(c([1], np.zeros(3)), flattened)
    # A lone scalar or singleton list becomes a 1-element array.
    for single in (1, [1]):
        assert_array_equal(c(single), np.ones(1))
    # No arguments at all yields None.
    assert c() is None
def test_corner_in_callable():
    # test the ValueError in the get-callable method
    # 'fake-key' is absent from the mapping, so the lookup must raise.
    with pytest.raises(ValueError):
        get_callable('fake-key', {'a': 1})
def test_corner():
    # Both diff and diff_inv reject lag < 1 and differences < 1.
    for bad_kwargs in ({'lag': 0}, {'differences': 0}):
        with pytest.raises(ValueError):
            diff(x=x, **bad_kwargs)
        with pytest.raises(ValueError):
            diff_inv(x=x, **bad_kwargs)

    # Passing in xi with the incorrect shape to a 2-d array
    with pytest.raises(IndexError):
        diff_inv(x=np.array([[1, 1], [1, 1]]), xi=np.array([[1]]))
def test_is_iterable():
    # Strings and None deliberately do not count as iterables here.
    for non_iterable in ("this string", None):
        assert not is_iterable(non_iterable)
    # Lists and numpy arrays do.
    for iterable in (["this", "list"], np.array([1, 2])):
        assert is_iterable(iterable)
def test_as_series():
    # Lists, ndarrays and Series all coerce to a pandas Series.
    for value in ([1, 2, 3], np.arange(5), pd.Series([1, 2, 3])):
        assert isinstance(as_series(value), pd.Series)
@pytest.mark.parametrize(
    'arr', [
        np.random.rand(5),
        pd.Series(np.random.rand(5)),
    ]
)
def test_check_exog_ndim_value_err(arr):
    # check_exog requires a 2-d structure; 1-d inputs must raise.
    with pytest.raises(ValueError):
        check_exog(arr)
@pytest.mark.parametrize('arr', [X_nan, X_inf])
def test_check_exog_infinite_value_err(arr):
    # NaN/inf entries must raise when finiteness is enforced.
    with pytest.raises(ValueError):
        check_exog(arr, force_all_finite=True)

    # show it passes when False (and returns the identical object when
    # copy=False and dtype coercion is disabled)
    assert check_exog(
        arr, force_all_finite=False, dtype=None, copy=False) is arr
def test_exog_pd_dataframes():
    # test with copy: an equal (but possibly distinct) frame comes back
    assert check_exog(X, force_all_finite=True, copy=True).equals(X)

    # test without copy: the very same object is returned
    assert check_exog(X, force_all_finite=True, copy=False) is X
def test_exog_np_array():
    X_np = np.random.RandomState(1).rand(5, 5)

    # show works on a list
    assert_array_almost_equal(X_np, check_exog(X_np.tolist()))
    # ... and on the ndarray itself
    assert_array_almost_equal(X_np, check_exog(X_np))
| mit |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/mpl_toolkits/axisartist/axislines.py | 7 | 26173 | """
Axislines includes modified implementation of the Axes class. The
biggest difference is that the artists responsible to draw axis line,
ticks, ticklabel and axis labels are separated out from the mpl's Axis
class, which are much more than artists in the original
mpl. Originally, this change was motivated to support curvilinear
grid. Here are a few reasons why I came up with a new axes class.
* "top" and "bottom" x-axis (or "left" and "right" y-axis) can have
different ticks (tick locations and labels). This is not possible
with the current mpl, although some twin axes trick can help.
* Curvilinear grid.
* angled ticks.
In the new axes class, xaxis and yaxis is set to not visible by
default, and new set of artist (AxisArtist) are defined to draw axis
line, ticks, ticklabels and axis label. Axes.axis attribute serves as
a dictionary of these artists, i.e., ax.axis["left"] is a AxisArtist
instance responsible to draw left y-axis. The default Axes.axis contains
"bottom", "left", "top" and "right".
AxisArtist can be considered as a container artist and
has following children artists which will draw ticks, labels, etc.
* line
* major_ticks, major_ticklabels
* minor_ticks, minor_ticklabels
* offsetText
* label
Note that these are separate artists from Axis class of the
original mpl, thus most of tick-related command in the original mpl
won't work, although some effort has made to work with. For example,
color and markerwidth of the ax.axis["bottom"].major_ticks will follow
those of Axes.xaxis unless explicitly specified.
In addition to AxisArtist, the Axes will have *gridlines* attribute,
which obviously draws grid lines. The gridlines needs to be separated
from the axis as some gridlines can never pass any axis.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib.axes as maxes
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.font_manager as font_manager
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, ScaledTranslation, \
IdentityTransform, TransformedPath, Bbox
from matplotlib.collections import LineCollection
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
import warnings
import numpy as np
import matplotlib.lines as mlines
from .axisline_style import AxislineStyle
from .axis_artist import AxisArtist, GridlinesCollection
class AxisArtistHelper(object):
    """
    Defines the interface an AxisArtist helper must implement.  Note that
    the first *axes* argument of every method will be the axes attribute
    of the caller artist.

        # LINE (spinal line?)

        def get_line(self, axes):
            # path : Path
            return path

        def get_line_transform(self, axes):
            # ...
            # trans : transform
            return trans

        # LABEL

        def get_label_pos(self, axes):
            # x, y : position
            return (x, y), trans

        def get_label_offset_transform(self, \
                axes,
                pad_points, fontprops, renderer,
                bboxes,
                ):
            # va : vertical alignment
            # ha : horizontal alignment
            # a : angle
            return trans, va, ha, a

        # TICK

        def get_tick_transform(self, axes):
            return trans

        def get_tick_iterators(self, axes):
            # iter : iterable object that yields (c, angle, l) where
            # c, angle, l is position, tick angle, and label
            return iter_major, iter_minor
    """

    class _Base(object):
        """
        Base class for axis helper.
        """
        def __init__(self):
            # delta1/delta2 widen the [0, 1] axes-coordinate interval a
            # little so ticks that land exactly on the boundary survive
            # floating-point noise.
            self.delta1, self.delta2 = 0.00001, 0.00001

        def update_lim(self, axes):
            # Subclasses that cache limit-dependent state override this.
            pass

    class Fixed(_Base):
        """
        Helper class for a fixed (in the axes coordinate) axis.
        """

        # Point (in axes coordinates) each spine location passes through.
        _default_passthru_pt = dict(left=(0, 0),
                                    right=(1, 0),
                                    bottom=(0, 0),
                                    top=(0, 1))

        def __init__(self,
                     loc, nth_coord=None,
                     ):
            """
            nth_coord = along which coordinate value varies
            in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis

            :param loc: one of "left", "right", "bottom", "top".
            """
            self._loc = loc

            if loc not in ["left", "right", "bottom", "top"]:
                raise ValueError("%s" % loc)

            # Infer the varying coordinate from the spine location when
            # not given explicitly.
            if nth_coord is None:
                if loc in ["left", "right"]:
                    nth_coord = 1
                elif loc in ["bottom", "top"]:
                    nth_coord = 0

            self.nth_coord = nth_coord

            super(AxisArtistHelper.Fixed, self).__init__()

            self.passthru_pt = self._default_passthru_pt[loc]

            # Unit segment along the varying coordinate; the fixed
            # coordinate is pinned to the pass-through point.
            _verts = np.array([[0., 0.],
                               [1., 1.]])
            fixed_coord = 1-nth_coord
            _verts[:,fixed_coord] = self.passthru_pt[fixed_coord]

            # axis line in transAxes
            self._path = Path(_verts)

        def get_nth_coord(self):
            return self.nth_coord

        # LINE

        def get_line(self, axes):
            return self._path

        def get_line_transform(self, axes):
            return axes.transAxes

        # LABEL

        def get_axislabel_transform(self, axes):
            return axes.transAxes

        def get_axislabel_pos_angle(self, axes):
            """
            label reference position in transAxes.

            get_label_transform() returns a transform of (transAxes+offset)
            """
            loc = self._loc
            # Midpoint of each spine and the tangent angle of the label.
            pos, angle_tangent = dict(left=((0., 0.5), 90),
                                      right=((1., 0.5), 90),
                                      bottom=((0.5, 0.), 0),
                                      top=((0.5, 1.), 0))[loc]

            return pos, angle_tangent

        # TICK

        def get_tick_transform(self, axes):
            # Blended transform: data units along nth_coord, axes units
            # along the other coordinate.
            trans_tick = [axes.get_xaxis_transform(),
                          axes.get_yaxis_transform()][self.nth_coord]

            return trans_tick

    class Floating(_Base):
        # Helper for an axis that floats at a fixed data value instead of
        # sitting on a spine.
        def __init__(self, nth_coord,
                     value):
            self.nth_coord = nth_coord
            # Data-coordinate value the floating axis passes through.
            self._value = value
            super(AxisArtistHelper.Floating,
                  self).__init__()

        def get_nth_coord(self):
            return self.nth_coord

        def get_line(self, axes):
            raise RuntimeError("get_line method should be defined by the derived class")
class AxisArtistHelperRectlinear(object):
    # Concrete AxisArtistHelper implementations for ordinary rectilinear
    # (non-curvilinear) axes.

    class Fixed(AxisArtistHelper.Fixed):

        def __init__(self,
                     axes, loc, nth_coord=None,
                     ):
            """
            nth_coord = along which coordinate value varies
            in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
            """
            super(AxisArtistHelperRectlinear.Fixed, self).__init__( \
                     loc, nth_coord)
            # The mpl Axis object whose locators/formatters drive ticks.
            self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]

        # TICK

        def get_tick_iterators(self, axes):
            """tick_loc, tick_angle, tick_label"""

            loc = self._loc

            # Tick marks point normal to the spine; labels run tangent.
            if loc in ["bottom", "top"]:
                angle_normal, angle_tangent = 90, 0
            else:
                angle_normal, angle_tangent = 0, 90

            # Query locator/formatter pairs for major and minor ticks.
            major = self.axis.major
            majorLocs = major.locator()
            major.formatter.set_locs(majorLocs)
            majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]

            minor = self.axis.minor
            minorLocs = minor.locator()
            minor.formatter.set_locs(minorLocs)
            minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]

            trans_tick = self.get_tick_transform(axes)

            # Maps tick coordinates into axes coordinates so we can test
            # whether a tick falls inside the [0, 1] view interval.
            tr2ax = trans_tick + axes.transAxes.inverted()

            def _f(locs, labels):
                for x, l in zip(locs, labels):

                    c = list(self.passthru_pt)  # copy
                    c[self.nth_coord] = x

                    # check if the tick point is inside axes
                    c2 = tr2ax.transform_point(c)
                    # delta1/delta2 give a little slack at the boundaries.
                    if 0. -self.delta1<= c2[self.nth_coord] <= 1.+self.delta2:
                        yield c, angle_normal, angle_tangent, l

            return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)

    class Floating(AxisArtistHelper.Floating):
        def __init__(self, axes, nth_coord,
                     passingthrough_point, axis_direction="bottom"):
            # passingthrough_point becomes self._value in the base class.
            super(AxisArtistHelperRectlinear.Floating, self).__init__( \
                nth_coord, passingthrough_point)
            self._axis_direction = axis_direction
            self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]

        def get_line(self, axes):
            # Unit segment along nth_coord; the other coordinate is pinned
            # at the axes-coordinate image of the pass-through data value.
            _verts = np.array([[0., 0.],
                               [1., 1.]])

            fixed_coord = 1-self.nth_coord
            trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
            p = trans_passingthrough_point.transform_point([self._value,
                                                            self._value])
            _verts[:,fixed_coord] = p[fixed_coord]

            return Path(_verts)

        def get_line_transform(self, axes):
            return axes.transAxes

        def get_axislabel_transform(self, axes):
            return axes.transAxes

        def get_axislabel_pos_angle(self, axes):
            """
            label reference position in transAxes.

            get_label_transform() returns a transform of (transAxes+offset)
            """
            loc = self._axis_direction
            #angle = dict(left=0,
            #             right=0,
            #             bottom=.5*np.pi,
            #             top=.5*np.pi)[loc]

            if self.nth_coord == 0:
                angle = 0
            else:
                angle = 90

            _verts = [0.5, 0.5]

            fixed_coord = 1-self.nth_coord
            trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
            p = trans_passingthrough_point.transform_point([self._value,
                                                            self._value])
            _verts[fixed_coord] = p[fixed_coord]
            # Hide the label when the floating axis lies outside the view.
            if not (0. <= _verts[fixed_coord] <= 1.):
                return None, None
            else:
                return _verts, angle

        def get_tick_transform(self, axes):
            return axes.transData

        def get_tick_iterators(self, axes):
            """tick_loc, tick_angle, tick_label"""
            loc = self._axis_direction

            # NOTE(review): this loc-based branch is dead code -- its result
            # is immediately overwritten by the nth_coord-based branch below.
            if loc in ["bottom", "top"]:
                angle_normal, angle_tangent = 90, 0
            else:
                angle_normal, angle_tangent = 0, 90

            if self.nth_coord == 0:
                angle_normal, angle_tangent = 90, 0
            else:
                angle_normal, angle_tangent = 0, 90

            #angle = 90 - 90 * self.nth_coord

            major = self.axis.major
            majorLocs = major.locator()
            major.formatter.set_locs(majorLocs)
            majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]

            minor = self.axis.minor
            minorLocs = minor.locator()
            minor.formatter.set_locs(minorLocs)
            minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]

            tr2ax = axes.transData + axes.transAxes.inverted()

            def _f(locs, labels):
                for x, l in zip(locs, labels):

                    c = [self._value, self._value]
                    c[self.nth_coord] = x
                    c1, c2 = tr2ax.transform_point(c)
                    # Only yield ticks whose position lies inside the axes
                    # (with delta slack along the varying coordinate).
                    if 0. <= c1 <= 1. and 0. <= c2 <= 1.:
                        if 0. - self.delta1 <= [c1, c2][self.nth_coord] <= 1. + self.delta2:
                            yield c, angle_normal, angle_tangent, l

            return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class GridHelperBase(object):
    """Base class for grid helpers: caches axes limits and recomputes the
    grid only when they change (or after an explicit invalidate())."""

    def __init__(self):
        # Force a refresh on the first update_lim() call.
        self._force_update = True
        self._old_limits = None
        super(GridHelperBase, self).__init__()

    def update_lim(self, axes):
        """Recompute the grid if the axes limits changed since last time."""
        x1, x2 = axes.get_xlim()
        y1, y2 = axes.get_ylim()
        limits = (x1, x2, y1, y2)
        if self._force_update or self._old_limits != limits:
            self._update(x1, x2, y1, y2)
            self._force_update = False
            self._old_limits = limits

    def _update(self, x1, x2, y1, y2):
        # Subclasses recompute cached grid state here.
        pass

    def invalidate(self):
        """Mark the cached grid as stale."""
        self._force_update = True

    def valid(self):
        """Return True while the cached grid is up to date."""
        return not self._force_update

    def get_gridlines(self, which, axis):
        """
        Return list of grid lines as a list of paths (list of points).

        *which* : "major" or "minor"
        *axis* : "both", "x" or "y"
        """
        return []

    def new_gridlines(self, ax):
        """
        Create and return a new GridlineCollection instance.

        *which* : "major" or "minor"
        *axis* : "both", "x" or "y"
        """
        gridlines = GridlinesCollection(
            None, transform=ax.transData,
            colors=rcParams['grid.color'],
            linestyles=rcParams['grid.linestyle'],
            linewidths=rcParams['grid.linewidth'])
        ax._set_artist_props(gridlines)
        gridlines.set_grid_helper(self)
        ax.axes._set_artist_props(gridlines)
        # set_clip_path must be deferred until Axes.cla() has created the
        # patch; it is done inside cla.
        return gridlines
class GridHelperRectlinear(GridHelperBase):
    # Grid helper for plain rectilinear axes; creates fixed/floating
    # AxisArtists backed by the rectlinear helper classes above.

    def __init__(self, axes):
        super(GridHelperRectlinear, self).__init__()
        self.axes = axes

    def new_fixed_axis(self, loc,
                       nth_coord=None,
                       axis_direction=None,
                       offset=None,
                       axes=None,
                       ):
        # Build an AxisArtist pinned to one of the four spine locations.
        if axes is None:
            warnings.warn("'new_fixed_axis' explicitly requires the axes keyword.")
            axes = self.axes

        _helper = AxisArtistHelperRectlinear.Fixed(axes, loc, nth_coord)

        if axis_direction is None:
            axis_direction = loc
        axisline = AxisArtist(axes, _helper, offset=offset,
                              axis_direction=axis_direction,
                              )

        return axisline

    def new_floating_axis(self, nth_coord, value,
                          axis_direction="bottom",
                          axes=None,
                          ):
        # Build an AxisArtist that floats at a fixed data value.
        if axes is None:
            warnings.warn("'new_floating_axis' explicitly requires the axes keyword.")
            axes = self.axes

        passthrough_point = (value, value)
        transform = axes.transData

        _helper = AxisArtistHelperRectlinear.Floating( \
            axes, nth_coord, value, axis_direction)

        axisline = AxisArtist(axes, _helper)

        # Clip the floating line to the axes box.
        axisline.line.set_clip_on(True)
        axisline.line.set_clip_box(axisline.axes.bbox)

        return axisline

    def get_gridlines(self, which="major", axis="both"):
        """
        return list of gridline coordinates in data coordinates.

        *which* : "major" or "minor"
        *axis* : "both", "x" or "y"
        """

        gridlines = []

        if axis in ["both", "x"]:
            locs = []
            y1, y2 = self.axes.get_ylim()
            #if self.axes.xaxis._gridOnMajor:
            if which in ["both", "major"]:
                locs.extend(self.axes.xaxis.major.locator())
            #if self.axes.xaxis._gridOnMinor:
            if which in ["both", "minor"]:
                locs.extend(self.axes.xaxis.minor.locator())

            for x in locs:
                gridlines.append([[x, x], [y1, y2]])

        if axis in ["both", "y"]:
            x1, x2 = self.axes.get_xlim()
            locs = []
            # NOTE(review): the y branch keys off the axis grid state
            # (_gridOnMajor/_gridOnMinor) while the x branch above keys
            # off *which* -- an upstream inconsistency; left as-is.
            if self.axes.yaxis._gridOnMajor:
                #if which in ["both", "major"]:
                locs.extend(self.axes.yaxis.major.locator())
            if self.axes.yaxis._gridOnMinor:
                #if which in ["both", "minor"]:
                locs.extend(self.axes.yaxis.minor.locator())

            for y in locs:
                gridlines.append([[x1, x2], [y, y]])

        return gridlines
class SimpleChainedObjects(object):
    """Proxy that fans attribute access and calls out to a list of objects."""

    def __init__(self, objects):
        self._objects = objects

    def __getattr__(self, k):
        # Chain the lookup: the result is another proxy wrapping the
        # corresponding attribute of every underlying object.
        return SimpleChainedObjects([getattr(obj, k) for obj in self._objects])

    def __call__(self, *kl, **kwargs):
        # Invoke every wrapped callable; return values are discarded.
        for target in self._objects:
            target(*kl, **kwargs)
class Axes(maxes.Axes):
    # Axes subclass whose axis lines/ticks/labels are drawn by AxisArtist
    # instances (held in self.axis) instead of the standard mpl Axis.

    class AxisDict(dict):
        # dict of AxisArtists that also supports tuple/slice access and
        # forwards calls to the classic Axes.axis() method.
        def __init__(self, axes):
            self.axes = axes
            super(Axes.AxisDict, self).__init__()

        def __getitem__(self, k):
            if isinstance(k, tuple):
                # ax.axis["top", "right"] -> chained proxy over both.
                r = SimpleChainedObjects([dict.__getitem__(self, k1) for k1 in k])
                return r
            elif isinstance(k, slice):
                # Only the full slice ax.axis[:] is supported.
                if k.start == None and k.stop == None and k.step == None:
                    r = SimpleChainedObjects(list(six.itervalues(self)))
                    return r
                else:
                    raise ValueError("Unsupported slice")
            else:
                return dict.__getitem__(self, k)

        def __call__(self, *v, **kwargs):
            # Keep ax.axis(...) working like the stock Axes method.
            return maxes.Axes.axis(self.axes, *v, **kwargs)

    def __init__(self, *kl, **kw):
        # Optional grid_helper keyword selects the grid/axis machinery.
        helper = kw.pop("grid_helper", None)
        self._axisline_on = True
        if helper:
            self._grid_helper = helper
        else:
            self._grid_helper = GridHelperRectlinear(self)
        super(Axes, self).__init__(*kl, **kw)
        self.toggle_axisline(True)

    def toggle_axisline(self, b=None):
        # Switch between AxisArtist rendering (spines and stock axes
        # hidden) and the default mpl rendering.
        if b is None:
            b = not self._axisline_on
        if b:
            self._axisline_on = True
            for s in self.spines.values():
                s.set_visible(False)
            self.xaxis.set_visible(False)
            self.yaxis.set_visible(False)
        else:
            self._axisline_on = False
            for s in self.spines.values():
                s.set_visible(True)
            self.xaxis.set_visible(True)
            self.yaxis.set_visible(True)

    def _init_axis(self):
        super(Axes, self)._init_axis()

    def _init_axis_artists(self, axes=None):
        # Create one AxisArtist per spine location; top/right start with
        # hidden labels like standard mpl axes.
        if axes is None:
            axes = self

        self._axislines = self.AxisDict(self)
        new_fixed_axis = self.get_grid_helper().new_fixed_axis
        for loc in ["bottom", "top", "left", "right"]:
            self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes,
                                                  axis_direction=loc)

        for axisline in [self._axislines["top"], self._axislines["right"]]:
            axisline.label.set_visible(False)
            axisline.major_ticklabels.set_visible(False)
            axisline.minor_ticklabels.set_visible(False)

    def _get_axislines(self):
        return self._axislines

    # ax.axis is the AxisDict, not the classic method (which remains
    # reachable through AxisDict.__call__).
    axis = property(_get_axislines)

    def new_gridlines(self, grid_helper=None):
        """
        Create and return a new GridlineCollection instance.

        *which* : "major" or "minor"
        *axis* : "both", "x" or "y"
        """
        if grid_helper is None:
            grid_helper = self.get_grid_helper()

        gridlines = grid_helper.new_gridlines(self)

        return gridlines

    def _init_gridlines(self, grid_helper=None):
        # It is done inside the cla.
        gridlines = self.new_gridlines(grid_helper)

        self.gridlines = gridlines

    def cla(self):
        # gridlines need to be created before cla() since cla calls grid()
        self._init_gridlines()
        super(Axes, self).cla()

        # the clip_path should be set after Axes.cla() since that's
        # when a patch is created.
        self.gridlines.set_clip_path(self.axes.patch)

        self._init_axis_artists()

    def get_grid_helper(self):
        return self._grid_helper

    def grid(self, b=None, which='major', axis="both", **kwargs):
        """
        Toggle the gridlines, and optionally set the properties of the lines.
        """
        # There is some discrepancy between the behavior of grid in
        # axes_grid and the original mpl's grid, because axes_grid
        # explicitly sets the visibility of the gridlines.

        super(Axes, self).grid(b, which=which, axis=axis, **kwargs)
        if not self._axisline_on:
            return

        if b is None:
            # Mirror the stock toggle semantics: on if any grid flag is set.
            if self.axes.xaxis._gridOnMinor or self.axes.xaxis._gridOnMajor or \
               self.axes.yaxis._gridOnMinor or self.axes.yaxis._gridOnMajor:
                b=True
            else:
                b=False

        self.gridlines.set_which(which)
        self.gridlines.set_axis(axis)
        self.gridlines.set_visible(b)

        if len(kwargs):
            martist.setp(self.gridlines, **kwargs)

    def get_children(self):
        # Include the AxisArtists and gridlines while axisline mode is on.
        if self._axisline_on:
            children = list(six.itervalues(self._axislines)) + [self.gridlines]
        else:
            children = []
        children.extend(super(Axes, self).get_children())
        return children

    def invalidate_grid_helper(self):
        self._grid_helper.invalidate()

    def new_fixed_axis(self, loc, offset=None):
        gh = self.get_grid_helper()
        axis = gh.new_fixed_axis(loc,
                                 nth_coord=None,
                                 axis_direction=None,
                                 offset=offset,
                                 axes=self,
                                 )
        return axis

    def new_floating_axis(self, nth_coord, value,
                          axis_direction="bottom",
                          ):
        gh = self.get_grid_helper()
        axis = gh.new_floating_axis(nth_coord, value,
                                    axis_direction=axis_direction,
                                    axes=self)
        return axis

    def draw(self, renderer, inframe=False):
        if not self._axisline_on:
            super(Axes, self).draw(renderer, inframe)
            return

        # Temporarily append the axis artists so the base draw picks
        # them up, then restore the original artist list.
        orig_artists = self.artists
        self.artists = self.artists + list(self._axislines.values()) + [self.gridlines]

        super(Axes, self).draw(renderer, inframe)

        self.artists = orig_artists

    def get_tightbbox(self, renderer, call_axes_locator=True):

        bb0 = super(Axes, self).get_tightbbox(renderer, call_axes_locator)

        if not self._axisline_on:
            return bb0

        bb = [bb0]

        # Grow the bbox to cover every visible axis artist.
        for axisline in list(six.itervalues(self._axislines)):
            if not axisline.get_visible():
                continue

            bb.append(axisline.get_tightbbox(renderer))
            # if axisline.label.get_visible():
            #     bb.append(axisline.label.get_window_extent(renderer))

            # if axisline.major_ticklabels.get_visible():
            #     bb.extend(axisline.major_ticklabels.get_window_extents(renderer))
            # if axisline.minor_ticklabels.get_visible():
            #     bb.extend(axisline.minor_ticklabels.get_window_extents(renderer))
            # if axisline.major_ticklabels.get_visible() or \
            #    axisline.minor_ticklabels.get_visible():
            #     bb.append(axisline.offsetText.get_window_extent(renderer))

        #bb.extend([c.get_window_extent(renderer) for c in artists \
        #           if c.get_visible()])

        _bbox = Bbox.union([b for b in bb if b and (b.width!=0 or b.height!=0)])

        return _bbox
Subplot = maxes.subplot_class_factory(Axes)
class AxesZero(Axes):
    """Axes that additionally carries hidden floating axes ("xzero" and
    "yzero") passing through the data-coordinate origin."""

    def __init__(self, *kl, **kw):
        super(AxesZero, self).__init__(*kl, **kw)

    def _init_axis_artists(self):
        super(AxesZero, self)._init_axis_artists()

        make_floating = self._grid_helper.new_floating_axis
        # (axis key, varying coordinate, label direction) for each zero line.
        for key, coord, direction in (("xzero", 0, "bottom"),
                                      ("yzero", 1, "left")):
            zero_axis = make_floating(nth_coord=coord,
                                      value=0.,
                                      axis_direction=direction,
                                      axes=self)
            zero_axis.line.set_clip_path(self.patch)
            # Hidden by default; callers opt in via ax.axis[key].set_visible(True).
            zero_axis.set_visible(False)
            self._axislines[key] = zero_axis
# Subplot-capable variant of AxesZero.
SubplotZero = maxes.subplot_class_factory(AxesZero)

# Dead demo code kept for reference; never executes.
if 0:
    #if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig = plt.figure(1, (4,3))

    ax = SubplotZero(fig, 1, 1, 1)
    fig.add_subplot(ax)

    ax.axis["xzero"].set_visible(True)
    ax.axis["xzero"].label.set_text("Axis Zero")

    for n in ["top", "right"]:
        ax.axis[n].set_visible(False)

    xx = np.arange(0, 2*np.pi, 0.01)
    ax.plot(xx, np.sin(xx))
    ax.set_ylabel("Test")
    plt.draw()
    plt.show()
# Manual smoke test: plots a sine curve with outward top/bottom ticks
# when the module is run directly.
if __name__ == "__main__":
    #if 1:
    import matplotlib.pyplot as plt
    fig = plt.figure(1, (4,3))
    ax = Subplot(fig, 1, 1, 1)
    fig.add_subplot(ax)

    xx = np.arange(0, 2*np.pi, 0.01)
    ax.plot(xx, np.sin(xx))
    ax.set_ylabel("Test")

    ax.axis["top"].major_ticks.set_tick_out(True) #set_tick_direction("out")
    ax.axis["bottom"].major_ticks.set_tick_out(True) #set_tick_direction("out")

    #ax.axis["bottom"].set_tick_direction("in")
    ax.axis["bottom"].set_label("Tk0")

    plt.draw()
    plt.show()
| mit |
chuckchen/spark | python/pyspark/sql/group.py | 23 | 10681 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import sys

from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.group_ops import PandasGroupedOpsMixin
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
__all__ = ["GroupedData"]
def dfapi(f):
    """Decorator for no-argument GroupedData methods.

    Replaces the decorated (documentation-only) method with one that
    forwards to the same-named method on the JVM-side grouped data and
    wraps the result in a :class:`DataFrame`.
    """
    # functools.wraps copies __name__/__doc__ like the old manual code,
    # plus __module__, __qualname__, __dict__ and __wrapped__.
    @functools.wraps(f)
    def _api(self):
        jdf = getattr(self._jgd, f.__name__)()
        return DataFrame(jdf, self.sql_ctx)
    return _api
def df_varargs_api(f):
    """Decorator for varargs column-name GroupedData methods.

    Replaces the decorated (documentation-only) method with one that
    forwards the column names to the same-named JVM-side method and
    wraps the result in a :class:`DataFrame`.
    """
    # functools.wraps copies __name__/__doc__ like the old manual code,
    # plus __module__, __qualname__, __dict__ and __wrapped__.
    @functools.wraps(f)
    def _api(self, *cols):
        jdf = getattr(self._jgd, f.__name__)(_to_seq(self.sql_ctx._sc, cols))
        return DataFrame(jdf, self.sql_ctx)
    return _api
class GroupedData(PandasGroupedOpsMixin):
"""
A set of methods for aggregations on a :class:`DataFrame`,
created by :func:`DataFrame.groupBy`.
.. versionadded:: 1.3
"""
    def __init__(self, jgd, df):
        # Py4J handle to the JVM-side RelationalGroupedDataset.
        self._jgd = jgd
        # Parent DataFrame this grouping was created from.
        self._df = df
        self.sql_ctx = df.sql_ctx
    def agg(self, *exprs):
        """Compute aggregates and returns the result as a :class:`DataFrame`.

        The available aggregate functions can be:

        1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`

        2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`

           .. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
               a full shuffle is required. Also, all the data of a group will be loaded into
               memory, so the user should be aware of the potential OOM risk if data is skewed
               and certain groups are too large to fit in memory.

           .. seealso:: :func:`pyspark.sql.functions.pandas_udf`

        If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
        is the column to perform aggregation on, and the value is the aggregate function.

        Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        exprs : dict
            a dict mapping from column name (string) to aggregate functions (string),
            or a list of :class:`Column`.

        Notes
        -----
        Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
        in a single call to this function.

        Examples
        --------
        >>> gdf = df.groupBy(df.name)
        >>> sorted(gdf.agg({"*": "count"}).collect())
        [Row(name='Alice', count(1)=1), Row(name='Bob', count(1)=1)]

        >>> from pyspark.sql import functions as F
        >>> sorted(gdf.agg(F.min(df.age)).collect())
        [Row(name='Alice', min(age)=2), Row(name='Bob', min(age)=5)]

        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
        >>> @pandas_udf('int', PandasUDFType.GROUPED_AGG)  # doctest: +SKIP
        ... def min_udf(v):
        ...     return v.min()
        >>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
        [Row(name='Alice', min_udf(age)=2), Row(name='Bob', min_udf(age)=5)]
        """
        assert exprs, "exprs should not be empty"
        if len(exprs) == 1 and isinstance(exprs[0], dict):
            # Dict form: {column name: aggregate function name}.
            jdf = self._jgd.agg(exprs[0])
        else:
            # Columns
            assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
            # The JVM API takes the first Column separately from the
            # remaining varargs Seq.
            jdf = self._jgd.agg(exprs[0]._jc,
                                _to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
        return DataFrame(jdf, self.sql_ctx)
    @dfapi
    def count(self):
        """Counts the number of records for each group.

        .. versionadded:: 1.3.0

        Examples
        --------
        >>> sorted(df.groupBy(df.age).count().collect())
        [Row(age=2, count=1), Row(age=5, count=1)]
        """
        # Body intentionally empty: @dfapi forwards to self._jgd.count().
@df_varargs_api
def mean(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().mean('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().mean('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
def avg(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().avg('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().avg('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
    @df_varargs_api
    def max(self, *cols):
        """Computes the max value for each numeric column for each group.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        cols : str
            column names. Non-numeric columns are ignored.

        Examples
        --------
        >>> df.groupBy().max('age').collect()
        [Row(max(age)=5)]
        >>> df3.groupBy().max('age', 'height').collect()
        [Row(max(age)=5, max(height)=85)]
        """
    @df_varargs_api
    def min(self, *cols):
        """Computes the min value for each numeric column for each group.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        cols : str
            column names. Non-numeric columns are ignored.

        Examples
        --------
        >>> df.groupBy().min('age').collect()
        [Row(min(age)=2)]
        >>> df3.groupBy().min('age', 'height').collect()
        [Row(min(age)=2, min(height)=80)]
        """
    @df_varargs_api
    def sum(self, *cols):
        """Computes the sum for each numeric column for each group.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        cols : str
            column names. Non-numeric columns are ignored.

        Examples
        --------
        >>> df.groupBy().sum('age').collect()
        [Row(sum(age)=7)]
        >>> df3.groupBy().sum('age', 'height').collect()
        [Row(sum(age)=7, sum(height)=165)]
        """
def pivot(self, pivot_col, values=None):
"""
Pivots a column of the current :class:`DataFrame` and perform the specified aggregation.
There are two versions of pivot function: one that requires the caller to specify the list
of distinct values to pivot on, and one that does not. The latter is more concise but less
efficient, because Spark needs to first compute the list of distinct values internally.
.. versionadded:: 1.6.0
Parameters
----------
pivot_col : str
Name of the column to pivot.
values :
List of values that will be translated to columns in the output DataFrame.
Examples
--------
# Compute the sum of earnings for each year by course with each course as a separate column
>>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
[Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
# Or without specifying column values (less efficient)
>>> df4.groupBy("year").pivot("course").sum("earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
>>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
"""
if values is None:
jgd = self._jgd.pivot(pivot_col)
else:
jgd = self._jgd.pivot(pivot_col, values)
return GroupedData(jgd, self._df)
def _test():
    """Run this module's doctests against a throw-away local SparkSession.

    Builds the ``df``/``df3``/``df4``/``df5`` fixtures referenced by the
    doctest examples above, runs ``doctest.testmod`` on ``pyspark.sql.group``
    and exits with a non-zero status on any failure.
    """
    # Import everything this helper needs locally (matching its existing
    # local-import style) so it does not depend on module-top imports.
    import doctest
    import sys
    from pyspark.sql import Row, SparkSession
    from pyspark.sql.types import IntegerType, StringType, StructField, StructType
    import pyspark.sql.group
    globs = pyspark.sql.group.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.group tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    # df: two rows with an explicit (age, name) schema.
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    # df3: adds a height column for multi-column aggregate examples.
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
                                   Row(name='Bob', age=5, height=85)]).toDF()
    # df4: course earnings per year, used by the pivot examples.
    globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
                                   Row(course="Java", year=2012, earnings=20000),
                                   Row(course="dotNET", year=2012, earnings=5000),
                                   Row(course="dotNET", year=2013, earnings=48000),
                                   Row(course="Java", year=2013, earnings=30000)]).toDF()
    # df5: same data nested under a struct column, for dotted-path pivots.
    globs['df5'] = sc.parallelize([
        Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
        Row(training="junior", sales=Row(course="Java", year=2012, earnings=20000)),
        Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
        Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
        Row(training="expert", sales=Row(course="Java", year=2013, earnings=30000))]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.group, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        sys.exit(-1)
# Run the module doctests when executed directly.
if __name__ == "__main__":
    _test()
| apache-2.0 |
ghislainp/iris | docs/iris/example_code/Meteorology/TEC.py | 12 | 1054 | """
Ionosphere space weather
========================
This space weather example plots a filled contour of rotated pole point
data with a shaded relief image underlay. The plot shows aggregated
vertical electron content in the ionosphere.
The plot exhibits an interesting outline effect due to excluding data
values below a certain threshold.
"""
import matplotlib.pyplot as plt
import numpy.ma as ma
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
    """Plot aggregated vertical total electron content over a relief map."""
    # Load the "total electron content" cube.
    filename = iris.sample_data_path('space_weather.nc')
    cube = iris.load_cube(filename, 'total electron content')

    # Explicitly mask negative electron content.
    cube.data = ma.masked_less(cube.data, 0)

    # Plot the cube using one hundred colour levels.
    qplt.contourf(cube, 100)
    plt.title('Total Electron Content')
    plt.xlabel('longitude / degrees')
    plt.ylabel('latitude / degrees')
    # NOTE(review): stock_img/coastlines presumably require the map-aware axes
    # that iris/quickplot set up for geographic cubes -- confirm with cartopy.
    plt.gca().stock_img()
    plt.gca().coastlines()
    iplt.show()
# Generate the plot when run as a script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ycaihua/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm

# Keep only classes 1 and 2 of the iris data, restricted to the first two
# features so the decision surface can be drawn in 2-D.
iris = datasets.load_iris()
X = iris.data
y = iris.target

X = X[y != 0, :2]
y = y[y != 0]

n_sample = len(X)

# Shuffle deterministically, then hold out the last 10% as a test set.
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
# builtin `float` instead of the removed alias `np.float` (NumPy >= 1.24).
y = y[order].astype(float)

# Slice bounds must be integers: `X[:.9 * n_sample]` raises TypeError in
# modern NumPy/Python, so compute the split index explicitly.
n_train = int(0.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]

# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
    clf = svm.SVC(kernel=kernel, gamma=10)
    clf.fit(X_train, y_train)

    plt.figure(fig_num)
    plt.clf()
    plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)

    # Circle out the test data
    plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)

    plt.axis('tight')
    x_min = X[:, 0].min()
    x_max = X[:, 0].max()
    y_min = X[:, 1].min()
    y_max = X[:, 1].max()

    # Evaluate the decision function on a 200x200 grid spanning the data.
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
                levels=[-.5, 0, .5])

    plt.title(kernel)
plt.show()
| bsd-3-clause |
minxuancao/shogun | examples/undocumented/python_modular/graphical/interactive_gp_demo.py | 10 | 14176 | #
# This program is free software you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation either version 3 of the License, or
# (at your option) any later version.
#
# Written (C) 2012 Heiko Strathmann, based on interactive_svm_demo by Christian
# Widmer which itself is based on PyQT Demo by Eli Bendersky
#
"""
Shogun Gaussian processes demo based on interactive SVM demo by Christian \
Widmer and Soeren Sonnenburg which itself is based on PyQT Demo by Eli Bendersky
Work to be done on parameter (e.g. kernel width) optimization.
Heiko Strathmann/Cameron Lai
License: GPLv3
"""
import sys, os, csv
import scipy as SP
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy import *
import matplotlib
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from modshogun import *
from modshogun import *
from modshogun import *
import util
class Form(QMainWindow):
    """Main window of the interactive SHOGUN Gaussian-process demo.

    NOTE(review): legacy Python 2 / PyQt4 code (print statements, string-based
    SIGNAL connections); it will not run unmodified under Python 3 / PyQt5.
    """

    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        self.setWindowTitle('SHOGUN interactive demo')

        self.series_list_model = QStandardItemModel()

        self.create_menu()
        self.create_main_frame()
        self.create_status_bar()
        self.create_toy_data()
        self.on_show()

    def on_show(self):
        """Redraw the scatter of the current data points and refresh the stats list."""
        self.axes.clear()
        self.axes.plot(self.x, self.y, 'ro')
        self.axes.set_xlim((self.xmin,self.xmax))
        self.axes.set_ylim((self.ymin,self.ymax))
        self.axes.grid(True)
        self.canvas.draw()
        self.fill_series_list(self.get_stats())

    def on_about(self):
        """Show the module docstring in an About box."""
        msg = __doc__
        QMessageBox.about(self, "About the demo", msg.strip())

    def fill_series_list(self, names):
        """Replace the side-panel list entries with the given strings."""
        self.series_list_model.clear()
        for name in names:
            item = QStandardItem(name)
            item.setCheckState(Qt.Unchecked)
            item.setCheckable(False)
            self.series_list_model.appendRow(item)

    def onclick(self, event):
        """Matplotlib mouse handler: append the clicked point to the data set."""
        print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
        x=SP.append(self.x, event.xdata)
        self.y=SP.append(self.y, event.ydata)
        # Keep x as a column vector (n, 1); RealFeatures below expects this layout.
        self.x= x[:,SP.newaxis]
        self.on_show()
        self.status_text.setText("New data point: x=%f, y=%f"%(event.xdata, event.ydata))

    def create_menu(self):
        """Build the File and Help menus.

        NOTE(review): quit_action is created but never added to a menu -- the
        add_actions call for the File menu is commented out below.
        """
        self.file_menu = self.menuBar().addMenu("&File")

        #load_action = self.create_action("&Load file",
        #    shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
        quit_action = self.create_action("&Quit", slot=self.close,
            shortcut="Ctrl+Q", tip="Close the application")

        #self.add_actions(self.file_menu,
        #    (load_action, None, quit_action))

        self.help_menu = self.menuBar().addMenu("&Help")
        about_action = self.create_action("&About",
            shortcut='F1', slot=self.on_about,
            tip='About the demo')

        self.add_actions(self.help_menu, (about_action,))

    def clear_data(self):
        """Drop all data points and reset the axes limits to the defaults."""
        self.x=SP.array([])
        self.y=SP.array([])
        self.xmin=-5
        self.xmax=5
        self.ymin=-5
        self.ymax=5
        self.on_show()
        self.status_text.setText("Data cleared")

    def enable_widgets(self):
        """Enable only the hyper-parameter fields relevant to the chosen kernel."""
        kernel_name = self.kernel_combo.currentText()
        if kernel_name == "Linear":
            self.sigma.setDisabled(True)
            self.degree.setDisabled(True)
        elif kernel_name == "Polynomial":
            self.sigma.setDisabled(True)
            self.degree.setEnabled(True)
        elif kernel_name == "Gaussian":
            self.sigma.setEnabled(True)
            self.degree.setDisabled(True)

    def get_stats(self):
        """Return the display strings for the side panel."""
        num_train = len(self.x)
        str_train = "num training points: %i" % num_train
        # NOTE(review): this second label reports the *test* point count taken
        # from the nTest field; the text probably should read "num test
        # points" (string left unchanged here).
        str_test = "num training points: %s" % self.nTest.text()

        return (str_train, str_test)

    def create_toy_data(self):
        """Generate noisy sine data from the GUI amplitude/frequency/noise fields."""
        #0. generate Toy-Data; just samples from a superposition of a sin + linear trend
        x = SP.arange(self.xmin,self.xmax,(self.xmax-self.xmin)/100.0)

        C = 2 #offset
        b = 0

        y = b*x + C + float(self.sine_amplitude.text())*SP.sin(float(self.sine_freq.text())*x)
        # dy = b + 1*SP.cos(x)
        y += float(self.noise_level.text())*random.randn(y.shape[0])

        # Center the targets and keep x as a column vector.
        self.y=y-y.mean()
        self.x= x[:,SP.newaxis]
        self.on_show()

    def learn_kernel_width(self):
        """Select kernel/likelihood parameters via gradient-based model selection.

        NOTE(review): `inf`, `likelihood`, `SECF`, `gp`, `feat_train` and
        `labels` are not defined in this scope, so this method raises
        NameError when triggered; it appears unfinished (its button is also
        commented out in create_main_frame).
        """
        root=ModelSelectionParameters();
        c1=ModelSelectionParameters("inference_method", inf);
        root.append_child(c1);

        c2 = ModelSelectionParameters("scale");
        c1.append_child(c2);
        c2.build_values(0.01, 4.0, R_LINEAR);
        c3 = ModelSelectionParameters("likelihood_model", likelihood);
        c1.append_child(c3);

        c4=ModelSelectionParameters("sigma");
        c3.append_child(c4);
        c4.build_values(0.001, 4.0, R_LINEAR);
        c5 =ModelSelectionParameters("kernel", SECF);
        c1.append_child(c5);

        c6 =ModelSelectionParameters("width");
        c5.append_child(c6);
        c6.build_values(0.001, 4.0, R_LINEAR);

        crit = GradientCriterion();

        grad=GradientEvaluation(gp, feat_train, labels, crit);
        grad.set_function(inf);

        gp.print_modsel_params();
        root.print_tree();

        grad_search=GradientModelSelection(root, grad);
        grad.set_autolock(0);
        best_combination=grad_search.select_model(1);

        self.sigma.setText("1.0")
        self.plot_gp()

    def plot_gp(self):
        """Train a GP on the current points and plot the mean with a +/-3*sigma band."""
        feat_train = RealFeatures(self.x.T)
        labels = RegressionLabels(self.y)
        #[x,y]=self.data.get_data()
        #feat_train=RealFeatures(x.T)
        #labels=RegressionLabels(y)
        n_dimensions = 1

        kernel_name = self.kernel_combo.currentText()
        print "current kernel is %s" % (kernel_name)

        #new interface with likelihood parametres being decoupled from the covaraince function
        likelihood = GaussianLikelihood()
        #covar_parms = SP.log([2])
        #hyperparams = {'covar':covar_parms,'lik':SP.log([1])}

        # construct covariance function
        width=float(self.sigma.text())
        degree=int(self.degree.text())
        # NOTE(review): no else branch below -- an unknown combo entry would
        # leave `gk` undefined and crash at the inference construction.
        if kernel_name == "Linear":
            gk = LinearKernel(feat_train, feat_train)
            gk.set_normalizer(IdentityKernelNormalizer())
        elif kernel_name == "Polynomial":
            gk = PolyKernel(feat_train, feat_train, degree, True)
            gk.set_normalizer(IdentityKernelNormalizer())
        elif kernel_name == "Gaussian":
            gk = GaussianKernel(feat_train, feat_train, width)
        #SECF = GaussianKernel(feat_train, feat_train, width)
        #covar = SECF
        zmean = ZeroMean();
        inf = ExactInferenceMethod(gk, feat_train, zmean, labels, likelihood);

        inf.get_negative_marginal_likelihood()
        # location of unispaced predictions
        x_test = array([linspace(self.xmin,self.xmax, self.nTest.text())])
        feat_test=RealFeatures(x_test)

        gp = GaussianProcessRegression(inf)
        gp.train()

        covariance = gp.get_variance_vector(feat_test)
        predictions = gp.get_mean_vector(feat_test)

        #print "x_test"
        #print feat_test.get_feature_matrix()
        #print "mean predictions"
        #print predictions.get_labels()
        #print "covariances"
        #print covariance.get_labels()

        self.status_text.setText("Negative Log Marginal Likelihood = %f"%(inf.get_negative_marginal_likelihood()))

        self.axes.clear()
        self.axes.grid(True)
        self.axes.set_xlim((self.xmin,self.xmax))
        self.axes.set_ylim((self.ymin,self.ymax))
        self.axes.hold(True)
        x_test=feat_test.get_feature_matrix()[0]
        self.axes.plot(x_test, predictions, 'b-x')
        #self.axes.plot(x_test, labels.get_labels(), 'ro')
        self.axes.plot(self.x, self.y, 'ro')

        #self.axes.plot(feat_test.get_feature_matrix()[0], predictions.get_labels()-3*sqrt(covariance.get_labels()))
        #self.axes.plot(feat_test.get_feature_matrix()[0], predictions.get_labels()+3*sqrt(covariance.get_labels()))
        upper = predictions+3*sqrt(covariance)
        lower = predictions-3*sqrt(covariance)
        self.axes.fill_between(x_test, lower, upper, color='grey')
        self.axes.hold(False)
        self.canvas.draw()
        self.fill_series_list(self.get_stats())

    def create_main_frame(self):
        """Assemble the canvas, input widgets and layouts of the main window."""
        self.xmin=-5
        self.xmax=5
        self.ymin=-5
        self.ymax=5

        self.main_frame = QWidget()
        plot_frame = QWidget()

        self.dpi = 100
        self.fig = Figure((6.0, 6.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)

        # Clicking the canvas adds data points via onclick.
        cid = self.canvas.mpl_connect('button_press_event', self.onclick)
        self.axes = self.fig.add_subplot(111)
        self.cax = None
        #self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)

        self.kernel_combo = QComboBox()
        self.kernel_combo.insertItem(-1, "Gaussian")
        self.kernel_combo.insertItem(-1, "Polynomial")
        self.kernel_combo.insertItem(-1, "Linear")
        self.kernel_combo.maximumSize = QSize(300, 50)
        self.connect(self.kernel_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)

        log_label = QLabel("Data points")
        self.series_list_view = QListView()
        self.series_list_view.setModel(self.series_list_model)

        # Free-text hyper-parameter fields (parsed with float()/int() elsewhere).
        self.sine_freq = QLineEdit()
        self.sine_freq.setText("1.0")
        self.sine_amplitude = QLineEdit()
        self.sine_amplitude.setText("1.0")
        self.sigma = QLineEdit()
        self.sigma.setText("1.2")
        self.degree = QLineEdit()
        self.degree.setText("2")
        self.noise_level = QLineEdit()
        self.noise_level.setText("1")
        self.nTest = QLineEdit()
        self.nTest.setText("100")

        spins_hbox = QHBoxLayout()
        spins_hbox.addWidget(QLabel('Sine data setting: '))
        spins_hbox.addWidget(QLabel('Sine Freq.'))
        spins_hbox.addWidget(self.sine_freq)
        spins_hbox.addWidget(QLabel('Sine Amplitude'))
        spins_hbox.addWidget(self.sine_amplitude)
        spins_hbox.addWidget(QLabel('Noise Level'))
        spins_hbox.addWidget(self.noise_level)
        spins_hbox.addStretch(1)

        spins_hbox2 = QHBoxLayout()
        spins_hbox2.addWidget(QLabel('Kernel Setting: '))
        spins_hbox2.addWidget(QLabel('Type'))
        spins_hbox2.addWidget(self.kernel_combo)
        spins_hbox2.addWidget(QLabel("Width"))
        spins_hbox2.addWidget(self.sigma)
        spins_hbox2.addWidget(QLabel("Degree"))
        spins_hbox2.addWidget(self.degree)
        spins_hbox2.addStretch(1)

        spins_hbox3 = QHBoxLayout()
        spins_hbox3.addWidget(QLabel('Test Setting: '))
        spins_hbox3.addWidget(QLabel('Number of test points'))
        spins_hbox3.addWidget(self.nTest)
        spins_hbox3.addStretch(1)

        self.show_button = QPushButton("&Train GP")
        self.connect(self.show_button, SIGNAL('clicked()'), self.plot_gp)

        self.gen_sine_data_button = QPushButton("&Generate Sine Data")
        self.connect(self.gen_sine_data_button, SIGNAL('clicked()'), self.create_toy_data)

        self.clear_data_button = QPushButton("&Clear")
        self.connect(self.clear_data_button, SIGNAL('clicked()'), self.clear_data)

        self.learn_kernel_button = QPushButton("&Learn Kernel Width and train GP")
        self.connect(self.learn_kernel_button, SIGNAL('clicked()'), self.learn_kernel_width)

        left_vbox = QVBoxLayout()
        left_vbox.addWidget(self.canvas)
        #left_vbox.addWidget(self.mpl_toolbar)

        right0_vbox = QVBoxLayout()
        right0_vbox.addWidget(QLabel("Data Points"))
        right0_vbox.addWidget(self.series_list_view)
        #right0_vbox.addWidget(self.legend_cb)
        right0_vbox.addStretch(1)

        right2_vbox = QVBoxLayout()
        right2_vbox.addWidget(QLabel("Settings"))
        right2_vbox.addWidget(self.gen_sine_data_button)
        right2_vbox.addWidget(self.clear_data_button)
        right2_vbox.addWidget(self.show_button)
        #right2_vbox.addWidget(self.learn_kernel_button)
        right2_vbox.addLayout(spins_hbox)
        right2_vbox.addLayout(spins_hbox2)
        right2_vbox.addLayout(spins_hbox3)
        right2_vbox.addStretch(1)

        right_vbox = QHBoxLayout()
        right_vbox.addLayout(right0_vbox)
        right_vbox.addLayout(right2_vbox)

        hbox = QVBoxLayout()
        hbox.addLayout(left_vbox)
        hbox.addLayout(right_vbox)
        self.main_frame.setLayout(hbox)

        self.setCentralWidget(self.main_frame)
        self.enable_widgets()

    def create_status_bar(self):
        """Status bar holding a single mutable text label."""
        self.status_text = QLabel("")
        self.statusBar().addWidget(self.status_text, 1)

    def add_actions(self, target, actions):
        """Add the given actions to a menu; a None entry inserts a separator."""
        for action in actions:
            if action is None:
                target.addSeparator()
            else:
                target.addAction(action)

    def create_action(self, text, slot=None, shortcut=None,
                      icon=None, tip=None, checkable=False,
                      signal="triggered()"):
        """Factory for QAction with optional icon/shortcut/tooltip/slot wiring."""
        action = QAction(text, self)
        if icon is not None:
            action.setIcon(QIcon(":/%s.png" % icon))
        if shortcut is not None:
            action.setShortcut(shortcut)
        if tip is not None:
            action.setToolTip(tip)
            action.setStatusTip(tip)
        if slot is not None:
            self.connect(action, SIGNAL(signal), slot)
        if checkable:
            action.setCheckable(True)
        return action
def main():
    """Create the demo window and run the Qt event loop."""
    app = QApplication(sys.argv)
    form = Form()
    form.show()
    app.exec_()

if __name__ == "__main__":
    main()
| gpl-3.0 |
pluskid/mxnet | example/bayesian-methods/bdk_demo.py | 15 | 15051 | from __future__ import print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import logging
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import argparse
from algos import *
from data_loader import *
from utils import *
class CrossEntropySoftmax(mx.operator.NumpyOp):
    """Custom numpy operator: softmax forward with cross-entropy gradient.

    forward() writes softmax(data) into the output; backward() emits
    (softmax - label), the cross-entropy gradient w.r.t. the logits.
    """

    def __init__(self):
        # False is presumably need_top_grad=False (terminal loss op that
        # ignores upstream gradients) -- confirm against the NumpyOp API.
        super(CrossEntropySoftmax, self).__init__(False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # Label and output share the data's (batch, num_classes) shape.
        data_shape = in_shape[0]
        label_shape = in_shape[0]
        output_shape = in_shape[0]
        return [data_shape, label_shape], [output_shape]

    def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        # Subtract the row-wise max before exponentiating for numerical stability.
        y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
        y /= y.sum(axis=1).reshape((x.shape[0], 1))

    def backward(self, out_grad, in_data, out_data, in_grad):
        l = in_data[1]
        y = out_data[0]
        dx = in_grad[0]
        # d(cross-entropy)/d(logits) = softmax - label.
        dx[:] = (y - l)
class LogSoftmax(mx.operator.NumpyOp):
    """Custom numpy operator computing log-softmax.

    backward() emits exp(log-softmax) - label = softmax - label, i.e. the
    cross-entropy gradient w.r.t. the logits.
    """

    def __init__(self):
        # False is presumably need_top_grad=False (terminal loss op) --
        # confirm against the NumpyOp API.
        super(LogSoftmax, self).__init__(False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # Label and output share the data's (batch, num_classes) shape.
        data_shape = in_shape[0]
        label_shape = in_shape[0]
        output_shape = in_shape[0]
        return [data_shape, label_shape], [output_shape]

    def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        # log-softmax = (x - max) - log(sum(exp(x - max))): numerically stable.
        y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
        y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
        # y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
        # y /= y.sum(axis=1).reshape((x.shape[0], 1))

    def backward(self, out_grad, in_data, out_data, in_grad):
        l = in_data[1]
        y = out_data[0]
        dx = in_grad[0]
        dx[:] = (numpy.exp(y) - l).astype('float32')
def classification_student_grad(student_outputs, teacher_pred):
    """Distillation gradient for classification: student prediction minus
    the teacher's target, wrapped in a single-element list (one output head)."""
    residual = student_outputs[0] - teacher_pred
    return [residual]
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
    """Distillation gradients for the regression student's two output heads.

    Parameters
    ----------
    student_outputs : list of NDArray
        [mean head, variance head]; the second head is used as a
        log-variance (precision = exp(-v) below).
    teacher_pred : NDArray
        Teacher's predicted mean.
    teacher_noise_precision : float
        Precision (1/variance) of the teacher's observation noise.

    Returns
    -------
    list of NDArray
        [grad w.r.t. mean head, grad w.r.t. variance head].
    """
    student_mean = student_outputs[0]
    student_var = student_outputs[1]
    # Compute exp(-v) once; the original evaluated it twice per call.
    student_precision = nd.exp(-student_var)
    residual = student_mean - teacher_pred
    grad_mean = student_precision * residual
    grad_var = (1 - student_precision * (nd.square(residual)
                                         + 1.0 / teacher_noise_precision)) / 2
    return [grad_mean, grad_var]
def get_mnist_sym(output_op=None, num_hidden=400):
    """Build the MNIST MLP symbol: two ReLU hidden layers and a 10-way output.

    output_op=None attaches a standard SoftmaxOutput; otherwise the given
    custom operator (e.g. LogSoftmax) is used as the output layer.
    """
    net = mx.symbol.Variable('data')
    # Two identical hidden layers; layer names must stay mnist_fc{1,2}/mnist_relu{1,2}.
    for layer_idx in (1, 2):
        net = mx.symbol.FullyConnected(data=net, name='mnist_fc%d' % layer_idx,
                                       num_hidden=num_hidden)
        net = mx.symbol.Activation(data=net, name='mnist_relu%d' % layer_idx,
                                   act_type="relu")
    net = mx.symbol.FullyConnected(data=net, name='mnist_fc3', num_hidden=10)
    if output_op is None:
        return mx.symbol.SoftmaxOutput(data=net, name='softmax')
    return output_op(data=net, name='softmax')
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    """Gradient of the negative log posterior for the two-parameter synthetic
    Gaussian-mixture model from the SGLD paper.

    Parameters
    ----------
    X : numpy array
        Observed data (minibatch).
    theta : NDArray of shape (2,)
        Current parameters (theta1, theta2).
    sigma1, sigma2 : float
        Prior standard deviations of theta1 and theta2.
    sigmax : float
        Observation-noise standard deviation.
    rescale_grad : float, optional
        Scale applied to the likelihood term (dataset-size / minibatch-size).
    grad : NDArray, optional
        Output buffer; allocated on theta's context when omitted.

    Returns
    -------
    NDArray
        The (2,) gradient, written into `grad`.
    """
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    # Single device->host copy; the original called asnumpy() twice.
    theta_npy = theta.asnumpy()
    theta1 = theta_npy[0]
    theta2 = theta_npy[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    # Unnormalized responsibilities of the two mixture components; hoisted so
    # each exponential is evaluated once instead of up to three times.
    comp1 = numpy.exp(-(X - theta1) ** 2 / (2 * vx))
    comp2 = numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx))
    denominator = comp1 + comp2
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((comp1 * (X - theta1) / vx
                                    + comp2 * (X - theta1 - theta2) / vx)
                                   / denominator).sum() \
                  + theta1 / v1
    grad_npy[1] = -rescale_grad * ((comp2 * (X - theta1 - theta2) / vx)
                                   / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad
def get_toy_sym(teacher=True, teacher_noise_precision=None):
    """Build the symbol for the 1-D toy regression task.

    teacher=True : one-hidden-layer regressor ending in LinearRegressionOutput
        whose gradient is scaled by the teacher's noise precision.
    teacher=False: student network with two linear heads (mean and variance),
        returned as a grouped symbol [mean, var].
    """
    if teacher:
        net = mx.symbol.Variable('data')
        net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100)
        net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu")
        net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1)
        net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output',
                                               grad_scale=teacher_noise_precision)
    else:
        net = mx.symbol.Variable('data')
        net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100)
        net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu")
        student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1)
        student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1)
        net = mx.symbol.Group([student_mean, student_var])
    return net
def dev():
    """Default compute context for every experiment below.

    NOTE(review): hard-codes GPU 0; change to mx.cpu() on machines without a GPU.
    """
    return mx.gpu()
def run_mnist_SGD(training_num=50000):
    """Train the MNIST MLP with plain SGD (point-estimate baseline)."""
    X, Y, X_test, Y_test = load_mnist(training_num)
    minibatch_size = 100
    net = get_mnist_sym()
    # Placeholder NDArrays sized for one minibatch on the chosen device.
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                   'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
    initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
    exe, exe_params, _ = SGD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
                             X_test=X_test, Y_test=Y_test,
                             total_iter_num=1000000,
                             initializer=initializer,
                             lr=5E-6, prior_precision=1.0, minibatch_size=100)
def run_mnist_SGLD(training_num=50000):
    """Sample MNIST MLP weights with SGLD, keeping a thinned posterior sample pool."""
    X, Y, X_test, Y_test = load_mnist(training_num)
    minibatch_size = 100
    net = get_mnist_sym()
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                   'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
    initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
    # Keep every 100th sample after a 1000-iteration burn-in.
    exe, sample_pool = SGLD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
                            X_test=X_test, Y_test=Y_test,
                            total_iter_num=1000000,
                            initializer=initializer,
                            learning_rate=4E-6, prior_precision=1.0, minibatch_size=100,
                            thin_interval=100, burn_in_iter_num=1000)
def run_mnist_DistilledSGLD(training_num=50000):
    """Bayesian Dark Knowledge on MNIST: an SGLD teacher distilled into a student MLP.

    Hyper-parameters are switched on the training-set size (large vs small
    data regime).
    """
    X, Y, X_test, Y_test = load_mnist(training_num)
    minibatch_size = 100
    if training_num >= 10000:
        num_hidden = 800
        total_iter_num = 1000000
        teacher_learning_rate = 1E-6
        student_learning_rate = 0.0001
        teacher_prior = 1
        student_prior = 0.1
        perturb_deviation = 0.1
    else:
        num_hidden = 400
        total_iter_num = 20000
        teacher_learning_rate = 4E-5
        student_learning_rate = 0.0001
        teacher_prior = 1
        student_prior = 0.1
        perturb_deviation = 0.001
    teacher_net = get_mnist_sym(num_hidden=num_hidden)
    logsoftmax = LogSoftmax()
    # Student outputs log-probabilities so it can match the teacher's softmax.
    student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                           'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
    # Student labels are full 10-way teacher distributions, not class indices.
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                           'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
    teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
                      student_initializer=student_initializer,
                      teacher_initializer=teacher_initializer,
                      student_optimizing_algorithm="adam",
                      teacher_learning_rate=teacher_learning_rate,
                      student_learning_rate=student_learning_rate,
                      teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
                      perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev())
def run_toy_SGLD():
    """SGLD regression on the 1-D toy data set."""
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = 1
    teacher_noise_precision = 1.0 / 9.0
    net = get_toy_sym(True, teacher_noise_precision)
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                   'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
    initializer = mx.init.Uniform(0.07)
    exe, params, _ = \
        SGLD(sym=net, data_inputs=data_inputs,
             X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=50000,
             initializer=initializer,
             learning_rate=1E-4,
             # lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
             prior_precision=0.1,
             burn_in_iter_num=1000,
             thin_interval=10,
             task='regression',
             minibatch_size=minibatch_size, dev=dev())
def run_toy_DistilledSGLD():
    """Distilled SGLD regression on the toy data: the student predicts mean and variance."""
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = 1
    teacher_noise_precision = 1.0
    teacher_net = get_toy_sym(True, teacher_noise_precision)
    student_net = get_toy_sym(False)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                           'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev())}
    # 'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
    teacher_initializer = mx.init.Uniform(0.07)
    student_initializer = mx.init.Uniform(0.07)
    # Bind the noise precision into the regression student-gradient function.
    student_grad_f = lambda student_outputs, teacher_pred: \
        regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
                      teacher_initializer=teacher_initializer,
                      student_initializer=student_initializer,
                      teacher_learning_rate=1E-4, student_learning_rate=0.01,
                      # teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
                      student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
                      student_grad_f=student_grad_f,
                      teacher_prior_precision=0.1, student_prior_precision=0.001,
                      perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
                      dev=dev())
def run_toy_HMC():
    """Full HMC sampling on the toy regression data (whole data set per step)."""
    X, Y, X_test, Y_test = load_toy()
    # HMC uses exact gradients, so the "minibatch" is the entire data set.
    minibatch_size = Y.shape[0]
    noise_precision = 1 / 9.0
    net = get_toy_sym(True, noise_precision)
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                   'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
    initializer = mx.init.Uniform(0.07)
    sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
                      sample_num=300000, initializer=initializer, prior_precision=1.0,
                      learning_rate=1E-3, L=10, dev=dev())
def run_synthetic_SGLD():
    """Reproduce the synthetic bimodal-posterior experiment from the SGLD paper.

    Draws SGLD samples of (theta1, theta2) under a polynomially decaying step
    size and shows the 2-D histogram of the sampled posterior.
    """
    import time  # stdlib; imported locally instead of relying on star-imports

    theta1 = 0
    theta2 = 1
    sigma1 = numpy.sqrt(10)
    sigma2 = 1
    sigmax = numpy.sqrt(2)
    X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
    minibatch_size = 1
    total_iter_num = 1000000
    lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
                                 factor=0.55)
    optimizer = mx.optimizer.create('sgld',
                                    learning_rate=None,
                                    rescale_grad=1.0,
                                    lr_scheduler=lr_scheduler,
                                    wd=0)
    updater = mx.optimizer.get_updater(optimizer)
    theta = mx.random.normal(0, 1, (2,), mx.cpu())
    grad = nd.empty((2,), mx.cpu())
    samples = numpy.zeros((2, total_iter_num))
    start = time.time()
    # `range`, not the Python-2-only `xrange`: this file targets Python 3
    # (it imports print_function from __future__).
    for i in range(total_iter_num):
        if (i + 1) % 100000 == 0:
            end = time.time()
            print("Iter:%d, Time spent: %f" % (i + 1, end - start))
            start = time.time()
        # One-point minibatch; the gradient is rescaled back to full-data scale.
        ind = numpy.random.randint(0, X.shape[0])
        synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax, rescale_grad=
                       X.shape[0] / float(minibatch_size), grad=grad)
        updater('theta', grad, theta)
        samples[:, i] = theta.asnumpy()
    plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
    plt.colorbar()
    plt.show()
if __name__ == '__main__':
    # Fixed seeds for reproducibility across runs.
    numpy.random.seed(100)
    mx.random.seed(100)
    parser = argparse.ArgumentParser(
        description="Examples in the paper [NIPS2015]Bayesian Dark Knowledge and "
                    "[ICML2011]Bayesian Learning via Stochastic Gradient Langevin Dynamics")
    parser.add_argument("-d", "--dataset", type=int, default=1,
                        help="Dataset to use. 0 --> TOY, 1 --> MNIST, 2 --> Synthetic Data in "
                             "the SGLD paper")
    parser.add_argument("-l", "--algorithm", type=int, default=2,
                        help="Type of algorithm to use. 0 --> SGD, 1 --> SGLD, other-->DistilledSGLD")
    parser.add_argument("-t", "--training", type=int, default=50000,
                        help="Number of training samples")
    args = parser.parse_args()
    training_num = args.training
    # Dispatch: dataset 1 = MNIST, 0 = toy regression, anything else = the
    # synthetic SGLD demo.
    if args.dataset == 1:
        if 0 == args.algorithm:
            run_mnist_SGD(training_num)
        elif 1 == args.algorithm:
            run_mnist_SGLD(training_num)
        else:
            run_mnist_DistilledSGLD(training_num)
    elif args.dataset == 0:
        # NOTE(review): algorithm 0 (SGD) is silently a no-op on the toy data.
        if 1 == args.algorithm:
            run_toy_SGLD()
        elif 2 == args.algorithm:
            run_toy_DistilledSGLD()
        elif 3 == args.algorithm:
            run_toy_HMC()
    else:
        run_synthetic_SGLD()
| apache-2.0 |
srowen/spark | python/pyspark/pandas/tests/test_utils.py | 15 | 3850 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pyspark.pandas.utils import (
lazy_property,
validate_arguments_and_invoke_function,
validate_bool_kwarg,
)
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
# Module-level counter; appears unused by the tests in this file -- confirm
# before removing.
some_global_variable = 0
class UtilsTest(PandasOnSparkTestCase, SQLTestUtils):
    """Tests for the helper utilities in pyspark.pandas.utils."""

    # a dummy to_html version with an extra parameter that pandas does not support
    # used in test_validate_arguments_and_invoke_function
    def to_html(self, max_rows=None, unsupported_param=None):
        args = locals()
        pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 3])
        validate_arguments_and_invoke_function(pdf, self.to_html, pd.DataFrame.to_html, args)

    # dummy counterpart of DataFrame.to_clipboard used to exercise **kwargs
    # forwarding in validate_arguments_and_invoke_function
    def to_clipboard(self, sep=",", **kwargs):
        args = locals()
        pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 3])
        validate_arguments_and_invoke_function(
            pdf, self.to_clipboard, pd.DataFrame.to_clipboard, args
        )

    def test_validate_arguments_and_invoke_function(self):
        # This should pass and run fine
        self.to_html()
        self.to_html(unsupported_param=None)
        self.to_html(max_rows=5)

        # Support for **kwargs
        # Fix: this smoke call previously sat at the tail of to_clipboard
        # itself, where it recursed unconditionally; as a test-body call it
        # matches the upstream pyspark layout.
        self.to_clipboard(sep=",", index=False)

        # This should fail because we are explicitly setting an unsupported param
        # to a non-default value
        with self.assertRaises(TypeError):
            self.to_html(unsupported_param=1)

    def test_lazy_property(self):
        obj = TestClassForLazyProp()
        # If lazy prop is not working, the second test would fail (because it'd be 2)
        self.assert_eq(obj.lazy_prop, 1)
        self.assert_eq(obj.lazy_prop, 1)

    def test_validate_bool_kwarg(self):
        # This should pass and run fine
        pandas_on_spark = True
        self.assert_eq(validate_bool_kwarg(pandas_on_spark, "pandas_on_spark"), True)

        pandas_on_spark = False
        self.assert_eq(validate_bool_kwarg(pandas_on_spark, "pandas_on_spark"), False)

        pandas_on_spark = None
        self.assert_eq(validate_bool_kwarg(pandas_on_spark, "pandas_on_spark"), None)

        # This should fail because we are explicitly setting a non-boolean value
        pandas_on_spark = "true"
        with self.assertRaisesRegex(
            TypeError, 'For argument "pandas_on_spark" expected type bool, received type str.'
        ):
            validate_bool_kwarg(pandas_on_spark, "pandas_on_spark")
class TestClassForLazyProp:
    """Helper for ``UtilsTest.test_lazy_property``: counts evaluations."""

    def __init__(self):
        # Evaluation counter; a working lazy_property bumps it exactly once.
        self.some_variable = 0

    @lazy_property
    def lazy_prop(self):
        count = self.some_variable + 1
        self.some_variable = count
        return count
if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.test_utils import *  # noqa: F401

    try:
        # Prefer the JUnit-style XML runner (for CI reports) when installed.
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        # Fall back to the default text runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
dkazanc/TomoPhantom | Demos/Python/3D/Object3D.py | 1 | 2274 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Note that the TomoPhantom package is released under Apache License, Version 2.0
Script to generate 3D analytical objects and their projection data
Recursively adding objects one can build a required model with the corresponding projection data
@author: Daniil Kazantsev
"""
import numpy as np
import matplotlib.pyplot as plt
from tomophantom import TomoP3D
from tomophantom.TomoP3D import Objects3D
N3D_size = 256  # phantom dimension: the generated volume is N3D_size^3 voxels

# specify object parameters, here we replicate model
# Object 1: a 3D Gaussian blob. Coordinates and half-axes appear to be in
# normalised units with phi1 a rotation in degrees -- TODO confirm against
# the TomoP3D.Objects3D documentation.
obj3D_1 = {'Obj': Objects3D.GAUSSIAN,
           'C0': 1.0,      # intensity
           'x0': -0.25,    # centre
           'y0': -0.15,
           'z0': 0.0,
           'a': 0.3,       # half-axes
           'b': 0.2,
           'c': 0.3,
           'phi1': 35.0}   # rotation

# Object 2: a cuboid.
obj3D_2 = {'Obj': Objects3D.CUBOID,
           'C0': 1.00,
           'x0': 0.1,
           'y0': 0.2,
           'z0': 0.0,
           'a': 0.15,
           'b': 0.35,
           'c': 0.6,
           'phi1': -60.0}

print ("Building 3D object using TomoPhantom software")
myObjects = [obj3D_1, obj3D_2]  # list of object parameter dictionaries
Object3D = TomoP3D.Object(N3D_size, myObjects)  # voxelised volume with both objects
# Display the three central orthogonal slices of the generated volume.
sliceSel = int(0.5*N3D_size)  # central slice index
#plt.gray()
plt.figure()
plt.subplot(131)
plt.imshow(Object3D[sliceSel,:,:],vmin=0, vmax=1)
plt.title('3D Object, axial view')
plt.subplot(132)
plt.imshow(Object3D[:,sliceSel,:],vmin=0, vmax=1)
plt.title('3D Object, coronal view')
plt.subplot(133)
plt.imshow(Object3D[:,:,sliceSel],vmin=0, vmax=1)
plt.title('3D Object, sagittal view')
plt.show()
#%%
# Analytical projection data for the same object list.
Horiz_det = int(np.sqrt(2)*N3D_size) # detector column count (horizontal)
Vert_det = N3D_size # detector row count (vertical) (no reason for it to be > N)
angles_num = int(0.5*np.pi*N3D_size); # angles number
angles = np.linspace(0.0,179.9,angles_num,dtype='float32') # in degrees

print ("Building 3D analytical projection data with TomoPhantom")
ProjData3D = TomoP3D.ObjectSino(N3D_size, Horiz_det, Vert_det, angles, myObjects)

intens_max = 60   # upper display bound for imshow
sliceSel = 150    # arbitrary slice/angle index chosen for display
plt.figure()
plt.subplot(131)
plt.imshow(ProjData3D[:,sliceSel,:],vmin=0, vmax=intens_max)
plt.title('2D Projection (analytical)')
plt.subplot(132)
plt.imshow(ProjData3D[sliceSel,:,:],vmin=0, vmax=intens_max)
plt.title('Sinogram view')
plt.subplot(133)
plt.imshow(ProjData3D[:,:,sliceSel],vmin=0, vmax=intens_max)
plt.title('Tangentogram view')
plt.show()
#%%
| apache-2.0 |
nicolas998/wmf | wmf/plots.py | 2 | 6066 | #!plots.py: conjunto de herramientas para hacer plots de resultados de simulacion
#!Copyright (C) <2018> <Nicolas Velasquez Giron>
#!This program is free software: you can redistribute it and/or modify
#!it under the terms of the GNU General Public License as published by
#!the Free Software Foundation, either version 3 of the License, or
#!(at your option) any later version.
#!This program is distributed in the hope that it will be useful,
#!but WITHOUT ANY WARRANTY; without even the implied warranty of
#!MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#!GNU General Public License for more details.
#!You should have received a copy of the GNU General Public License
#!along with this program. If not, see <http://www.gnu.org/licenses/>.
#Algo
import pandas as pd
import numpy as np
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
# Default plotly palette ('rgb(r,g,b)' strings) used when a series entry
# does not carry its own 'color'.
# NOTE(review): 'red1', 'red2' and 'red3' all repeat the 'blue1' value
# 'rgb(7,255,240)' -- looks like a copy-paste slip; confirm the intended
# red shades before relying on these keys.
ColorsDefault = {'blue1': 'rgb(7,255,240)',
                 'blue2': 'rgb(24,142,172)',
                 'blue3': 'rgb(44,110,154)',
                 'green1': 'rgb(12,249,4)',
                 'green2': 'rgb(30,154,59)',
                 'green3': 'rgb(47,111,62)',
                 'red1': 'rgb(7,255,240)',
                 'red2': 'rgb(7,255,240)',
                 'red3': 'rgb(7,255,240)',
                 'amber1': 'rgb(247,143,23)',
                 'amber2': 'rgb(218,120,64)',
                 'amber3': 'rgb(182,86,62)'}
def Plot_Streamflow(StreamDic, Rainfall = None, colors = ColorsDefault,
    **kwargs):
    '''Plot one or more streamflow series, optionally with rainfall on a
    reversed secondary axis (hyetograph style), using plotly.

    Parameters:
        - StreamDic: Dictionary with the data and plot properties:
            ej: {'Q1':{'data': np.array(data),
                'dates': pd.Series.index,
                'color': 'rgb(30,114,36)',
                'lw': 4, 'ls': '--'}}
          'color', 'lw' and 'ls' are optional per series.
        - colors: palette (name -> 'rgb(...)') used to pick a random color
          for series that do not define their own.
    Optional:
        - Rainfall: Dictionary with rainfall data with the same
          structure as StreamDic ({'data': ..., 'dates': ...}).
        - kwargs: 'lw' and 'ls' defaults for series without their own.'''
    data = []
    for k in StreamDic.keys():
        # Per-series style: an explicit entry wins; otherwise fall back to a
        # random palette color / the kwargs defaults.
        try:
            setColor = StreamDic[k]['color']
        except KeyError:
            # BUGFIX: honour the ``colors`` parameter (was hard-wired to the
            # module-level ColorsDefault, silently ignoring the argument).
            choice = np.random.choice(list(colors.keys()), 1)
            setColor = colors[choice[0]]
        try:
            setWeight = StreamDic[k]['lw']
        except KeyError:
            setWeight = kwargs.get('lw', 4)
        try:
            setLineStyle = StreamDic[k]['ls']
        except KeyError:
            setLineStyle = kwargs.get('ls', None)
        # Trace for this streamflow series.
        data.append(go.Scatter(
            x=StreamDic[k]['dates'],
            y=StreamDic[k]['data'],
            name=k,
            line=dict(color=setColor,
                      width=setWeight,
                      dash=setLineStyle),
            opacity=1.0))
    # Optional rainfall drawn filled on an inverted secondary y axis.
    if isinstance(Rainfall, dict):
        data.append(go.Scatter(
            x=Rainfall['dates'],
            y=Rainfall['data'],
            line=dict(color='blue'),
            opacity=0.8,
            yaxis='y2',
            fill='tozeroy',
            fillcolor='rgba(0, 102, 153,0.2)'))
    layout = dict(showlegend=False,
                  xaxis=dict(
                      title='Dates'),
                  yaxis=dict(
                      title="Streamflow [m3/s]"),
                  # NOTE(review): this is the rainfall axis but keeps the
                  # original "Caudal [m3/s]" (streamflow) label; it probably
                  # should read something like "Rainfall [mm]" -- confirm.
                  yaxis2=dict(autorange="reversed",
                              title="Caudal [m3/s]",
                              overlaying='y',
                              side='right')
                  )
    fig = dict(data=data, layout=layout)
    iplot(fig)
def Plot_DurationCurve(StreamDic, Rainfall = None, colors = ColorsDefault,
    Pinf = 0.2, Psup = 99.8, Nint = 50, **kwargs):
    '''Plot flow-duration (exceedance) curves for one or more streamflow
    series using plotly.

    Parameters:
        - StreamDic: Dictionary with the data and plot properties:
            ej: {'Q1':{'data': np.array(data),
                'color': 'rgb(30,114,36)',
                'lw': 4, 'ls': '--'}}
          'color', 'lw' and 'ls' are optional per series.
    Optional:
        - Pinf: Inferior percentile (0.2)
        - Psup: Superior percentile (99.8)
        - Nint: Total intervals (50)
        - Rainfall: accepted for signature compatibility with
          Plot_Streamflow; not used by this plot.
        - kwargs: 'lw' and 'ls' defaults for series without their own.'''
    def _exceedance(values):
        # Percentile curve plus the matching exceedance probabilities.
        ps = np.linspace(Pinf, Psup, Nint)
        quantiles = [np.percentile(values, p) for p in ps]
        return quantiles, ps[::-1] / 100.
    data = []
    for k in StreamDic.keys():
        # Per-series style: an explicit entry wins; otherwise fall back to a
        # random palette color / the kwargs defaults.
        try:
            setColor = StreamDic[k]['color']
        except KeyError:
            # BUGFIX: honour the ``colors`` parameter (was hard-wired to the
            # module-level ColorsDefault, silently ignoring the argument).
            choice = np.random.choice(list(colors.keys()), 1)
            setColor = colors[choice[0]]
        try:
            setWeight = StreamDic[k]['lw']
        except KeyError:
            setWeight = kwargs.get('lw', 4)
        try:
            setLineStyle = StreamDic[k]['ls']
        except KeyError:
            setLineStyle = kwargs.get('ls', None)
        # Values and P(x>X)
        Qexc, P = _exceedance(StreamDic[k]['data'])
        data.append(go.Scatter(
            x=P,
            y=Qexc,
            name=k,
            line=dict(color=setColor,
                      width=setWeight,
                      dash=setLineStyle),
            opacity=1.0))
    layout = dict(showlegend=False,
                  xaxis=dict(
                      title='P(x>X)',
                      tickfont=dict(
                          color='rgb(0, 102, 153)',
                          size=16),
                      titlefont=dict(
                          color='rgb(0, 102, 153)',
                          size=20),
                      ),
                  yaxis=dict(
                      title="Streamflow [m3/s]",
                      tickfont=dict(
                          color='rgb(0, 102, 153)',
                          size=16),
                      titlefont=dict(
                          color='rgb(0, 102, 153)',
                          size=20),
                      ),
                  )
    fig = dict(data=data, layout=layout)
    iplot(fig)
| gpl-3.0 |
deanmalmgren/scrubadub | tests/test_detector_address.py | 1 | 1830 | import pandas as pd
import zipfile
import pathlib
import requests
import unittest
import warnings
import scrubadub
class AddressTestCase(unittest.TestCase):
    """Tests for scrubadub's AddressDetector (locale handling and en_GB)."""

    def setUp(self) -> None:
        # Importing the module registers the address detector with scrubadub.
        import scrubadub.detectors.address

    def tearDown(self) -> None:
        # Deregister the detector so the global configuration does not leak
        # into other test cases.
        if scrubadub.detectors.AddressDetector.name in scrubadub.detectors.detector_configuration:
            del scrubadub.detectors.detector_configuration[scrubadub.detectors.AddressDetector.name]

    def test_bad_locale(self):
        """test a non existant region"""
        with self.assertRaises(ValueError):
            scrubadub.detectors.AddressDetector(locale='non_existant')

    def test_not_implemented_locale(self):
        """test a valid but unsupported region: adding the detector warns"""
        scrubber = scrubadub.Scrubber(locale='fr_FR')
        with warnings.catch_warnings():
            # Promote the warning to an error so assertRaises can catch it.
            warnings.simplefilter("error")
            with self.assertRaises(UserWarning):
                scrubber.add_detector(scrubadub.detectors.AddressDetector)

    def test_gb(self):
        """test that GB addresses embedded in text are matched exactly"""
        to_test = [
            # positive assertions
            ("59 High Road, East Finchley London, N2 8AW", True),
            ("25 Fenchurch Avenue, London EC3M 5AD", True),
            ("12 Street Road, London N1P 2FZ", True),
            ("99 New London Road,\nChelmsford, CM2 0PP", True)
        ]
        test_str = 'this is a {} test string'
        detector = scrubadub.detectors.AddressDetector(locale='en_GB')
        for address, result in to_test:
            matches = list(detector.iter_filth(test_str.format(address)))
            if result:
                # FIX: assertEquals is a deprecated alias of assertEqual.
                self.assertEqual(1, len(matches), "Unable to match " + address)
                self.assertEqual(matches[0].text, address)
            else:
                self.assertEqual(matches, [])
| mit |
dwillmer/numpy | numpy/core/fromnumeric.py | 9 | 98023 | """Module containing non-deprecated functions borrowed from Numeric.
"""
from __future__ import division, absolute_import, print_function
import types
import warnings
import numpy as np
from .. import VisibleDeprecationWarning
from . import multiarray as mu
from . import umath as um
from . import numerictypes as nt
from .numeric import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char  # shorthand: scalar type -> single-character type code

# functions that are methods
__all__ = [
    'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
    'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
    'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
    'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
    'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
    'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
    'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
    ]

try:
    _gentype = types.GeneratorType
except AttributeError:
    # Pythons lacking types.GeneratorType: use a type no real object matches.
    _gentype = type(None)

# save away Python sum
_sum_ = sum
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
    # Dispatch to ``obj.method(...)`` when possible; otherwise fall back to
    # coercing through ``asarray`` via ``_wrapit``.
    try:
        return getattr(obj, method)(*args, **kwds)
    # An AttributeError occurs if the object does not have
    # such a method in its class.
    # A TypeError occurs if the object does have such a method
    # in its class, but its signature is not identical to that
    # of NumPy's. This situation has occurred in the case of
    # a downstream library like 'pandas'.
    except (AttributeError, TypeError):
        return _wrapit(obj, method, *args, **kwds)
def take(a, indices, axis=None, out=None, mode='raise'):
    """
    Take elements from an array along an axis.

    Does the same thing as "fancy" indexing (indexing arrays using
    arrays), but can be easier to use when elements are wanted along a
    single axis.

    Parameters
    ----------
    a : array_like
        The source array.
    indices : array_like
        Indices of the values to extract; scalars are also allowed.
    axis : int, optional
        Axis over which to select values. By default the flattened
        input array is used.
    out : ndarray, optional
        If provided, the result is placed in this array; it should be of
        the appropriate shape and dtype.
    mode : {'raise', 'wrap', 'clip'}, optional
        How out-of-bounds indices behave: raise an error (default), wrap
        around, or clip to the valid range.

    Returns
    -------
    subarray : ndarray
        The returned array has the same type as `a`.

    See Also
    --------
    compress : Take elements using a boolean mask
    ndarray.take : equivalent method
    """
    return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
    """
    Gives a new shape to an array without changing its data.

    Parameters
    ----------
    a : array_like
        Array to be reshaped.
    newshape : int or tuple of ints
        New shape, compatible with the original. One dimension may be
        -1, in which case it is inferred from the array's length and the
        remaining dimensions.
    order : {'C', 'F', 'A'}, optional
        Index order used both to read the elements of `a` and to place
        them into the reshaped array. 'C' is C-like (last axis fastest),
        'F' is Fortran-like (first axis fastest), and 'A' means
        Fortran-like when `a` is Fortran contiguous in memory, C-like
        otherwise.

    Returns
    -------
    reshaped_array : ndarray
        A new view object if possible; otherwise a copy. There is no
        guarantee of the memory layout of the returned array.

    See Also
    --------
    ndarray.reshape : Equivalent method.
    """
    return _wrapfunc(a, 'reshape', newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
    """
    Construct an array from an index array and a set of arrays to choose from.

    In its simplest form this is
    ``np.array([choices[a[I]][I] for I in np.ndindex(a.shape)])`` after
    `a` and every array in `choices` have been broadcast to a common
    shape.

    Parameters
    ----------
    a : int array
        Indices into `choices`; must lie in ``[0, n-1]`` where ``n`` is
        the number of choices, unless `mode` is 'wrap' or 'clip'.
    choices : sequence of arrays
        Choice arrays; `a` and all of the choices must be broadcastable
        to the same shape. A list or tuple is preferred over a single
        array whose outermost dimension defines the sequence.
    out : array, optional
        If provided, the result is inserted into this array; it should
        be of the appropriate shape and dtype.
    mode : {'raise' (default), 'wrap', 'clip'}, optional
        How indices outside ``[0, n-1]`` are treated: raise an
        exception, take the value modulo ``n``, or clip to the range.

    Returns
    -------
    merged_array : array
        The merged result.

    Raises
    ------
    ValueError: shape mismatch
        If `a` and each choice array are not all broadcastable to the
        same shape.

    See Also
    --------
    ndarray.choose : equivalent method
    """
    return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
    """
    Repeat elements of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    repeats : int or array of ints
        The number of repetitions for each element; broadcast to fit the
        shape of the given axis.
    axis : int, optional
        The axis along which to repeat values. By default the flattened
        input is used and a flat array is returned.

    Returns
    -------
    repeated_array : ndarray
        Output array with the same shape as `a`, except along the given
        axis.

    See Also
    --------
    tile : Tile an array.
    """
    return _wrapfunc(a, 'repeat', repeats, axis=axis)
def put(a, ind, v, mode='raise'):
    """
    Replaces specified elements of an array with given values.

    The indexing works on the flattened target array; `put` is roughly
    equivalent to ``a.flat[ind] = v``.

    Parameters
    ----------
    a : ndarray
        Target array (modified in place).
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a` at the target indices; repeated as
        necessary if shorter than `ind`.
    mode : {'raise', 'wrap', 'clip'}, optional
        How out-of-bounds indices behave: raise an error (default), wrap
        around, or clip to the valid range.

    See Also
    --------
    putmask, place
    """
    try:
        put_method = a.put
    except AttributeError:
        # Mirror the method's own error for non-ndarray targets.
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(a).__name__))
    return put_method(ind, v, mode=mode)
def swapaxes(a, axis1, axis2):
    """
    Interchange two axes of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    axis1 : int
        First axis.
    axis2 : int
        Second axis.

    Returns
    -------
    a_swapped : ndarray
        For NumPy >= 1.10.0 a view of `a` when `a` is an ndarray,
        otherwise a new array. Earlier versions return a view only when
        the order of the axes actually changes.
    """
    return _wrapfunc(a, 'swapaxes', axis1, axis2)
def transpose(a, axes=None):
    """
    Permute the dimensions of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    axes : list of ints, optional
        Permutation of the axes; by default the dimensions are reversed.

    Returns
    -------
    p : ndarray
        `a` with its axes permuted; a view whenever possible.

    See Also
    --------
    moveaxis
    argsort

    Notes
    -----
    Use ``transpose(a, argsort(axes))`` to invert the transposition when
    using the `axes` keyword. Transposing a 1-D array returns an
    unchanged view.
    """
    return _wrapfunc(a, 'transpose', axes)
def partition(a, kth, axis=-1, kind='introselect', order=None):
    """
    Return a partitioned copy of an array.

    The element in the k-th position ends up where it would be in a
    sorted array; all smaller elements are moved before it and all equal
    or greater elements behind it. The ordering within the two
    partitions is undefined.

    .. versionadded:: 1.8.0

    Parameters
    ----------
    a : array_like
        Array to be sorted.
    kth : int or sequence of ints
        Element index (or indices) to partition by.
    axis : int or None, optional
        Axis along which to partition. ``None`` flattens the array
        first. Default is -1, the last axis.
    kind : {'introselect'}, optional
        Selection algorithm.
    order : str or list of str, optional
        For structured arrays, the field(s) to compare first, second,
        etc.; unspecified fields still break ties in dtype order.

    Returns
    -------
    partitioned_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    ndarray.partition : Method to sort an array in-place.
    argpartition : Indirect partition.
    sort : Full sorting
    """
    if axis is None:
        # flatten() always copies, so partitioning in place is safe.
        arr = asanyarray(a).flatten()
        arr.partition(kth, axis=0, kind=kind, order=order)
    else:
        arr = asanyarray(a).copy(order="K")
        arr.partition(kth, axis=axis, kind=kind, order=order)
    return arr
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
    """
    Perform an indirect partition along the given axis.

    Returns an array of indices of the same shape as `a` that index data
    along the given axis in partitioned order, i.e. ``a[index_array]``
    yields a partitioned `a` for 1-D input.

    .. versionadded:: 1.8.0

    Parameters
    ----------
    a : array_like
        Array to sort.
    kth : int or sequence of ints
        Element index (or indices) to partition by.
    axis : int or None, optional
        Axis along which to sort; -1 (the last axis) by default, ``None``
        uses the flattened array.
    kind : {'introselect'}, optional
        Selection algorithm.
    order : str or list of str, optional
        For structured arrays, the field(s) to compare first, second,
        etc.

    Returns
    -------
    index_array : ndarray, int
        Indices that partition `a` along the specified axis.

    See Also
    --------
    partition : Describes partition algorithms used.
    ndarray.partition : Inplace partition.
    argsort : Full indirect sort
    """
    return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
def sort(a, axis=-1, kind='quicksort', order=None):
    """
    Return a sorted copy of an array.

    Parameters
    ----------
    a : array_like
        Array to be sorted.
    axis : int or None, optional
        Axis along which to sort. ``None`` flattens the array before
        sorting. Default is -1, the last axis.
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm; only 'mergesort' is stable. Default is
        'quicksort' (an introsort since NumPy 1.12, giving
        O(n*log(n)) worst case).
    order : str or list of str, optional
        For structured arrays, the field(s) to compare first, second,
        etc.; unspecified fields still break ties in dtype order.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    ndarray.sort : Method to sort an array in-place.
    argsort : Indirect sort.
    lexsort : Indirect stable sort on multiple keys.
    searchsorted : Find elements in a sorted array.
    partition : Partial sort.

    Notes
    -----
    Sorting along any but the last axis makes temporary copies, so the
    last axis is fastest. Complex numbers sort lexicographically
    (real part, then imaginary part); nan values sort to the end
    (NumPy >= 1.4.0).
    """
    if axis is None:
        # flatten() always copies, so sorting in place is safe.
        arr = asanyarray(a).flatten()
        arr.sort(axis=0, kind=kind, order=order)
    else:
        arr = asanyarray(a).copy(order="K")
        arr.sort(axis=axis, kind=kind, order=order)
    return arr
def argsort(a, axis=-1, kind='quicksort', order=None):
    """
    Returns the indices that would sort an array.

    Perform an indirect sort along the given axis using the algorithm specified
    by the `kind` keyword. It returns an array of indices of the same shape as
    `a` that index data along the given axis in sorted order.

    Parameters
    ----------
    a : array_like
        Array to sort.
    axis : int or None, optional
        Axis along which to sort. The default is -1 (the last axis). If None,
        the flattened array is used.
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm.
    order : str or list of str, optional
        When `a` is an array with fields defined, this argument specifies
        which fields to compare first, second, etc. A single field can
        be specified as a string, and not all fields need be specified,
        but unspecified fields will still be used, in the order in which
        they come up in the dtype, to break ties.

    Returns
    -------
    index_array : ndarray, int
        Array of indices that sort `a` along the specified axis.
        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.

    See Also
    --------
    sort : Describes sorting algorithms used.
    lexsort : Indirect stable sort with multiple keys.
    ndarray.sort : Inplace sort.
    argpartition : Indirect partial sort.

    Notes
    -----
    See `sort` for notes on the different sorting algorithms.

    As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
    nan values. The enhanced sort order is documented in `sort`.

    Examples
    --------
    One dimensional array:

    >>> x = np.array([3, 1, 2])
    >>> np.argsort(x)
    array([1, 2, 0])

    Two-dimensional array:

    >>> x = np.array([[0, 3], [2, 2]])
    >>> x
    array([[0, 3],
           [2, 2]])
    >>> np.argsort(x, axis=0)
    array([[0, 1],
           [1, 0]])
    >>> np.argsort(x, axis=1)
    array([[0, 1],
           [0, 1]])

    Sorting with keys:

    >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
    >>> x
    array([(1, 0), (0, 1)],
          dtype=[('x', '<i4'), ('y', '<i4')])
    >>> np.argsort(x, order=('x','y'))
    array([1, 0])
    >>> np.argsort(x, order=('y','x'))
    array([0, 1])
    """
    # Delegate to the object's own 'argsort' method via the module-level
    # _wrapfunc helper; all keyword arguments are forwarded unchanged.
    return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
def argmax(a, axis=None, out=None):
    """
    Returns the indices of the maximum values along an axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        By default, the index is into the flattened array, otherwise
        along the specified axis.
    out : array, optional
        If provided, the result will be inserted into this array. It should
        be of the appropriate shape and dtype.

    Returns
    -------
    index_array : ndarray of ints
        Array of indices into the array. It has the same shape as `a.shape`
        with the dimension along `axis` removed.

    See Also
    --------
    ndarray.argmax, argmin
    amax : The maximum value along a given axis.
    unravel_index : Convert a flat index into an index tuple.

    Notes
    -----
    In case of multiple occurrences of the maximum values, the indices
    corresponding to the first occurrence are returned.

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3)
    >>> a
    array([[0, 1, 2],
           [3, 4, 5]])
    >>> np.argmax(a)
    5
    >>> np.argmax(a, axis=0)
    array([1, 1, 1])
    >>> np.argmax(a, axis=1)
    array([2, 2])

    >>> b = np.arange(6)
    >>> b[1] = 5
    >>> b
    array([0, 5, 2, 3, 4, 5])
    >>> np.argmax(b)  # Only the first occurrence is returned.
    1
    """
    # Delegate to the object's own 'argmax' method via the module-level
    # _wrapfunc helper.
    return _wrapfunc(a, 'argmax', axis=axis, out=out)
def argmin(a, axis=None, out=None):
    """
    Returns the indices of the minimum values along an axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        By default, the index is into the flattened array, otherwise
        along the specified axis.
    out : array, optional
        If provided, the result will be inserted into this array. It should
        be of the appropriate shape and dtype.

    Returns
    -------
    index_array : ndarray of ints
        Array of indices into the array. It has the same shape as `a.shape`
        with the dimension along `axis` removed.

    See Also
    --------
    ndarray.argmin, argmax
    amin : The minimum value along a given axis.
    unravel_index : Convert a flat index into an index tuple.

    Notes
    -----
    In case of multiple occurrences of the minimum values, the indices
    corresponding to the first occurrence are returned.

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3)
    >>> a
    array([[0, 1, 2],
           [3, 4, 5]])
    >>> np.argmin(a)
    0
    >>> np.argmin(a, axis=0)
    array([0, 0, 0])
    >>> np.argmin(a, axis=1)
    array([0, 0])

    >>> b = np.arange(6)
    >>> b[4] = 0
    >>> b
    array([0, 1, 2, 3, 0, 5])
    >>> np.argmin(b)  # Only the first occurrence is returned.
    0
    """
    # Delegate to the object's own 'argmin' method via the module-level
    # _wrapfunc helper.
    return _wrapfunc(a, 'argmin', axis=axis, out=out)
def searchsorted(a, v, side='left', sorter=None):
    """
    Find indices where elements should be inserted to maintain order.

    Find the indices into a sorted array `a` such that, if the
    corresponding elements in `v` were inserted before the indices, the
    order of `a` would be preserved.

    Parameters
    ----------
    a : 1-D array_like
        Input array. If `sorter` is None, then it must be sorted in
        ascending order, otherwise `sorter` must be an array of indices
        that sort it.
    v : array_like
        Values to insert into `a`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is given.
        If 'right', return the last such index. If there is no suitable
        index, return either 0 or N (where N is the length of `a`).
    sorter : 1-D array_like, optional
        Optional array of integer indices that sort array a into ascending
        order. They are typically the result of argsort.

        .. versionadded:: 1.7.0

    Returns
    -------
    indices : array of ints
        Array of insertion points with the same shape as `v`.

    See Also
    --------
    sort : Return a sorted copy of an array.
    histogram : Produce histogram from 1-D data.

    Notes
    -----
    Binary search is used to find the required insertion points.

    As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
    `nan` values. The enhanced sort order is documented in `sort`.

    Examples
    --------
    >>> np.searchsorted([1,2,3,4,5], 3)
    2
    >>> np.searchsorted([1,2,3,4,5], 3, side='right')
    3
    >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
    array([0, 5, 1, 2])
    """
    # Delegate to the object's own 'searchsorted' method via the
    # module-level _wrapfunc helper; `v` is passed positionally.
    return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
def resize(a, new_shape):
    """
    Return a new array with the specified shape.

    If the new array is larger than the original array, the new array is
    filled with repeated copies of `a`. Note that this behavior is
    different from ``a.resize(new_shape)``, which fills with zeros instead
    of repeated copies of `a`.

    Parameters
    ----------
    a : array_like
        Array to be resized.
    new_shape : int or tuple of int
        Shape of resized array.

    Returns
    -------
    reshaped_array : ndarray
        The new array is formed from the data in the old array, repeated
        if necessary to fill out the required number of elements. The
        data are repeated in the order that they are stored in memory.

    See Also
    --------
    ndarray.resize : resize an array in-place.

    Examples
    --------
    >>> a = np.array([[0, 1], [2, 3]])
    >>> np.resize(a, (2, 3))
    array([[0, 1, 2],
           [3, 0, 1]])
    >>> np.resize(a, (1, 4))
    array([[0, 1, 2, 3]])
    """
    # A bare integer means a 1-D result of that length.
    if isinstance(new_shape, (int, nt.integer)):
        new_shape = (new_shape,)
    flat = ravel(a)
    n_elem = len(flat)
    if not n_elem:
        # Nothing to repeat from: return a zero-filled array instead.
        return mu.zeros(new_shape, flat.dtype)
    wanted = um.multiply.reduce(new_shape)
    if wanted == 0:
        # Zero-size target: an empty slice preserves dtype and array type.
        return flat[:0]
    # Number of whole copies of the data needed, rounded up.
    n_reps, remainder = divmod(wanted, n_elem)
    if remainder:
        n_reps += 1
    # Tile the flattened data and trim the surplus before reshaping.
    tiled = concatenate((flat,) * n_reps)[:wanted]
    return reshape(tiled, new_shape)
def squeeze(a, axis=None):
    """
    Remove single-dimensional entries from the shape of an array.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        .. versionadded:: 1.7.0

        Selects a subset of the single-dimensional entries in the
        shape. If an axis is selected with shape entry greater than
        one, an error is raised.

    Returns
    -------
    squeezed : ndarray
        The input array, but with all or a subset of the
        dimensions of length 1 removed. This is always `a` itself
        or a view into `a`.

    Examples
    --------
    >>> x = np.array([[[0], [1], [2]]])
    >>> x.shape
    (1, 3, 1)
    >>> np.squeeze(x).shape
    (3,)
    >>> np.squeeze(x, axis=(2,)).shape
    (1, 3)
    """
    method = getattr(a, 'squeeze', None)
    if method is None:
        # Not array-like enough to have a squeeze method: convert first.
        return _wrapit(a, 'squeeze')
    try:
        # Modern implementations accept the axis keyword.
        return method(axis=axis)
    except TypeError:
        # Backwards compatibility with objects whose squeeze() takes
        # no arguments.
        return method()
def diagonal(a, offset=0, axis1=0, axis2=1):
    """
    Return specified diagonals.

    If `a` is 2-D, returns the diagonal of `a` with the given offset,
    i.e., the collection of elements of the form ``a[i, i+offset]``. If
    `a` has more than two dimensions, then the axes specified by `axis1`
    and `axis2` are used to determine the 2-D sub-array whose diagonal is
    returned. The shape of the resulting array can be determined by
    removing `axis1` and `axis2` and appending an index to the right equal
    to the size of the resulting diagonals.

    In NumPy 1.9 and later this returns a read-only view on the original
    array; writing to the result raises an error. In some future release
    it will become a read/write view. If you depend on writing to the
    result, copy it explicitly with ``np.diagonal(a).copy()``, which works
    with both past and future versions of NumPy. If you never write to
    the returned array, none of this matters.

    Parameters
    ----------
    a : array_like
        Array from which the diagonals are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal. Can be positive or
        negative. Defaults to main diagonal (0).
    axis1 : int, optional
        Axis to be used as the first axis of the 2-D sub-arrays from which
        the diagonals should be taken. Defaults to first axis (0).
    axis2 : int, optional
        Axis to be used as the second axis of the 2-D sub-arrays from
        which the diagonals should be taken. Defaults to second axis (1).

    Returns
    -------
    array_of_diagonals : ndarray
        If `a` is 2-D and not a matrix, a 1-D array of the same type as `a`
        containing the diagonal is returned. If `a` is a matrix, a 1-D
        array is returned to maintain backward compatibility. If the
        dimension of `a` is greater than two, then an array of diagonals
        is returned, "packed" from left-most dimension to right-most.

    Raises
    ------
    ValueError
        If the dimension of `a` is less than 2.

    See Also
    --------
    diag : MATLAB work-a-like for 1-D and 2-D arrays.
    diagflat : Create diagonal arrays.
    trace : Sum along diagonals.

    Examples
    --------
    >>> a = np.arange(4).reshape(2, 2)
    >>> a.diagonal()
    array([0, 3])
    >>> a.diagonal(1)
    array([1])
    """
    # Matrices must keep returning a 1-D diagonal for backward
    # compatibility, so strip the subclass with asarray; everything else
    # keeps its subclass via asanyarray.
    convert = asarray if isinstance(a, np.matrix) else asanyarray
    return convert(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
    """
    Return the sum along diagonals of the array.

    If `a` is 2-D, the sum along its diagonal with the given offset
    is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
    If `a` has more than two dimensions, then the axes specified by
    `axis1` and `axis2` are used to determine the 2-D sub-arrays whose
    traces are returned. The shape of the resulting array is the same as
    that of `a` with `axis1` and `axis2` removed.

    Parameters
    ----------
    a : array_like
        Input array, from which the diagonals are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal. Can be both
        positive and negative. Defaults to 0.
    axis1, axis2 : int, optional
        Axes to be used as the first and second axis of the 2-D sub-arrays
        from which the diagonals should be taken. Defaults are the first
        two axes of `a`.
    dtype : dtype, optional
        Data-type of the returned array and of the accumulator. If None
        and `a` is of integer type of precision less than the default
        integer precision, the default integer precision is used.
        Otherwise, the precision is the same as that of `a`.
    out : ndarray, optional
        Array into which the output is placed. Its type is preserved and
        it must be of the right shape to hold the output.

    Returns
    -------
    sum_along_diagonals : ndarray
        If `a` is 2-D, the sum along the diagonal is returned. If `a` has
        larger dimensions, then an array of sums along diagonals is
        returned.

    See Also
    --------
    diag, diagonal, diagflat

    Examples
    --------
    >>> np.trace(np.eye(3))
    3.0
    >>> a = np.arange(8).reshape((2, 2, 2))
    >>> np.trace(a)
    array([6, 8])
    """
    # Matrices go through a plain ndarray view so the result stays 1-D,
    # preserving backward compatibility; other subclasses are kept.
    target = asarray(a) if isinstance(a, np.matrix) else asanyarray(a)
    return target.trace(offset=offset, axis1=axis1, axis2=axis2,
                        dtype=dtype, out=out)
def ravel(a, order='C'):
    """
    Return a contiguous flattened array.

    A 1-D array containing the elements of the input is returned. A copy
    is made only if needed. As of NumPy 1.10, the returned array has the
    same type as the input array (e.g. a masked array is returned for a
    masked array input).

    Parameters
    ----------
    a : array_like
        Input array. The elements in `a` are read in the order specified
        by `order`, and packed as a 1-D array.
    order : {'C', 'F', 'A', 'K'}, optional
        The elements of `a` are read using this index order. 'C' means
        row-major, C-style order, with the last axis index changing
        fastest. 'F' means column-major, Fortran-style order, with the
        first index changing fastest. Note that 'C' and 'F' take no
        account of the memory layout and only refer to the order of axis
        indexing. 'A' means Fortran-like index order if `a` is Fortran
        *contiguous* in memory, C-like order otherwise. 'K' means the
        order the elements occur in memory, except for reversing the data
        when strides are negative. Default is 'C'.

    Returns
    -------
    y : array_like
        If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array of
        the same subtype as `a`, with shape ``(a.size,)``. Matrices are
        special cased for backward compatibility.

    See Also
    --------
    ndarray.flat : 1-D iterator over an array.
    ndarray.flatten : 1-D array copy of the elements of an array
                      in row-major order.
    ndarray.reshape : Change the shape of an array without changing its
                      data.

    Notes
    -----
    In row-major, C-style order, the index along the first axis varies
    slowest, and the index along the last quickest; the opposite holds
    for column-major, Fortran-style ordering. When a view is desired in
    as many cases as possible, ``arr.reshape(-1)`` may be preferable.

    Examples
    --------
    It is equivalent to ``reshape(-1, order=order)``.

    >>> x = np.array([[1, 2, 3], [4, 5, 6]])
    >>> print(np.ravel(x))
    [1 2 3 4 5 6]
    >>> print(np.ravel(x, order='F'))
    [1 4 2 5 3 6]
    """
    # Matrices are flattened through a plain ndarray so the result is
    # truly 1-D; other subclasses are preserved.
    arr = asarray(a) if isinstance(a, np.matrix) else asanyarray(a)
    return arr.ravel(order=order)
def nonzero(a):
    """
    Return the indices of the elements that are non-zero.

    Returns a tuple of arrays, one for each dimension of `a`,
    containing the indices of the non-zero elements in that
    dimension. The values in `a` are always tested and returned in
    row-major, C-style order. The corresponding non-zero
    values can be obtained with::

        a[nonzero(a)]

    To group the indices by element, rather than dimension, use::

        transpose(nonzero(a))

    The result of this is always a 2-D array, with a row for
    each non-zero element.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    tuple_of_arrays : tuple
        Indices of elements that are non-zero.

    See Also
    --------
    flatnonzero :
        Return indices that are non-zero in the flattened version of the input
        array.
    ndarray.nonzero :
        Equivalent ndarray method.
    count_nonzero :
        Counts the number of non-zero elements in the input array.

    Examples
    --------
    >>> x = np.eye(3)
    >>> np.nonzero(x)
    (array([0, 1, 2]), array([0, 1, 2]))
    >>> x[np.nonzero(x)]
    array([ 1.,  1.,  1.])
    >>> np.transpose(np.nonzero(x))
    array([[0, 0],
           [1, 1],
           [2, 2]])

    A common use for ``nonzero`` is to find the indices of an array, where
    a condition is True. Given an array `a`, the condition `a` > 3 is a
    boolean array and since False is interpreted as 0, np.nonzero(a > 3)
    yields the indices of the `a` where the condition is true.

    >>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
    >>> np.nonzero(a > 3)
    (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))

    The ``nonzero`` method of the boolean array can also be called.

    >>> (a > 3).nonzero()
    (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
    """
    # Delegate to the object's own 'nonzero' method via the module-level
    # _wrapfunc helper.
    return _wrapfunc(a, 'nonzero')
def shape(a):
    """
    Return the shape of an array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    shape : tuple of ints
        The elements of the shape tuple give the lengths of the
        corresponding array dimensions.

    See Also
    --------
    alen
    ndarray.shape : Equivalent array method.

    Examples
    --------
    >>> np.shape(np.eye(3))
    (3, 3)
    >>> np.shape([[1, 2]])
    (1, 2)
    >>> np.shape(0)
    ()
    """
    # EAFP: most inputs already carry a .shape attribute; anything else
    # (lists, scalars, ...) is converted to an ndarray first.
    try:
        return a.shape
    except AttributeError:
        return asarray(a).shape
def compress(condition, a, axis=None, out=None):
    """
    Return selected slices of an array along given axis.

    When working along a given axis, a slice along that axis is returned in
    `output` for each index where `condition` evaluates to True. When
    working on a 1-D array, `compress` is equivalent to `extract`.

    Parameters
    ----------
    condition : 1-D array of bools
        Array that selects which entries to return. If len(condition)
        is less than the size of `a` along the given axis, then output is
        truncated to the length of the condition array.
    a : array_like
        Array from which to extract a part.
    axis : int, optional
        Axis along which to take slices. If None (default), work on the
        flattened array.
    out : ndarray, optional
        Output array. Its type is preserved and it must be of the right
        shape to hold the output.

    Returns
    -------
    compressed_array : ndarray
        A copy of `a` without the slices along axis for which `condition`
        is false.

    See Also
    --------
    take, choose, diag, diagonal, select
    ndarray.compress : Equivalent method in ndarray
    np.extract: Equivalent method when working on 1-D arrays
    numpy.doc.ufuncs : Section "Output arguments"

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4], [5, 6]])
    >>> np.compress([0, 1], a, axis=0)
    array([[3, 4]])
    >>> np.compress([False, True, True], a, axis=0)
    array([[3, 4],
           [5, 6]])
    >>> np.compress([False, True], a, axis=1)
    array([[2],
           [4],
           [6]])

    Working on the flattened array does not return slices along an axis but
    selects elements.

    >>> np.compress([False, True], a)
    array([2])
    """
    # Delegate to the object's own 'compress' method via the module-level
    # _wrapfunc helper; `condition` is passed positionally.
    return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
def clip(a, a_min, a_max, out=None):
    """
    Clip (limit) the values in an array.

    Given an interval, values outside the interval are clipped to
    the interval edges. For example, if an interval of ``[0, 1]``
    is specified, values smaller than 0 become 0, and values larger
    than 1 become 1.

    Parameters
    ----------
    a : array_like
        Array containing elements to clip.
    a_min : scalar or array_like
        Minimum value.
    a_max : scalar or array_like
        Maximum value. If `a_min` or `a_max` are array_like, then they will
        be broadcasted to the shape of `a`.
    out : ndarray, optional
        The results will be placed in this array. It may be the input
        array for in-place clipping. `out` must be of the right shape
        to hold the output. Its type is preserved.

    Returns
    -------
    clipped_array : ndarray
        An array with the elements of `a`, but where values
        < `a_min` are replaced with `a_min`, and those > `a_max`
        with `a_max`.

    See Also
    --------
    numpy.doc.ufuncs : Section "Output arguments"

    Examples
    --------
    >>> a = np.arange(10)
    >>> np.clip(a, 1, 8)
    array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
    >>> np.clip(a, 3, 6, out=a)
    array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
    >>> a = np.arange(10)
    >>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
    array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
    """
    # Delegate to the object's own 'clip' method via the module-level
    # _wrapfunc helper; bounds are passed positionally.
    return _wrapfunc(a, 'clip', a_min, a_max, out=out)
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
    """
    Sum of array elements over a given axis.

    Parameters
    ----------
    a : array_like
        Elements to sum.
    axis : None or int or tuple of ints, optional
        Axis or axes along which a sum is performed. The default,
        ``axis=None``, sums all of the elements of the input array. If
        axis is negative it counts from the last to the first axis. A
        tuple of ints (since 1.7.0) sums over all the given axes at once.
    dtype : dtype, optional
        The type of the returned array and of the accumulator. The dtype
        of `a` is used by default unless `a` has an integer dtype of less
        precision than the default platform integer, in which case the
        platform integer (or its unsigned counterpart) is used.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape as the expected output; output values are
        cast if necessary.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts correctly against the
        input. When left at the default, `keepdims` is not forwarded to
        the ``sum`` method of ndarray sub-classes; any explicit value is.

    Returns
    -------
    sum_along_axis : ndarray
        An array with the same shape as `a`, with the specified axis
        removed. A scalar is returned for 0-d input or ``axis=None``; a
        reference to `out` is returned when it is specified.

    See Also
    --------
    ndarray.sum : Equivalent method.
    cumsum : Cumulative sum of array elements.
    trapz : Integration of array values using the composite trapezoidal
            rule.
    mean, average

    Notes
    -----
    Arithmetic is modular when using integer types, and no error is
    raised on overflow. The sum of an empty array is the neutral
    element 0.

    Examples
    --------
    >>> np.sum([0.5, 1.5])
    2.0
    >>> np.sum([[0, 1], [0, 5]], axis=0)
    array([0, 6])
    >>> np.sum([[0, 1], [0, 5]], axis=1)
    array([1, 5])
    """
    # Only forward keepdims when the caller gave one, so sub-classes
    # whose sum() lacks the parameter keep working.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if isinstance(a, _gentype):
        # Plain Python generators fall back to the builtin sum for
        # backward compatibility.
        res = _sum_(a)
        if out is None:
            return res
        out[...] = res
        return out
    if type(a) is not mu.ndarray:
        # Duck-typed array-likes: prefer their own sum method if present.
        sum_method = getattr(a, 'sum', None)
        if sum_method is not None:
            return sum_method(axis=axis, dtype=dtype, out=out, **kwargs)
    # True ndarrays (and objects without a sum method) use the internal
    # reduction implementation.
    return _methods._sum(a, axis=axis, dtype=dtype,
                         out=out, **kwargs)
def product(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
    """
    Return the product of array elements over a given axis.

    See Also
    --------
    prod : equivalent function; see for details.
    """
    # Forward keepdims only when explicitly supplied by the caller.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, **kwargs)
def sometrue(a, axis=None, out=None, keepdims=np._NoValue):
    """
    Check whether some values are true.

    Refer to `any` for full documentation.

    See Also
    --------
    any : equivalent function
    """
    # Forward keepdims only when explicitly supplied by the caller.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    return asanyarray(a).any(axis=axis, out=out, **kwargs)
def alltrue(a, axis=None, out=None, keepdims=np._NoValue):
    """
    Check if all elements of input array are true.

    See Also
    --------
    numpy.all : Equivalent function; see for details.
    """
    # Forward keepdims only when explicitly supplied by the caller.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    return asanyarray(a).all(axis=axis, out=out, **kwargs)
def any(a, axis=None, out=None, keepdims=np._NoValue):
    """
    Test whether any array element along a given axis evaluates to True.

    Returns single boolean unless `axis` is not ``None``

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : None or int or tuple of ints, optional
        Axis or axes along which a logical OR reduction is performed.
        The default (`axis` = `None`) is to perform a logical OR over all
        the dimensions of the input array. `axis` may be negative, in
        which case it counts from the last to the first axis. A tuple of
        ints (since 1.7.0) reduces over multiple axes at once.
    out : ndarray, optional
        Alternate output array in which to place the result. It must have
        the same shape as the expected output and its type is preserved
        (e.g., if it is of type float, then it will remain so, returning
        1.0 for True and 0.0 for False, regardless of the type of `a`).
        See `doc.ufuncs` (Section "Output arguments") for details.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result broadcasts
        correctly against the input array. If the default value is
        passed, then `keepdims` will not be passed through to the `any`
        method of sub-classes of `ndarray`, however any non-default value
        will be. If the sub-class's `any` method does not implement
        `keepdims` any exceptions will be raised.

    Returns
    -------
    any : bool or ndarray
        A new boolean or `ndarray` is returned unless `out` is specified,
        in which case a reference to `out` is returned.

    See Also
    --------
    ndarray.any : equivalent method
    all : Test whether all elements along a given axis evaluate to True.

    Notes
    -----
    Not a Number (NaN), positive infinity and negative infinity evaluate
    to `True` because these are not equal to zero.

    Examples
    --------
    >>> np.any([[True, False], [True, True]])
    True
    >>> np.any([[True, False], [False, False]], axis=0)
    array([ True, False], dtype=bool)
    >>> np.any([-1, 0, 5])
    True
    >>> np.any(np.nan)
    True
    """
    # Forward keepdims only when explicitly supplied by the caller, so
    # sub-classes whose any() lacks the parameter keep working.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    return asanyarray(a).any(axis=axis, out=out, **kwargs)
def all(a, axis=None, out=None, keepdims=np._NoValue):
    """
    Test whether all array elements along a given axis evaluate to True.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : None or int or tuple of ints, optional
        Axis or axes along which a logical AND reduction is performed.
        The default (`axis` = `None`) is to perform a logical AND over
        all the dimensions of the input array. `axis` may be negative, in
        which case it counts from the last to the first axis. A tuple of
        ints (since 1.7.0) reduces over multiple axes at once.
    out : ndarray, optional
        Alternate output array in which to place the result.
        It must have the same shape as the expected output and its
        type is preserved (e.g., if ``dtype(out)`` is float, the result
        will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
        "Output arguments") for more details.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result broadcasts
        correctly against the input array. If the default value is
        passed, then `keepdims` will not be passed through to the `all`
        method of sub-classes of `ndarray`, however any non-default value
        will be. If the sub-class's `all` method does not implement
        `keepdims` any exceptions will be raised.

    Returns
    -------
    all : ndarray, bool
        A new boolean or array is returned unless `out` is specified,
        in which case a reference to `out` is returned.

    See Also
    --------
    ndarray.all : equivalent method
    any : Test whether any element along a given axis evaluates to True.

    Notes
    -----
    Not a Number (NaN), positive infinity and negative infinity
    evaluate to `True` because these are not equal to zero.

    Examples
    --------
    >>> np.all([[True, False], [True, True]])
    False
    >>> np.all([[True, False], [True, True]], axis=0)
    array([ True, False], dtype=bool)
    >>> np.all([-1, 4, 5])
    True
    >>> np.all([1.0, np.nan])
    True
    """
    # Forward keepdims only when explicitly supplied by the caller, so
    # sub-classes whose all() lacks the parameter keep working.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    return asanyarray(a).all(axis=axis, out=out, **kwargs)
def cumsum(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative sum of the elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative sum is computed.  The default
        (None) computes the cumsum over the flattened array.
    dtype : dtype, optional
        Type of the returned array and of the accumulator.  Defaults to
        the dtype of `a`, except that integer inputs of lower precision
        than the platform integer are accumulated in the platform
        integer type.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output (its type is cast if necessary).

    Returns
    -------
    cumsum_along_axis : ndarray
        A new array holding the result, or a reference to `out` when it
        is provided.  The result has the same size as `a`, and the same
        shape as `a` if `axis` is not None or `a` is a 1-d array.

    See Also
    --------
    sum : Sum array elements.
    trapz : Integration of array values using the composite trapezoidal rule.
    diff : Calculate the n-th discrete difference along given axis.

    Notes
    -----
    Arithmetic is modular when using integer types, and no error is
    raised on overflow.

    Examples
    --------
    >>> a = np.array([[1,2,3], [4,5,6]])
    >>> np.cumsum(a)
    array([ 1,  3,  6, 10, 15, 21])
    >>> np.cumsum(a, axis=0)    # sum over rows for each of the 3 columns
    array([[1, 2, 3],
           [5, 7, 9]])
    """
    # Delegate to the array's own ``cumsum`` method via the common wrapper.
    call_kwargs = {'axis': axis, 'dtype': dtype, 'out': out}
    return _wrapfunc(a, 'cumsum', **call_kwargs)
def cumproduct(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative product over the given axis.

    See Also
    --------
    cumprod : equivalent function; see for details.
    """
    # Backward-compatible alias; forwards straight to the 'cumprod' method.
    call_kwargs = {'axis': axis, 'dtype': dtype, 'out': out}
    return _wrapfunc(a, 'cumprod', **call_kwargs)
def ptp(a, axis=None, out=None):
    """
    Range of values (maximum - minimum) along an axis.

    The name of the function comes from the acronym for 'peak to peak'.

    Parameters
    ----------
    a : array_like
        Input values.
    axis : int, optional
        Axis along which to find the peaks.  By default the array is
        flattened first.
    out : array_like
        Alternative output array; must have the same shape and buffer
        length as the expected output (output values are cast if
        necessary).

    Returns
    -------
    ptp : ndarray
        A new array holding the result, unless `out` was specified, in
        which case a reference to `out` is returned.

    Examples
    --------
    >>> x = np.arange(4).reshape((2,2))
    >>> np.ptp(x, axis=0)
    array([2, 2])
    >>> np.ptp(x, axis=1)
    array([1, 1])
    """
    # Delegate to the array's own ``ptp`` method via the common wrapper.
    return _wrapfunc(a, 'ptp', out=out, axis=axis)
def amax(a, axis=None, out=None, keepdims=np._NoValue):
    """
    Return the maximum of an array or maximum along an axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to operate.  By default the flattened
        input is used; a tuple of ints selects the maximum over
        multiple axes.
    out : ndarray, optional
        Alternative output array; must be of the same shape and buffer
        length as the expected output.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result broadcasts
        correctly against the input array.  If the default value is
        passed, `keepdims` is not forwarded to the `max` method of
        ndarray sub-classes; any non-default value is.

    Returns
    -------
    amax : ndarray or scalar
        Maximum of `a`.  A scalar if `axis` is None; otherwise an array
        of dimension ``a.ndim - 1``.

    See Also
    --------
    amin, nanmax, maximum, fmax, argmax
    nanmin, minimum, fmin

    Notes
    -----
    NaN values are propagated: if at least one item is NaN, the
    corresponding max value will be NaN as well.  To ignore NaN values
    (MATLAB behavior), please use nanmax.

    Don't use `amax` for element-wise comparison of 2 arrays; when
    ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
    ``amax(a, axis=0)``.

    Examples
    --------
    >>> a = np.arange(4).reshape((2,2))
    >>> np.amax(a)           # Maximum of the flattened array
    3
    >>> np.amax(a, axis=0)   # Maxima along the first axis
    array([2, 3])
    """
    # Forward `keepdims` only when explicitly supplied by the caller.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if type(a) is not mu.ndarray:
        # Duck-typed objects and ndarray sub-classes dispatch to their
        # own ``max`` method when one exists.
        a_max = getattr(a, 'max', None)
        if a_max is not None:
            return a_max(axis=axis, out=out, **kwargs)
    # Plain ndarrays (or objects without a ``max`` method) use the
    # internal reduction.
    return _methods._amax(a, axis=axis, out=out, **kwargs)
def amin(a, axis=None, out=None, keepdims=np._NoValue):
    """
    Return the minimum of an array or minimum along an axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to operate.  By default the flattened
        input is used; a tuple of ints selects the minimum over
        multiple axes.
    out : ndarray, optional
        Alternative output array; must be of the same shape and buffer
        length as the expected output.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result broadcasts
        correctly against the input array.  If the default value is
        passed, `keepdims` is not forwarded to the `min` method of
        ndarray sub-classes; any non-default value is.

    Returns
    -------
    amin : ndarray or scalar
        Minimum of `a`.  A scalar if `axis` is None; otherwise an array
        of dimension ``a.ndim - 1``.

    See Also
    --------
    amax, nanmin, minimum, fmin, argmin
    nanmax, maximum, fmax

    Notes
    -----
    NaN values are propagated: if at least one item is NaN, the
    corresponding min value will be NaN as well.  To ignore NaN values
    (MATLAB behavior), please use nanmin.

    Don't use `amin` for element-wise comparison of 2 arrays; when
    ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
    ``amin(a, axis=0)``.

    Examples
    --------
    >>> a = np.arange(4).reshape((2,2))
    >>> np.amin(a)           # Minimum of the flattened array
    0
    >>> np.amin(a, axis=0)   # Minima along the first axis
    array([0, 1])
    """
    # Forward `keepdims` only when explicitly supplied by the caller.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if type(a) is not mu.ndarray:
        # Duck-typed objects and ndarray sub-classes dispatch to their
        # own ``min`` method when one exists.
        a_min = getattr(a, 'min', None)
        if a_min is not None:
            return a_min(axis=axis, out=out, **kwargs)
    # Plain ndarrays (or objects without a ``min`` method) use the
    # internal reduction.
    return _methods._amin(a, axis=axis, out=out, **kwargs)
def alen(a):
    """
    Return the length of the first dimension of the input array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    alen : int
        Length of the first dimension of `a`.

    See Also
    --------
    shape, size

    Examples
    --------
    >>> a = np.zeros((7,4,5))
    >>> np.alen(a)
    7
    """
    try:
        # Sequences and ndarrays support len() directly.
        n = len(a)
    except TypeError:
        # 0-d inputs (e.g. scalars): promote to at least 1-d first.
        n = len(array(a, ndmin=1))
    return n
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
    """
    Return the product of array elements over a given axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        Axis or axes along which a product is performed.  The default,
        ``axis=None``, multiplies all elements; a negative axis counts
        from the last to the first axis, and a tuple of ints reduces
        over all the specified axes.
    dtype : dtype, optional
        Type of the returned array and of the accumulator.  The dtype of
        `a` is used by default, with integer inputs of lower precision
        than the platform integer promoted to the (signed or unsigned)
        platform integer.
    out : ndarray, optional
        Alternative output array; must have the same shape as the
        expected output (output values are cast if necessary).
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result broadcasts
        correctly against the input array.  If the default value is
        passed, `keepdims` is not forwarded to the `prod` method of
        ndarray sub-classes; any non-default value is.

    Returns
    -------
    product_along_axis : ndarray, see `dtype` parameter above.
        An array shaped as `a` but with the specified axis removed.
        Returns a reference to `out` if specified.

    See Also
    --------
    ndarray.prod : equivalent method

    Notes
    -----
    Arithmetic is modular when using integer types, and no error is
    raised on overflow.  The product of an empty array is the neutral
    element 1:

    >>> np.prod([])
    1.0

    Examples
    --------
    >>> np.prod([[1.,2.],[3.,4.]])
    24.0
    >>> np.prod([[1.,2.],[3.,4.]], axis=1)
    array([  2.,  12.])
    """
    # Forward `keepdims` only when explicitly supplied by the caller.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if type(a) is not mu.ndarray:
        # Duck-typed objects and ndarray sub-classes dispatch to their
        # own ``prod`` method when one exists.
        a_prod = getattr(a, 'prod', None)
        if a_prod is not None:
            return a_prod(axis=axis, dtype=dtype, out=out, **kwargs)
    return _methods._prod(a, axis=axis, dtype=dtype, out=out, **kwargs)
def cumprod(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative product of elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative product is computed.  By default
        the input is flattened.
    dtype : dtype, optional
        Type of the returned array and of the accumulator.  Defaults to
        the dtype of `a`, with integer inputs of lower precision than
        the platform integer promoted to the platform integer type.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output (values are cast if necessary).

    Returns
    -------
    cumprod : ndarray
        A new array holding the result, or a reference to `out` when it
        is provided.

    Notes
    -----
    Arithmetic is modular when using integer types, and no error is
    raised on overflow.

    Examples
    --------
    >>> a = np.array([1,2,3])
    >>> np.cumprod(a)  # intermediate results 1, 1*2
    ...                # total product 1*2*3 = 6
    array([1, 2, 6])
    >>> np.cumprod(np.array([[1, 2, 3], [4, 5, 6]]), dtype=float)
    array([   1.,    2.,    6.,   24.,  120.,  720.])
    """
    # Delegate to the array's own ``cumprod`` method via the wrapper.
    call_kwargs = {'axis': axis, 'dtype': dtype, 'out': out}
    return _wrapfunc(a, 'cumprod', **call_kwargs)
def ndim(a):
    """
    Return the number of dimensions of an array.

    Parameters
    ----------
    a : array_like
        Input array.  If it is not already an ndarray, a conversion is
        attempted.

    Returns
    -------
    number_of_dimensions : int
        The number of dimensions in `a`.  Scalars are zero-dimensional.

    See Also
    --------
    ndarray.ndim : equivalent method
    shape : dimensions of array
    ndarray.shape : dimensions of array

    Examples
    --------
    >>> np.ndim([[1,2,3],[4,5,6]])
    2
    >>> np.ndim(1)
    0
    """
    try:
        # Fast path: the object already exposes its dimensionality.
        result = a.ndim
    except AttributeError:
        # Fall back to converting to an ndarray first.
        result = asarray(a).ndim
    return result
def rank(a):
    """
    Return the number of dimensions of an array.

    If `a` is not already an array, a conversion is attempted.  Scalars
    are zero dimensional.

    .. note::
        This function is deprecated in NumPy 1.9 to avoid confusion with
        `numpy.linalg.matrix_rank`.  The ``ndim`` attribute or function
        should be used instead.

    Parameters
    ----------
    a : array_like
        Array whose number of dimensions is desired.  If `a` is not an
        array, a conversion is attempted.

    Returns
    -------
    number_of_dimensions : int
        The number of dimensions in the array.

    See Also
    --------
    ndim : equivalent function
    ndarray.ndim : equivalent property

    Notes
    -----
    In the old Numeric package, `rank` was the term used for the number
    of dimensions, but in NumPy `ndim` is used instead.

    Examples
    --------
    >>> np.rank([1,2,3])
    1
    >>> np.rank(1)
    0
    """
    # Deprecated 2014-04-12, NumPy 1.9.
    warnings.warn(
        "`rank` is deprecated; use the `ndim` attribute or function instead. "
        "To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
        VisibleDeprecationWarning, stacklevel=2)
    try:
        result = a.ndim
    except AttributeError:
        result = asarray(a).ndim
    return result
def size(a, axis=None):
    """
    Return the number of elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which the elements are counted.  By default, the
        total number of elements is returned.

    Returns
    -------
    element_count : int
        Number of elements along the specified axis.

    See Also
    --------
    shape : dimensions of array
    ndarray.shape : dimensions of array
    ndarray.size : number of elements in array

    Examples
    --------
    >>> a = np.array([[1,2,3],[4,5,6]])
    >>> np.size(a)
    6
    >>> np.size(a,1)
    3
    """
    try:
        # Fast path for objects that already expose ndarray attributes.
        return a.size if axis is None else a.shape[axis]
    except AttributeError:
        # Convert once, then answer the same question.
        arr = asarray(a)
        return arr.size if axis is None else arr.shape[axis]
def around(a, decimals=0, out=None):
    """
    Evenly round to the given number of decimals.

    Parameters
    ----------
    a : array_like
        Input data.
    decimals : int, optional
        Number of decimal places to round to (default: 0).  If negative,
        it specifies the number of positions to the left of the decimal
        point.
    out : ndarray, optional
        Alternative output array; must have the same shape as the
        expected output (output values are cast if necessary).

    Returns
    -------
    rounded_array : ndarray
        An array of the same type as `a`, containing the rounded values.
        Unless `out` was specified, a new array is created.  The real
        and imaginary parts of complex numbers are rounded separately;
        rounding a float yields a float.

    See Also
    --------
    ndarray.round : equivalent method
    ceil, fix, floor, rint, trunc

    Notes
    -----
    For values exactly halfway between rounded decimal values, NumPy
    rounds to the nearest even value.  Thus 1.5 and 2.5 round to 2.0,
    -0.5 and 0.5 round to 0.0, etc.  Results may also be surprising due
    to the inexact representation of decimal fractions in the IEEE
    floating point standard and errors introduced when scaling by powers
    of ten.

    References
    ----------
    .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
           http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF

    Examples
    --------
    >>> np.around([0.37, 1.64])
    array([ 0.,  2.])
    >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
    array([ 0.,  2.,  2.,  4.,  4.])
    >>> np.around([1,2,3,11], decimals=-1)
    array([ 0,  0,  0, 10])
    """
    # Delegate to the array's own ``round`` method via the common wrapper.
    return _wrapfunc(a, 'round', out=out, decimals=decimals)
def round_(a, decimals=0, out=None):
    """
    Round an array to the given number of decimals.

    Refer to `around` for full documentation.

    See Also
    --------
    around : equivalent function
    """
    # Thin alias; all the work happens in `around`.
    return around(a, decimals, out)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
    """
    Compute the arithmetic mean along the specified axis.

    Returns the average of the array elements.  The average is taken
    over the flattened array by default, otherwise over the specified
    axis.  `float64` intermediate and return values are used for
    integer inputs.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose mean is desired.  If `a` is not
        an array, a conversion is attempted.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the means are computed.  The default is
        to compute the mean of the flattened array; a tuple of ints
        averages over multiple axes.
    dtype : data-type, optional
        Type to use in computing the mean.  For integer inputs the
        default is `float64`; for floating point inputs it is the same
        as the input dtype.
    out : ndarray, optional
        Alternate output array in which to place the result.  Must have
        the same shape as the expected output (type cast if necessary).
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result broadcasts
        correctly against the input array.  If the default value is
        passed, `keepdims` is not forwarded to the `mean` method of
        ndarray sub-classes; any non-default value is.

    Returns
    -------
    m : ndarray, see dtype parameter above
        If `out=None`, returns a new array containing the mean values,
        otherwise a reference to the output array is returned.

    See Also
    --------
    average : Weighted average
    std, var, nanmean, nanstd, nanvar

    Notes
    -----
    The arithmetic mean is the sum of the elements along the axis
    divided by the number of elements.  For floating-point input the
    mean is computed in the input's own precision; for `float32` data a
    higher-precision accumulator (``dtype=np.float64``) may be needed
    for accurate results.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.mean(a)
    2.5
    >>> np.mean(a, axis=0)
    array([ 2.,  3.])
    """
    # Forward `keepdims` only when explicitly supplied by the caller.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if type(a) is not mu.ndarray:
        # Duck-typed objects and ndarray sub-classes dispatch to their
        # own ``mean`` method when one exists.
        a_mean = getattr(a, 'mean', None)
        if a_mean is not None:
            return a_mean(axis=axis, dtype=dtype, out=out, **kwargs)
    return _methods._mean(a, axis=axis, dtype=dtype, out=out, **kwargs)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
    """
    Compute the standard deviation along the specified axis.

    Returns the standard deviation, a measure of the spread of a
    distribution, of the array elements.  The standard deviation is
    computed for the flattened array by default, otherwise over the
    specified axis.

    Parameters
    ----------
    a : array_like
        Calculate the standard deviation of these values.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the standard deviation is computed.
        The default is the flattened array; a tuple of ints reduces over
        multiple axes.
    dtype : dtype, optional
        Type to use in computing the standard deviation.  For arrays of
        integer type the default is float64; for arrays of float types
        it is the same as the array type.
    out : ndarray, optional
        Alternative output array; must have the same shape as the
        expected output (calculated values are cast if necessary).
    ddof : int, optional
        Means Delta Degrees of Freedom.  The divisor used in
        calculations is ``N - ddof``, where ``N`` represents the number
        of elements.  By default `ddof` is zero.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result broadcasts
        correctly against the input array.  If the default value is
        passed, `keepdims` is not forwarded to the `std` method of
        ndarray sub-classes; any non-default value is.

    Returns
    -------
    standard_deviation : ndarray, see dtype parameter above.
        If `out` is None, return a new array containing the standard
        deviation, otherwise return a reference to the output array.

    See Also
    --------
    var, mean, nanmean, nanstd, nanvar

    Notes
    -----
    ``std = sqrt(mean(abs(x - x.mean())**2))``.  With `ddof` specified
    the divisor ``N - ddof`` is used instead of ``N``; ``ddof=1``
    provides an unbiased estimator of the variance of the infinite
    population, though its square root is still not an unbiased estimate
    of the standard deviation per se.  For complex numbers, `std` takes
    the absolute value before squaring, so that the result is always
    real and nonnegative.  For float32 input a higher-accuracy
    accumulator (``dtype=np.float64``) may be needed.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.std(a)
    1.1180339887498949
    >>> np.std(a, axis=0)
    array([ 1.,  1.])
    """
    # Forward `keepdims` only when explicitly supplied by the caller.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if type(a) is not mu.ndarray:
        # Duck-typed objects and ndarray sub-classes dispatch to their
        # own ``std`` method when one exists.
        a_std = getattr(a, 'std', None)
        if a_std is not None:
            return a_std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
    return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                         **kwargs)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
    """
    Compute the variance along the specified axis.

    Returns the variance of the array elements, a measure of the spread
    of a distribution.  The variance is computed for the flattened array
    by default, otherwise over the specified axis.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose variance is desired.  If `a` is
        not an array, a conversion is attempted.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the variance is computed.  The default
        is to compute the variance of the flattened array; a tuple of
        ints reduces over multiple axes.
    dtype : data-type, optional
        Type to use in computing the variance.  For arrays of integer
        type the default is `float64`; for arrays of float types it is
        the same as the array type.
    out : ndarray, optional
        Alternate output array in which to place the result.  It must
        have the same shape as the expected output, but the type is cast
        if necessary.
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor used in the calculation
        is ``N - ddof``, where ``N`` represents the number of elements.
        By default `ddof` is zero.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result broadcasts
        correctly against the input array.  If the default value is
        passed, `keepdims` is not forwarded to the `var` method of
        ndarray sub-classes; any non-default value is.

    Returns
    -------
    variance : ndarray, see dtype parameter above
        If ``out=None``, returns a new array containing the variance;
        otherwise, a reference to the output array is returned.

    See Also
    --------
    std, mean, nanmean, nanstd, nanvar

    Notes
    -----
    The variance is the average of the squared deviations from the mean,
    i.e., ``var = mean(abs(x - x.mean())**2)``.  With `ddof` specified
    the divisor ``N - ddof`` is used instead of ``N``; ``ddof=1``
    provides an unbiased estimator of the variance of a hypothetical
    infinite population, while ``ddof=0`` provides a maximum likelihood
    estimate for normally distributed variables.  For complex numbers,
    the absolute value is taken before squaring, so the result is always
    real and nonnegative.  For float32 input the variance is computed in
    float32; a higher-accuracy accumulator (``dtype=np.float64``) may be
    needed for accurate results.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.var(a)
    1.25
    >>> np.var(a, axis=0)
    array([ 1.,  1.])
    >>> np.var(a, axis=1)
    array([ 0.25,  0.25])
    """
    # NOTE: the docstring above fixes an error in the previous version,
    # which claimed a `float32` default accumulator for integer input;
    # integer input is accumulated in float64, matching `mean` and `std`.
    # Forward `keepdims` only when the caller actually supplied it, so
    # sub-classes whose `var` lacks the argument keep working.
    kwargs = {}
    if keepdims is not np._NoValue:
        kwargs['keepdims'] = keepdims
    if type(a) is not mu.ndarray:
        # Defer to a duck-typed `var` method when one exists.
        try:
            var = a.var
        except AttributeError:
            pass
        else:
            return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
    # Plain ndarrays skip the method lookup and use the internal routine.
    return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                         **kwargs)
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/tools/test_numeric.py | 2 | 17980 | import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, to_numeric
from pandas.util import testing as tm
@pytest.fixture(params=[None, "ignore", "raise", "coerce"])
def errors(request):
    # Parametrizes tests over every accepted value of to_numeric's
    # `errors` argument (None meaning "not passed").
    return request.param
@pytest.fixture(params=[True, False])
def signed(request):
    # Boolean flag fixture — presumably toggles signed vs unsigned test
    # values; its consumers are outside this chunk.
    return request.param
@pytest.fixture(params=[lambda x: x, str], ids=["identity", "str"])
def transform(request):
    # A callable applied to test inputs: identity or `str` conversion.
    # Its consumers are outside this chunk.
    return request.param
@pytest.fixture(params=[47393996303418497800, 100000000000000000000])
def large_val(request):
    # Integers larger than int64 can hold (> 2**63 - 1); consumers are
    # outside this chunk.
    return request.param
@pytest.fixture(params=[True, False])
def multiple_elts(request):
    # Boolean flag fixture — presumably selects single- vs multi-element
    # inputs; its consumers are outside this chunk.
    return request.param
@pytest.fixture(
    params=[
        (lambda x: Index(x, name="idx"), tm.assert_index_equal),
        (lambda x: Series(x, name="ser"), tm.assert_series_equal),
        (lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal),
    ]
)
def transform_assert_equal(request):
    # Pairs a container constructor (Index / Series / ndarray) with the
    # matching pandas assertion helper for comparing results.
    return request.param
@pytest.mark.parametrize(
    "input_kwargs,result_kwargs",
    [
        (dict(), dict(dtype=np.int64)),
        (dict(errors="coerce", downcast="integer"), dict(dtype=np.int8)),
    ],
)
def test_empty(input_kwargs, result_kwargs):
    # see gh-16302
    # An empty object Series converts to an empty numeric Series whose
    # dtype depends on the downcast options.
    empty = Series([], dtype=object)
    converted = to_numeric(empty, **input_kwargs)
    tm.assert_series_equal(converted, Series([], **result_kwargs))
@pytest.mark.parametrize("last_val", ["7", 7])
def test_series(last_val):
    # String and mixed inputs both convert to the same numeric Series.
    converted = to_numeric(Series(["1", "-3.14", last_val]))
    tm.assert_series_equal(converted, Series([1, -3.14, 7]))
@pytest.mark.parametrize(
    "data",
    [
        [1, 3, 4, 5],
        [1.0, 3.0, 4.0, 5.0],
        # Bool is regarded as numeric.
        [True, False, True, True],
    ],
)
def test_series_numeric(data):
    # Already-numeric Series pass through unchanged, index and name
    # included.
    original = Series(data, index=list("ABCD"), name="EFG")
    tm.assert_series_equal(to_numeric(original), original)
@pytest.mark.parametrize(
    "data,msg",
    [
        ([1, -3.14, "apple"], 'Unable to parse string "apple" at position 2'),
        (
            ["orange", 1, -3.14, "apple"],
            'Unable to parse string "orange" at position 0',
        ),
    ],
)
def test_error(data, msg):
    # With errors="raise", the first unparseable string is reported by
    # value and position.
    bad = Series(data)
    with pytest.raises(ValueError, match=msg):
        to_numeric(bad, errors="raise")
@pytest.mark.parametrize(
    "errors,exp_data", [("ignore", [1, -3.14, "apple"]), ("coerce", [1, -3.14, np.nan])]
)
def test_ignore_error(errors, exp_data):
    # "ignore" leaves unparseable entries intact; "coerce" replaces
    # them with NaN.
    converted = to_numeric(Series([1, -3.14, "apple"]), errors=errors)
    tm.assert_series_equal(converted, Series(exp_data))
@pytest.mark.parametrize(
    "errors,exp",
    [
        ("raise", 'Unable to parse string "apple" at position 2'),
        ("ignore", [True, False, "apple"]),
        # Coerces to float.
        ("coerce", [1.0, 0.0, np.nan]),
    ],
)
def test_bool_handling(errors, exp):
    # Mixed bool/string input: raise reports the bad string, ignore
    # returns the input as-is, coerce converts bools to floats.
    bools = Series([True, False, "apple"])
    if isinstance(exp, str):
        with pytest.raises(ValueError, match=exp):
            to_numeric(bools, errors=errors)
    else:
        converted = to_numeric(bools, errors=errors)
        tm.assert_series_equal(converted, Series(exp))
def test_list():
    """A plain list of numeric strings converts to a float ndarray."""
    converted = to_numeric(["1", "-3.14", "7"])
    tm.assert_numpy_array_equal(converted, np.array([1, -3.14, 7]))
@pytest.mark.parametrize(
    "data,arr_kwargs",
    [
        ([1, 3, 4, 5], dict(dtype=np.int64)),
        ([1.0, 3.0, 4.0, 5.0], dict()),
        # Boolean is regarded as numeric.
        ([True, False, True, True], dict()),
    ],
)
def test_list_numeric(data, arr_kwargs):
    """Numeric (and boolean) lists convert to the matching ndarray dtype."""
    tm.assert_numpy_array_equal(to_numeric(data), np.array(data, **arr_kwargs))
@pytest.mark.parametrize("kwargs", [dict(dtype="O"), dict()])
def test_numeric(kwargs):
    """Numeric data converts the same whether stored as object or native dtype."""
    values = [1, -3.14, 7]
    converted = to_numeric(Series(values, **kwargs))
    tm.assert_series_equal(converted, Series(values))
@pytest.mark.parametrize(
    "columns",
    [
        # One column.
        "a",
        # Multiple columns.
        ["a", "b"],
    ],
)
def test_numeric_df_columns(columns):
    """to_numeric applies cleanly to one or several DataFrame columns (gh-14827)."""
    df = DataFrame(
        dict(
            a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), "0.1"],
            b=[1.0, 2.0, 3.0, 4.0],
        )
    )
    converted = df.copy()
    converted[columns] = converted[columns].apply(to_numeric)
    expected = DataFrame(dict(a=[1.2, 3.14, np.inf, 0.1], b=[1.0, 2.0, 3.0, 4.0]))
    tm.assert_frame_equal(converted, expected)
@pytest.mark.parametrize(
    "data,exp_data",
    [
        (
            [[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1],
            [[3.14, 1.0], 1.6, 0.1],
        ),
        ([np.array([decimal.Decimal(3.14), 1.0]), 0.1], [[3.14, 1.0], 0.1]),
    ],
)
def test_numeric_embedded_arr_likes(data, exp_data):
    """Element-wise conversion handles cells that are themselves lists/arrays."""
    df = DataFrame(dict(a=data))
    df["a"] = df["a"].apply(to_numeric)
    tm.assert_frame_equal(df, DataFrame(dict(a=exp_data)))
def test_all_nan():
    """Coercing a fully non-numeric Series yields an all-NaN float Series."""
    coerced = to_numeric(Series(["a", "b", "c"]), errors="coerce")
    tm.assert_series_equal(coerced, Series([np.nan, np.nan, np.nan]))
def test_type_check(errors):
    """to_numeric rejects DataFrame input with a TypeError (gh-11776)."""
    df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
    kwargs = {} if errors is None else {"errors": errors}
    with pytest.raises(TypeError, match="1-d array"):
        to_numeric(df, **kwargs)
@pytest.mark.parametrize("val", [1, 1.1, 20001])
def test_scalar(val, signed, transform):
    """A scalar (optionally negated and transformed) converts to its float value."""
    scalar = -val if signed else val
    assert to_numeric(transform(scalar)) == float(scalar)
def test_really_large_scalar(large_val, signed, transform, errors):
    """Huge integers as strings raise by default, coerce to float on request (gh-24910)."""
    kwargs = {} if errors is None else {"errors": errors}
    val = transform(-large_val if signed else large_val)
    as_string = isinstance(val, str)

    if as_string and errors in (None, "raise"):
        with pytest.raises(ValueError, match="Integer out of range. at position 0"):
            to_numeric(val, **kwargs)
    else:
        expected = float(val) if (errors == "coerce" and as_string) else val
        tm.assert_almost_equal(to_numeric(val, **kwargs), expected)
def test_really_large_in_arr(large_val, signed, transform, multiple_elts, errors):
    """Out-of-range values inside an array honor the errors policy (gh-24910)."""
    kwargs = {} if errors is None else {"errors": errors}
    val = transform(-large_val if signed else large_val)
    extra_elt = "string"
    arr = [val] + multiple_elts * [extra_elt]

    val_is_string = isinstance(val, str)
    coercing = errors == "coerce"

    if errors in (None, "raise") and (val_is_string or multiple_elts):
        # Whichever unparseable element comes first determines the message.
        if val_is_string:
            msg = "Integer out of range. at position 0"
        else:
            msg = 'Unable to parse string "string" at position 1'
        with pytest.raises(ValueError, match=msg):
            to_numeric(arr, **kwargs)
        return

    result = to_numeric(arr, **kwargs)
    exp_val = float(val) if (coercing and val_is_string) else val
    expected = [exp_val]

    if multiple_elts:
        if coercing:
            expected.append(np.nan)
            exp_dtype = float
        else:
            expected.append(extra_elt)
            exp_dtype = object
    else:
        exp_dtype = float if isinstance(exp_val, (int, float)) else object

    tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype))
def test_really_large_in_arr_consistent(large_val, signed, multiple_elts, errors):
    """Needing float for one element does not excuse later bad elements (gh-24910).

    Even if we discover that we have to hold float, we remain strict about
    subsequent elements that fail to parse as integers.
    """
    kwargs = {} if errors is None else {"errors": errors}
    arr = [str(-large_val if signed else large_val)]
    if multiple_elts:
        arr.insert(0, large_val)

    if errors in (None, "raise"):
        # The offending string sits after the optional leading int.
        msg = "Integer out of range. at position {index}".format(index=int(multiple_elts))
        with pytest.raises(ValueError, match=msg):
            to_numeric(arr, **kwargs)
    else:
        result = to_numeric(arr, **kwargs)
        if errors == "coerce":
            expected, exp_dtype = [float(i) for i in arr], float
        else:
            expected, exp_dtype = arr, object
        tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype))
@pytest.mark.parametrize(
    "errors,checker",
    [
        ("raise", 'Unable to parse string "fail" at position 0'),
        ("ignore", lambda x: x == "fail"),
        ("coerce", lambda x: np.isnan(x)),
    ],
)
def test_scalar_fail(errors, checker):
    """A lone unparseable scalar raises, passes through, or becomes NaN."""
    scalar = "fail"
    if not isinstance(checker, str):
        assert checker(to_numeric(scalar, errors=errors))
        return
    with pytest.raises(ValueError, match=checker):
        to_numeric(scalar, errors=errors)
@pytest.mark.parametrize("data", [[1, 2, 3], [1.0, np.nan, 3, np.nan]])
def test_numeric_dtypes(data, transform_assert_equal):
    """Numeric containers of any supported type round-trip unchanged."""
    transform, assert_equal = transform_assert_equal
    transformed = transform(data)
    assert_equal(to_numeric(transformed), transformed)
@pytest.mark.parametrize(
    "data,exp",
    [
        (["1", "2", "3"], np.array([1, 2, 3], dtype="int64")),
        (["1.5", "2.7", "3.4"], np.array([1.5, 2.7, 3.4])),
    ],
)
def test_str(data, exp, transform_assert_equal):
    """Numeric strings parse to ints or floats depending on content."""
    transform, assert_equal = transform_assert_equal
    assert_equal(to_numeric(transform(data)), transform(exp))
def test_datetime_like(tz_naive_fixture, transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.date_range("20130101", periods=3, tz=tz_naive_fixture)
result = to_numeric(transform(idx))
expected = transform(idx.asi8)
assert_equal(result, expected)
def test_timedelta(transform_assert_equal):
transform, assert_equal = transform_assert_equal
idx = pd.timedelta_range("1 days", periods=3, freq="D")
result = to_numeric(transform(idx))
expected = transform(idx.asi8)
assert_equal(result, expected)
def test_period(transform_assert_equal):
    """A PeriodIndex converts to ordinals; PeriodDtype Series are not yet supported."""
    transform, assert_equal = transform_assert_equal
    idx = pd.period_range("2011-01", periods=3, freq="M", name="")
    inp = transform(idx)
    if not isinstance(inp, Index):
        # TODO: PeriodDtype, so support it in to_numeric.
        pytest.skip("Missing PeriodDtype support in to_numeric")
    assert_equal(to_numeric(inp), transform(idx.asi8))
@pytest.mark.parametrize(
    "errors,expected",
    [
        ("raise", "Invalid object type at position 0"),
        ("ignore", Series([[10.0, 2], 1.0, "apple"])),
        ("coerce", Series([np.nan, 1.0, np.nan])),
    ],
)
def test_non_hashable(errors, expected):
    """Unhashable elements raise, pass through, or coerce to NaN (gh-13324)."""
    ser = Series([[10.0, 2], 1.0, "apple"])
    if isinstance(expected, str):
        with pytest.raises(TypeError, match=expected):
            to_numeric(ser, errors=errors)
    else:
        tm.assert_series_equal(to_numeric(ser, errors=errors), expected)
def test_downcast_invalid_cast():
    """An unknown downcast name is rejected up front (gh-13352)."""
    with pytest.raises(ValueError, match="invalid downcasting method provided"):
        to_numeric(["1", 2, 3], downcast="unsigned-integer")
def test_errors_invalid_value():
    """An unknown errors value is rejected up front (gh-26466)."""
    with pytest.raises(ValueError, match="invalid error value specified"):
        to_numeric(["1", 2, 3], errors="invalid")
@pytest.mark.parametrize(
    "data",
    [
        ["1", 2, 3],
        [1, 2, 3],
        np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"),
    ],
)
@pytest.mark.parametrize(
    "kwargs,exp_dtype",
    [
        # Basic function tests.
        (dict(), np.int64),
        (dict(downcast=None), np.int64),
        # Support below np.float32 is rare and far between.
        (dict(downcast="float"), np.dtype(np.float32).char),
        # Basic dtype support.
        (dict(downcast="unsigned"), np.dtype(np.typecodes["UnsignedInteger"][0])),
    ],
)
def test_downcast_basic(data, kwargs, exp_dtype):
    """downcast shrinks the result dtype where the values permit (gh-13352)."""
    result = to_numeric(data, **kwargs)
    tm.assert_numpy_array_equal(result, np.array([1, 2, 3], dtype=exp_dtype))
@pytest.mark.parametrize("signed_downcast", ["integer", "signed"])
@pytest.mark.parametrize(
    "data",
    [
        ["1", 2, 3],
        [1, 2, 3],
        np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"),
    ],
)
def test_signed_downcast(data, signed_downcast):
    """'integer' and 'signed' both downcast to the smallest signed dtype (gh-13352)."""
    expected = np.array([1, 2, 3], dtype=np.dtype(np.typecodes["Integer"][0]))
    tm.assert_numpy_array_equal(to_numeric(data, downcast=signed_downcast), expected)
def test_ignore_downcast_invalid_data():
    """If conversion fails under errors='ignore', downcast is simply skipped."""
    data = ["foo", 2, 3]
    result = to_numeric(data, errors="ignore", downcast="unsigned")
    tm.assert_numpy_array_equal(result, np.array(data, dtype=object))
def test_ignore_downcast_neg_to_unsigned():
    """A negative value blocks the unsigned downcast, leaving int64 untouched."""
    result = to_numeric(["-1", 2, 3], downcast="unsigned")
    tm.assert_numpy_array_equal(result, np.array([-1, 2, 3], dtype=np.int64))
@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])
@pytest.mark.parametrize(
    "data,expected",
    [
        (["1.1", 2, 3], np.array([1.1, 2, 3], dtype=np.float64)),
        (
            [10000.0, 20000, 3000, 40000.36, 50000, 50000.00],
            np.array(
                [10000.0, 20000, 3000, 40000.36, 50000, 50000.00], dtype=np.float64
            ),
        ),
    ],
)
def test_ignore_downcast_cannot_convert_float(data, expected, downcast):
    """Float values block every integer downcast; the result stays float64."""
    tm.assert_numpy_array_equal(to_numeric(data, downcast=downcast), expected)
@pytest.mark.parametrize(
    "downcast,expected_dtype",
    [("integer", np.int16), ("signed", np.int16), ("unsigned", np.uint16)],
)
def test_downcast_not8bit(downcast, expected_dtype):
    """The smallest fitting integer dtype may be wider than 8 bits."""
    result = to_numeric(["256", 257, 258], downcast=downcast)
    tm.assert_numpy_array_equal(result, np.array([256, 257, 258], dtype=expected_dtype))
@pytest.mark.parametrize(
    "dtype,downcast,min_max",
    [
        ("int8", "integer", [iinfo(np.int8).min, iinfo(np.int8).max]),
        ("int16", "integer", [iinfo(np.int16).min, iinfo(np.int16).max]),
        ("int32", "integer", [iinfo(np.int32).min, iinfo(np.int32).max]),
        ("int64", "integer", [iinfo(np.int64).min, iinfo(np.int64).max]),
        ("uint8", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max]),
        ("uint16", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max]),
        ("uint32", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max]),
        ("uint64", "unsigned", [iinfo(np.uint64).min, iinfo(np.uint64).max]),
        ("int16", "integer", [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
        ("int32", "integer", [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
        ("int64", "integer", [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
        ("int16", "integer", [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
        ("int32", "integer", [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
        ("int64", "integer", [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
        ("uint16", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
        ("uint32", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
        ("uint64", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]),
    ],
)
def test_downcast_limits(dtype, downcast, min_max):
    """Each downcast lands on the dtype whose limits the data exactly touches (gh-14404)."""
    converted = to_numeric(Series(min_max), downcast=downcast)
    assert converted.dtype == dtype
@pytest.mark.parametrize(
    "data,exp_data",
    [
        (
            [200, 300, "", "NaN", 30000000000000000000],
            [200, 300, np.nan, np.nan, 30000000000000000000],
        ),
        (
            ["12345678901234567890", "1234567890", "ITEM"],
            [12345678901234567890, 1234567890, np.nan],
        ),
    ],
)
def test_coerce_uint64_conflict(data, exp_data):
    """Coercion keeps float output despite the uint64/NaN conflict (gh-17007, gh-17125).

    Still returns float even though the uint64-nan conflict would normally
    force the casting to object.
    """
    result = to_numeric(Series(data), errors="coerce")
    tm.assert_series_equal(result, Series(exp_data, dtype=float))
@pytest.mark.parametrize(
    "errors,exp",
    [
        ("ignore", Series(["12345678901234567890", "1234567890", "ITEM"])),
        ("raise", "Unable to parse string"),
    ],
)
def test_non_coerce_uint64_conflict(errors, exp):
    """Without coercion the uint64/NaN conflict raises or passes through (gh-17007, gh-17125)."""
    ser = Series(["12345678901234567890", "1234567890", "ITEM"])
    if isinstance(exp, str):
        with pytest.raises(ValueError, match=exp):
            to_numeric(ser, errors=errors)
    else:
        tm.assert_series_equal(to_numeric(ser, errors=errors), ser)
| apache-2.0 |
mjabri/holoviews | holoviews/plotting/mpl/plot.py | 1 | 43893 | from __future__ import division
from collections import defaultdict
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D # pyflakes:ignore (For 3D plots)
from matplotlib import pyplot as plt
from matplotlib import gridspec, animation
import param
from ...core import OrderedDict, HoloMap, AdjointLayout, NdLayout,\
GridSpace, Element, CompositeOverlay, Element3D, Empty, Collator
from ...core.options import Store, Compositor
from ...core import traversal
from ...core.util import int_to_roman,\
int_to_alpha, basestring
from ..plot import DimensionedPlot, GenericLayoutPlot, GenericCompositePlot
from .renderer import MPLRenderer
class MPLPlot(DimensionedPlot):
    """
    An MPLPlot object draws a matplotlib figure object when called or
    indexed but can also return a matplotlib animation object as
    appropriate. MPLPlots take element objects such as Image, Contours
    or Points as inputs and plots them in the appropriate format using
    matplotlib. As HoloMaps are supported, all plots support animation
    via the anim() method.
    """

    # Renderer class used to export figures produced by this plot type.
    renderer = MPLRenderer
    # Registry of side plot classes; populated elsewhere (empty here).
    sideplots = {}

    fig_alpha = param.Number(default=1.0, bounds=(0, 1), doc="""
        Alpha of the overall figure background.""")

    fig_bounds = param.NumericTuple(default=(0.15, 0.15, 0.85, 0.85),
                                    doc="""
        The bounds of the overall figure as a 4-tuple of the form
        (left, bottom, right, top), defining the size of the border
        around the subplots.""")

    fig_inches = param.Parameter(default=4, doc="""
        The overall matplotlib figure size in inches. May be set as
        an integer in which case it will be used to autocompute a
        size. Alternatively may be set with an explicit tuple or list,
        in which case it will be applied directly after being scaled
        by fig_size. If either the width or height is set to None,
        it will be computed automatically.""")

    fig_latex = param.Boolean(default=False, doc="""
        Whether to use LaTeX text in the overall figure.""")

    fig_rcparams = param.Dict(default={}, doc="""
        matplotlib rc parameters to apply to the overall figure.""")

    fig_size = param.Integer(default=100, bounds=(1, None), doc="""
        Size relative to the supplied overall fig_inches in percent.""")

    finalize_hooks = param.HookList(default=[], doc="""
        Optional list of hooks called when finalizing an axis.
        The hook is passed the full set of plot handles and the
        displayed object.""")

    sublabel_format = param.String(default=None, allow_None=True, doc="""
        Allows labeling the subaxes in each plot with various formatters
        including {Alpha}, {alpha}, {numeric} and {roman}.""")

    sublabel_position = param.NumericTuple(default=(-0.35, 0.85), doc="""
        Position relative to the plot for placing the optional subfigure label.""")

    sublabel_size = param.Number(default=18, doc="""
        Size of optional subfigure label.""")

    projection = param.ObjectSelector(default=None,
                                      objects=['3d', 'polar', None], doc="""
        The projection of the plot axis, default of None is equivalent to
        2D plot, '3d' and 'polar' are also supported.""")

    show_frame = param.Boolean(default=True, doc="""
        Whether or not to show a complete frame around the plot.""")

    # Close figures after rendering; prevents duplicate display in
    # interactive (e.g. notebook) sessions.
    _close_figures = True

    def __init__(self, fig=None, axis=None, **params):
        # Flag consulted by _init_axis; subclasses may clear it to
        # suppress figure creation.
        self._create_fig = True
        super(MPLPlot, self).__init__(**params)
        # List of handles to matplotlib objects for animation update
        # Scale fig_inches by the fig_size percentage.
        scale = self.fig_size/100.
        if isinstance(self.fig_inches, (tuple, list)):
            # Scale each explicit dimension; None stays None so it can be
            # auto-computed later from the other dimension.
            self.fig_inches = [None if i is None else i*scale
                               for i in self.fig_inches]
        else:
            self.fig_inches *= scale
        fig, axis = self._init_axis(fig, axis)
        self.handles['fig'] = fig
        self.handles['axis'] = axis

    def _init_axis(self, fig, axis):
        """
        Return an axis which may need to be initialized from
        a new figure.
        """
        if not fig and self._create_fig:
            rc_params = self.fig_rcparams
            if self.fig_latex:
                rc_params['text.usetex'] = True
            # Apply rc parameters only while constructing the figure.
            with mpl.rc_context(rc=rc_params):
                fig = plt.figure()
                l, b, r, t = self.fig_bounds
                inches = self.fig_inches
                fig.subplots_adjust(left=l, bottom=b, right=r, top=t)
                fig.patch.set_alpha(self.fig_alpha)
                if isinstance(inches, (tuple, list)):
                    inches = list(inches)
                    # Fill in a missing width/height from the other value.
                    if inches[0] is None:
                        inches[0] = inches[1]
                    elif inches[1] is None:
                        inches[1] = inches[0]
                    fig.set_size_inches(list(inches))
                else:
                    # A scalar means a square figure.
                    fig.set_size_inches([inches, inches])
                axis = fig.add_subplot(111, projection=self.projection)
                axis.set_aspect('auto')
        return fig, axis

    def _subplot_label(self, axis):
        """Draw the optional subfigure label (e.g. 'A', 'i', '1') on the axis."""
        layout_num = self.layout_num if self.subplot else 1
        if self.sublabel_format and not self.adjoined and layout_num > 0:
            from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
            labels = {}
            # Only the first matching placeholder style is populated.
            if '{Alpha}' in self.sublabel_format:
                labels['Alpha'] = int_to_alpha(layout_num-1)
            elif '{alpha}' in self.sublabel_format:
                labels['alpha'] = int_to_alpha(layout_num-1, upper=False)
            elif '{numeric}' in self.sublabel_format:
                labels['numeric'] = self.layout_num
            elif '{Roman}' in self.sublabel_format:
                labels['Roman'] = int_to_roman(layout_num)
            elif '{roman}' in self.sublabel_format:
                labels['roman'] = int_to_roman(layout_num).lower()
            at = AnchoredText(self.sublabel_format.format(**labels), loc=3,
                              bbox_to_anchor=self.sublabel_position, frameon=False,
                              prop=dict(size=self.sublabel_size, weight='bold'),
                              bbox_transform=axis.transAxes)
            at.patch.set_visible(False)
            axis.add_artist(at)

    def _finalize_axis(self, key):
        """
        General method to finalize the axis and plot.
        """
        if 'title' in self.handles:
            self.handles['title'].set_visible(self.show_title)

        self.drawn = True
        if self.subplot:
            return self.handles['axis']
        else:
            fig = self.handles['fig']
            # Close to avoid duplicate rendering in interactive backends.
            if self._close_figures: plt.close(fig)
            return fig

    @property
    def state(self):
        # The current matplotlib figure is the renderable state.
        return self.handles['fig']

    def anim(self, start=0, stop=None, fps=30):
        """
        Method to return a matplotlib animation. The start and stop
        frames may be specified as well as the fps.
        """
        figure = self.initialize_plot()
        anim = animation.FuncAnimation(figure, self.update_frame,
                                       frames=self.keys,
                                       interval = 1000.0/fps)
        # Close the figure handle
        if self._close_figures: plt.close(figure)
        return anim

    def update(self, key):
        """Render the frame for the supplied key, initializing if needed."""
        # NOTE(review): this mutates the global matplotlib rcParams rather
        # than using a context manager — confirm that is intentional.
        rc_params = self.fig_rcparams
        if self.fig_latex:
            rc_params['text.usetex'] = True
        mpl.rcParams.update(rc_params)
        if len(self) == 1 and key == 0 and not self.drawn:
            return self.initialize_plot()
        return self.__getitem__(key)
class CompositePlot(GenericCompositePlot, MPLPlot):
    """
    Baseclass for plots which coordinate multiple subplots to
    compose them into a Layout.
    """

    def update_frame(self, key, ranges=None):
        """Update every subplot for the given key, then refresh the handles."""
        ranges = self.compute_ranges(self.layout, key, ranges)
        for plot in self.subplots.values():
            plot.update_frame(key, ranges=ranges)
        self.update_handles(self.handles['axis'], self.layout, key, ranges)
class GridPlot(CompositePlot):
    """
    Plot a group of elements in a grid layout based on a GridSpace element
    object.
    """

    aspect = param.Parameter(default='equal', doc="""
        Aspect ratios on GridPlot should be automatically determined.""")

    padding = param.Number(default=0.1, doc="""
        The amount of padding as a fraction of the total Grid size""")

    shared_xaxis = param.Boolean(default=False, doc="""
        If enabled the x-axes of the GridSpace will be drawn from the
        objects inside the Grid rather than the GridSpace dimensions.""")

    # NOTE(review): the doc below says "x-axes" — presumably a copy-paste
    # slip for "y-axes"; the literal is left untouched here.
    shared_yaxis = param.Boolean(default=False, doc="""
        If enabled the x-axes of the GridSpace will be drawn from the
        objects inside the Grid rather than the GridSpace dimensions.""")

    show_frame = param.Boolean(default=False, doc="""
        Whether to draw a frame around the Grid.""")

    show_legend = param.Boolean(default=False, doc="""
        Legends add to much clutter in a grid and are disabled by default.""")

    tick_format = param.String(default="%.2f", doc="""
        Formatting string for the GridPlot ticklabels.""")

    xaxis = param.ObjectSelector(default='bottom',
                                 objects=['bottom', 'top', None], doc="""
        Whether and where to display the xaxis, supported options are
        'bottom', 'top' and None.""")

    yaxis = param.ObjectSelector(default='left',
                                 objects=['left', 'right', None], doc="""
        Whether and where to display the yaxis, supported options are
        'left', 'right' and None.""")

    xrotation = param.Integer(default=0, bounds=(0, 360), doc="""
        Rotation angle of the xticks.""")

    # NOTE(review): doc says "xticks" but this parameter rotates the yticks.
    yrotation = param.Integer(default=0, bounds=(0, 360), doc="""
        Rotation angle of the xticks.""")

    def __init__(self, layout, axis=None, create_axes=True, ranges=None,
                 keys=None, dimensions=None, layout_num=1, **params):
        # GridPlot only renders GridSpace containers.
        if not isinstance(layout, GridSpace):
            raise Exception("GridPlot only accepts GridSpace.")
        self.layout = layout
        self.cols, self.rows = layout.shape
        self.layout_num = layout_num
        extra_opts = self.lookup_options(layout, 'plot').options
        # Derive the unified key dimensions across the grid if not supplied.
        if not keys or not dimensions:
            dimensions, keys = traversal.unique_dimkeys(layout)
        if 'uniform' not in params:
            params['uniform'] = traversal.uniform(layout)
        super(GridPlot, self).__init__(keys=keys, dimensions=dimensions,
                                       **dict(extra_opts, **params))
        # Compute ranges layoutwise
        grid_kwargs = {}
        if axis is not None:
            # Embed the grid into the bounding box of the supplied axis.
            bbox = axis.get_position()
            l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
            grid_kwargs = {'left': l, 'right': l+w, 'bottom': b, 'top': b+h}
            self.position = (l, b, w, h)
        self.fig_inches = self._get_size()
        self._layoutspec = gridspec.GridSpec(self.rows, self.cols, **grid_kwargs)
        self.subplots, self.subaxes, self.layout = self._create_subplots(layout, axis, ranges, create_axes)

    def _get_size(self):
        """Compute the figure size in inches from the grid shape and fig_inches."""
        max_dim = max(self.layout.shape)
        # Reduce plot size as GridSpace gets larger
        shape_factor = 1. / max_dim
        # Expand small grids to a sensible viewing size
        expand_factor = 1 + (max_dim - 1) * 0.1
        scale_factor = expand_factor * shape_factor
        cols, rows = self.layout.shape
        if isinstance(self.fig_inches, (tuple, list)):
            fig_inches = list(self.fig_inches)
            # Fill a missing dimension preserving the grid aspect ratio.
            if fig_inches[0] is None:
                fig_inches[0] = fig_inches[1] * (cols/rows)
            if fig_inches[1] is None:
                fig_inches[1] = fig_inches[0] * (rows/cols)
            return fig_inches
        else:
            fig_inches = (self.fig_inches,)*2
            return (scale_factor * cols * fig_inches[0],
                    scale_factor * rows * fig_inches[1])

    def _create_subplots(self, layout, axis, ranges, create_axes):
        """Instantiate a subplot (and optionally an axis) for each grid cell.

        Returns a (subplots, subaxes, collapsed_layout) triple keyed by
        (row, col) coordinates.
        """
        # Collapse overlay compositing operations first.
        layout = layout.map(Compositor.collapse_element, [CompositeOverlay],
                            clone=False)
        norm_opts = self._deep_options(layout, 'norm', ['axiswise'], [Element])
        axiswise = all(v.get('axiswise', False) for v in norm_opts.values())
        if not ranges:
            self.handles['fig'].set_size_inches(self.fig_inches)
        subplots, subaxes = OrderedDict(), OrderedDict()
        # Precompute the ranges for every frame key.
        frame_ranges = self.compute_ranges(layout, None, ranges)
        frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
                                    for key in self.keys])
        collapsed_layout = layout.clone(shared_data=False, id=layout.id)
        r, c = (0, 0)
        for coord in layout.keys(full_grid=True):
            if not isinstance(coord, tuple): coord = (coord,)
            view = layout.data.get(coord, None)
            # Create subplot
            if view is not None:
                vtype = view.type if isinstance(view, HoloMap) else view.__class__
                opts = self.lookup_options(view, 'plot').options
            # Create axes
            kwargs = {}
            if create_axes:
                threed = issubclass(vtype, Element3D)
                subax = plt.subplot(self._layoutspec[r, c],
                                    projection='3d' if threed else None)
                # Shared axes move tick drawing onto the outer grid axis.
                if not axiswise and self.shared_xaxis and self.xaxis is not None:
                    self.xaxis = 'top'
                if not axiswise and self.shared_yaxis and self.yaxis is not None:
                    self.yaxis = 'right'
                # Disable subplot axes depending on shared axis options
                # and the position in the grid
                if (self.shared_xaxis or self.shared_yaxis) and not axiswise:
                    if c == 0 and r != 0:
                        subax.xaxis.set_ticks_position('none')
                        kwargs['xaxis'] = 'bottom-bare'
                    if c != 0 and r == 0 and not layout.ndims == 1:
                        subax.yaxis.set_ticks_position('none')
                        kwargs['yaxis'] = 'left-bare'
                    if r != 0 and c != 0:
                        kwargs['xaxis'] = 'bottom-bare'
                        kwargs['yaxis'] = 'left-bare'
                    if not self.shared_xaxis:
                        kwargs['xaxis'] = 'bottom-bare'
                    if not self.shared_yaxis:
                        kwargs['yaxis'] = 'left-bare'
                else:
                    # Non-shared axes: strip all subplot decoration.
                    kwargs['xaxis'] = 'bottom-bare'
                    kwargs['yaxis'] = 'left-bare'
                subaxes[(r, c)] = subax
            else:
                subax = None
            # Create subplot
            if view is not None:
                plotting_class = Store.registry['matplotlib'][vtype]
                subplot = plotting_class(view, fig=self.handles['fig'], axis=subax,
                                         dimensions=self.dimensions, show_title=False,
                                         subplot=not create_axes, ranges=frame_ranges,
                                         uniform=self.uniform, keys=self.keys,
                                         show_legend=False, **dict(opts, **kwargs))
                collapsed_layout[coord] = subplot.layout if isinstance(subplot, CompositePlot) else subplot.hmap
                subplots[(r, c)] = subplot
            else:
                # Empty cell: hide the unused axis.
                subax.set_visible(False)
            # Advance in scanline order: fill a column top-to-bottom, then
            # move to the next column.
            if r != self.rows-1:
                r += 1
            else:
                r = 0
                c += 1
        if create_axes:
            self.handles['axis'] = self._layout_axis(layout, axis)
            self._adjust_subplots(self.handles['axis'], subaxes)
        return subplots, subaxes, collapsed_layout

    def initialize_plot(self, ranges=None):
        """Draw all subplots and the grid title, returning the figure (or axis)."""
        # Get the extent of the layout elements (not the whole layout)
        key = self.keys[-1]
        axis = self.handles['axis']
        subplot_kwargs = dict()
        ranges = self.compute_ranges(self.layout, key, ranges)
        for subplot in self.subplots.values():
            subplot.initialize_plot(ranges=ranges, **subplot_kwargs)
        if self.show_title:
            title = axis.set_title(self._format_title(key),
                                   **self._fontsize('title'))
            self.handles['title'] = title
        self._readjust_axes(axis)
        self.drawn = True
        if self.subplot: return self.handles['axis']
        if self._close_figures: plt.close(self.handles['fig'])
        return self.handles['fig']

    def _readjust_axes(self, axis):
        """Reposition the layout axis and subplots when embedded as a subplot."""
        if self.subplot:
            axis.set_position(self.position)
            if self.aspect == 'equal':
                axis.set_aspect(float(self.rows)/self.cols)
            # A draw is required so subsequent position queries are accurate.
            self.handles['fig'].canvas.draw()
            self._adjust_subplots(self.handles['axis'], self.subaxes)

    def update_handles(self, axis, view, key, ranges=None):
        """
        Should be called by the update_frame class to update
        any handles on the plot.
        """
        if self.show_title:
            title = axis.set_title(self._format_title(key),
                                   **self._fontsize('title'))
            self.handles['title'] = title

    def _layout_axis(self, layout, axis):
        """Create the outer axis carrying the GridSpace labels and ticks."""
        fig = self.handles['fig']
        axkwargs = {'gid': str(self.position)} if axis else {}
        layout_axis = fig.add_subplot(1,1,1, **axkwargs)
        if axis:
            # Replace the supplied axis with the new layout axis.
            axis.set_visible(False)
            layout_axis.set_position(self.position)
        layout_axis.patch.set_visible(False)
        tick_fontsize = self._fontsize('ticks','labelsize',common=False)
        if tick_fontsize: layout_axis.tick_params(**tick_fontsize)
        # Set labels
        layout_axis.set_xlabel(str(layout.kdims[0]),
                               **self._fontsize('xlabel'))
        if layout.ndims == 2:
            layout_axis.set_ylabel(str(layout.kdims[1]),
                                   **self._fontsize('ylabel'))
        # Compute and set x- and y-ticks
        dims = layout.kdims
        keys = layout.keys()
        if layout.ndims == 1:
            # 1D grid: no y-dimension to label.
            dim1_keys = keys
            dim2_keys = [0]
            layout_axis.get_yaxis().set_visible(False)
        else:
            dim1_keys, dim2_keys = zip(*keys)
            layout_axis.set_ylabel(str(dims[1]))
            layout_axis.set_aspect(float(self.rows)/self.cols)
        # Process ticks
        plot_width = (1.0 - self.padding) / self.cols
        # NOTE(review): unlike border_height below, this division has no
        # guard for a single-column grid (self.cols == 1 divides by zero) —
        # confirm whether one-column GridSpaces can reach this path.
        border_width = self.padding / (self.cols-1)
        xticks = [(plot_width/2)+(r*(plot_width+border_width)) for r in range(self.cols)]
        plot_height = (1.0 - self.padding) / self.rows
        border_height = self.padding / (self.rows-1) if layout.ndims > 1 else 0
        yticks = [(plot_height/2)+(r*(plot_height+border_height)) for r in range(self.rows)]
        layout_axis.set_xticks(xticks)
        layout_axis.set_xticklabels(self._process_ticklabels(sorted(set(dim1_keys)), dims[0]))
        for tick in layout_axis.get_xticklabels():
            tick.set_rotation(self.xrotation)
        ydim = dims[1] if layout.ndims > 1 else None
        layout_axis.set_yticks(yticks)
        layout_axis.set_yticklabels(self._process_ticklabels(sorted(set(dim2_keys)), ydim))
        for tick in layout_axis.get_yticklabels():
            tick.set_rotation(self.yrotation)
        if not self.show_frame:
            # Hide the spines opposite the displayed axes.
            layout_axis.spines['right' if self.yaxis == 'left' else 'left'].set_visible(False)
            layout_axis.spines['bottom' if self.xaxis == 'top' else 'top'].set_visible(False)
        axis = layout_axis
        if self.xaxis is not None:
            axis.xaxis.set_ticks_position(self.xaxis)
            axis.xaxis.set_label_position(self.xaxis)
        else:
            axis.xaxis.set_visible(False)
        if self.yaxis is not None:
            axis.yaxis.set_ticks_position(self.yaxis)
            axis.yaxis.set_label_position(self.yaxis)
        else:
            axis.yaxis.set_visible(False)
        for pos in ['left', 'right', 'top', 'bottom']:
            axis.spines[pos].set_visible(False)
        return layout_axis

    def _process_ticklabels(self, labels, dim):
        """Format tick values via the dimension formatter or tick_format."""
        formatted_labels = []
        for k in labels:
            if dim and dim.formatter:
                k = dim.formatter(k)
            elif not isinstance(k, (str, type(None))):
                k = self.tick_format % k
            elif k is None:
                k = ''
            formatted_labels.append(k)
        return formatted_labels

    def _adjust_subplots(self, axis, subaxes):
        """Lay the subaxes out inside the layout axis, honoring padding."""
        bbox = axis.get_position()
        l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
        if self.padding:
            width_padding = w/(1./self.padding)
            height_padding = h/(1./self.padding)
        else:
            width_padding, height_padding = 0, 0
        # Per-gap border sizes (zero when there is only one row/column).
        if self.cols == 1:
            b_w = 0
        else:
            b_w = width_padding / (self.cols - 1)
        if self.rows == 1:
            b_h = 0
        else:
            b_h = height_padding / (self.rows - 1)
        ax_w = (w - (width_padding if self.cols > 1 else 0)) / self.cols
        ax_h = (h - (height_padding if self.rows > 1 else 0)) / self.rows
        r, c = (0, 0)
        # Walk the axes in the same scanline order used to create them.
        for ax in subaxes.values():
            xpos = l + (c*ax_w) + (c * b_w)
            ypos = b + (r*ax_h) + (r * b_h)
            if r != self.rows-1:
                r += 1
            else:
                r = 0
                c += 1
            if not ax is None:
                ax.set_position([xpos, ypos, ax_w, ax_h])
class AdjointLayoutPlot(CompositePlot):
"""
LayoutPlot allows placing up to three Views in a number of
predefined and fixed layouts, which are defined by the layout_dict
class attribute. This allows placing subviews next to a main plot
in either a 'top' or 'right' position.
Initially, a LayoutPlot computes an appropriate layout based for
the number of Views in the AdjointLayout object it has been given, but
when embedded in a NdLayout, it can recompute the layout to
match the number of rows and columns as part of a larger grid.
"""
layout_dict = {'Single': {'width_ratios': [4],
'height_ratios': [4],
'positions': ['main']},
'Dual': {'width_ratios': [4, 1],
'height_ratios': [4],
'positions': ['main', 'right']},
'Triple': {'width_ratios': [4, 1],
'height_ratios': [1, 4],
'positions': ['top', None,
'main', 'right']},
'Embedded Dual': {'width_ratios': [4],
'height_ratios': [1, 4],
'positions': [None, 'main']}}
border_size = param.Number(default=0.25, doc="""
The size of the border expressed as a fraction of the main plot.""")
subplot_size = param.Number(default=0.25, doc="""
The size subplots as expressed as a fraction of the main plot.""")
def __init__(self, layout, layout_type, subaxes, subplots, **params):
# The AdjointLayout ViewableElement object
self.layout = layout
# Type may be set to 'Embedded Dual' by a call it grid_situate
self.layout_type = layout_type
self.view_positions = self.layout_dict[self.layout_type]['positions']
# The supplied (axes, view) objects as indexed by position
self.subaxes = {pos: ax for ax, pos in zip(subaxes, self.view_positions)}
super(AdjointLayoutPlot, self).__init__(subplots=subplots, **params)
def initialize_plot(self, ranges=None):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
for pos in self.view_positions:
# Pos will be one of 'main', 'top' or 'right' or None
view = self.layout.get(pos, None)
subplot = self.subplots.get(pos, None)
ax = self.subaxes.get(pos, None)
# If no view object or empty position, disable the axis
if None in [view, pos, subplot]:
ax.set_axis_off()
continue
subplot.initialize_plot(ranges=ranges)
self.adjust_positions()
self.drawn = True
    def adjust_positions(self):
        """
        Make adjustments to the positions of subplots (if available)
        relative to the main plot axes as required.

        This method is called by LayoutPlot after an initial pass
        used to position all the Layouts together. This method allows
        LayoutPlots to make final adjustments to the axis positions.
        """
        # A side plot is only adjusted if it exists in all three
        # bookkeeping structures (position list, axes and subplots).
        checks = [self.view_positions, self.subaxes, self.subplots]
        right = all('right' in check for check in checks)
        top = all('top' in check for check in checks)
        if not 'main' in self.subplots or not (top or right):
            return
        # Draw first -- presumably so get_position reflects the
        # finalized layout of the main axes (TODO confirm).
        self.handles['fig'].canvas.draw()
        main_ax = self.subplots['main'].handles['axis']
        bbox = main_ax.get_position()
        if right:
            ax = self.subaxes['right']
            subplot = self.subplots['right']
            # Place to the right of the main bbox, offset by border_size
            # and sized by subplot_size (fractions of the main plot).
            ax.set_position([bbox.x1 + bbox.width * self.border_size,
                             bbox.y0,
                             bbox.width * self.subplot_size, bbox.height])
            if isinstance(subplot, GridPlot):
                ax.set_aspect('equal')
        if top:
            ax = self.subaxes['top']
            subplot = self.subplots['top']
            # Place above the main bbox using the same fractions.
            ax.set_position([bbox.x0,
                             bbox.y1 + bbox.height * self.border_size,
                             bbox.width, bbox.height * self.subplot_size])
            if isinstance(subplot, GridPlot):
                ax.set_aspect('equal')
def update_frame(self, key, ranges=None):
for pos in self.view_positions:
subplot = self.subplots.get(pos)
if subplot is not None:
subplot.update_frame(key, ranges)
def __len__(self):
return max([1 if self.keys is None else len(self.keys), 1])
class LayoutPlot(GenericLayoutPlot, CompositePlot):
    """
    A LayoutPlot accepts either a Layout or a NdLayout and
    displays the elements in a cartesian grid in scanline order.
    """

    # 0 disables aspect weighting; 1 makes each element's declared
    # aspect fully determine its grid cell's proportions.
    aspect_weight = param.Number(default=0, doc="""
        Weighting of the individual aspects when computing the Layout
        grid aspects and overall figure size.""")

    fig_bounds = param.NumericTuple(default=(0.05, 0.05, 0.95, 0.95), doc="""
        The bounds of the figure as a 4-tuple of the form
        (left, bottom, right, top), defining the size of the border
        around the subplots.""")

    tight = param.Boolean(default=False, doc="""
        Tightly fit the axes in the layout within the fig_bounds
        and tight_padding.""")

    tight_padding = param.Parameter(default=3, doc="""
        Integer or tuple specifying the padding in inches in a tight layout.""")

    hspace = param.Number(default=0.5, doc="""
        Specifies the space between horizontally adjacent elements in the grid.
        Default value is set conservatively to avoid overlap of subplots.""")

    vspace = param.Number(default=0.1, doc="""
        Specifies the space between vertically adjacent elements in the grid.
        Default value is set conservatively to avoid overlap of subplots.""")

    # Only the suptitle uses a fontsize by default.
    fontsize = param.Parameter(default={'title':16}, allow_None=True)
    def __init__(self, layout, **params):
        """
        Initialize the superclass and immediately compute the full
        gridspec: the per-cell AdjointLayoutPlots, their axes, and a
        collapsed clone of the layout referencing the plotted objects.
        """
        super(LayoutPlot, self).__init__(layout=layout, **params)
        self.subplots, self.subaxes, self.layout = self._compute_gridspec(layout)
    def _compute_gridspec(self, layout):
        """
        Computes the tallest and widest cell for each row and column
        by examining the Layouts in the GridSpace. The GridSpec is then
        instantiated and the LayoutPlots are configured with the
        appropriate embedded layout_types. The first element of the
        returned tuple is a dictionary of all the LayoutPlots indexed
        by row and column. The second dictionary in the tuple supplies
        the grid indices needed to instantiate the axes for each
        LayoutPlot.
        """
        layout_items = layout.grid_items()
        layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None

        layouts = {}
        row_heightratios, col_widthratios = {}, {}
        col_aspects, row_aspects = defaultdict(lambda: [0, 0]), defaultdict(lambda: [0, 0])
        for (r, c) in self.coords:
            # Get view at layout position and wrap in AdjointLayout
            _, view = layout_items.get((r, c), (None, None))
            layout_view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])
            layouts[(r, c)] = layout_view

            # Compute shape of AdjointLayout element
            layout_lens = {1:'Single', 2:'Dual', 3:'Triple'}
            layout_type = layout_lens[len(layout_view)]
            # NOTE(review): hidx is always 0 here, even for 'Triple'
            # whose main row is the second height ratio -- confirm
            # this indexing is intended.
            hidx = 0

            # Get aspects
            main = layout_view.main
            main = main.last if isinstance(main, HoloMap) else main
            main_options = self.lookup_options(main, 'plot').options if main else {}
            # NOTE(review): `basestring` is a Python 2 name; presumably
            # provided by a compatibility import elsewhere in this module.
            if main and not isinstance(main_options.get('aspect', 1), basestring):
                main_aspect = main_options.get('aspect', 1)
                # Blend the declared aspect with 1 according to aspect_weight
                main_aspect = self.aspect_weight*main_aspect + 1-self.aspect_weight
            else:
                main_aspect = 1
            if layout_type == 'Triple':
                row_aspect = [0.25, 1./main_aspect]
            else:
                row_aspect = [1./main_aspect, 0]
            if layout_type in ['Dual', 'Triple']:
                col_aspect = [main_aspect, 0.25]
            else:
                col_aspect = [main_aspect, 0]

            # Compute width and height ratios (copied so the class-level
            # layout_dict entries are never mutated below)
            width_ratios = AdjointLayoutPlot.layout_dict[layout_type]['width_ratios'][:]
            height_ratios = AdjointLayoutPlot.layout_dict[layout_type]['height_ratios'][:]
            if not isinstance(main_aspect, (basestring, type(None))):
                width_ratios[0] = (width_ratios[0] * main_aspect)
                height_ratios[0] = (height_ratios[hidx] * 1./main_aspect)
            layout_shape = (len(width_ratios), len(height_ratios))

            # For each row and column record the width and height ratios
            # of the LayoutPlot with the most horizontal or vertical splits
            # and largest aspect
            if layout_shape[1] > row_heightratios.get(r, (0, None))[0]:
                row_heightratios[r] = [layout_shape[1], height_ratios]
            if height_ratios[hidx] > row_heightratios[r][1][hidx]:
                row_heightratios[r][1][hidx] = height_ratios[hidx]

            if layout_shape[0] > col_widthratios.get(c, (0, None))[0]:
                col_widthratios[c] = (layout_shape[0], width_ratios)
            if width_ratios[0] > col_widthratios[c][1][0]:
                col_widthratios[c][1][0] = width_ratios[0]

            for i in range(2):
                if col_aspect[i] > col_aspects.get(c, [0,0])[i]:
                    col_aspects[c][i] = col_aspect[i]
                if row_aspect[i] > row_aspects.get(r, [0,0])[i]:
                    row_aspects[r][i] = row_aspect[i]

        # In order of row/column collect the largest width and height ratios
        height_ratios = [v[1] for k, v in sorted(row_heightratios.items())]
        width_ratios = [v[1] for k, v in sorted(col_widthratios.items())]
        col_aspect_ratios = [v for k, v in sorted(col_aspects.items())]
        row_aspect_ratios = [v for k, v in sorted(row_aspects.items())]
        # Compute the number of rows and cols
        cols = np.sum([len(wr) for wr in width_ratios])
        rows = np.sum([len(hr) for hr in height_ratios])
        # Flatten the width and height ratio lists
        wr_list = [wr for wrs in width_ratios for wr in wrs]
        hr_list = [hr for hrs in height_ratios for hr in hrs]

        # Compute and set the plot size if not explicitly supplied.
        # Even-indexed aspect entries are main plots, odd entries are
        # the (fractional) side plots.
        col_ars = [ar for ars in col_aspect_ratios for ar in ars]
        row_ars = [ar for ars in row_aspect_ratios for ar in ars]
        width = len(col_ars[::2]) + sum(col_ars[1::2])
        yscale = sum(col_ars)/sum(row_ars)
        xinches, yinches = None, None
        if not isinstance(self.fig_inches, (tuple, list)):
            xinches = self.fig_inches * width
            yinches = xinches/yscale
        elif self.fig_inches[0] is None:
            xinches = self.fig_inches[1] * yscale
            yinches = self.fig_inches[1]
        elif self.fig_inches[1] is None:
            xinches = self.fig_inches[0]
            yinches = self.fig_inches[0] / yscale
        if xinches and yinches:
            self.handles['fig'].set_size_inches([xinches, yinches])

        self.gs = gridspec.GridSpec(rows, cols,
                                    width_ratios=wr_list,
                                    height_ratios=hr_list,
                                    wspace=self.hspace,
                                    hspace=self.vspace)

        # Situate all the Layouts in the grid and compute the gridspec
        # indices for all the axes required by each LayoutPlot.
        gidx = 0
        layout_count = 0
        tight = self.tight
        collapsed_layout = layout.clone(shared_data=False, id=layout.id)
        frame_ranges = self.compute_ranges(layout, None, None)
        frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
                                    for key in self.keys])
        layout_subplots, layout_axes = {}, {}
        for r, c in self.coords:
            # Compute the layout type from shape; these four shapes are
            # the only ones the ratio bookkeeping above can produce.
            wsplits = len(width_ratios[c])
            hsplits = len(height_ratios[r])
            if (wsplits, hsplits) == (1,1):
                layout_type = 'Single'
            elif (wsplits, hsplits) == (2,1):
                layout_type = 'Dual'
            elif (wsplits, hsplits) == (1,2):
                layout_type = 'Embedded Dual'
            elif (wsplits, hsplits) == (2,2):
                layout_type = 'Triple'

            # Get the AdjoinLayout at the specified coordinate
            view = layouts[(r, c)]
            positions = AdjointLayoutPlot.layout_dict[layout_type]['positions']

            # Create temporary subplots to get projections types
            # to create the correct subaxes for all plots in the layout
            _, _, projs = self._create_subplots(layouts[(r, c)], positions,
                                                None, frame_ranges, create=False)
            gidx, gsinds = self.grid_situate(gidx, layout_type, cols)

            layout_key, _ = layout_items.get((r, c), (None, None))
            if isinstance(layout, NdLayout) and layout_key:
                layout_dimensions = OrderedDict(zip(layout_dimensions, layout_key))

            # Generate the axes and create the subplots with the appropriate
            # axis objects, handling any Empty objects.
            obj = layouts[(r, c)]
            empty = isinstance(obj.main, Empty)
            if empty:
                obj = AdjointLayout([])
            else:
                layout_count += 1
            subaxes = [plt.subplot(self.gs[ind], projection=proj)
                       for ind, proj in zip(gsinds, projs)]
            subplot_data = self._create_subplots(obj, positions,
                                                 layout_dimensions, frame_ranges,
                                                 dict(zip(positions, subaxes)),
                                                 num=0 if empty else layout_count)
            subplots, adjoint_layout, _ = subplot_data
            layout_axes[(r, c)] = subaxes

            # Generate the AdjointLayoutsPlot which will coordinate
            # plotting of AdjointLayouts in the larger grid
            plotopts = self.lookup_options(view, 'plot').options
            layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subaxes, subplots,
                                            fig=self.handles['fig'], **plotopts)
            layout_subplots[(r, c)] = layout_plot
            # tight layout is disabled for the whole figure as soon as
            # any cell contains a GridPlot
            tight = not any(type(p) is GridPlot for p in layout_plot.subplots.values()) and tight
            if layout_key:
                collapsed_layout[layout_key] = adjoint_layout

        # Apply tight layout if enabled and incompatible
        # GridPlot isn't present.
        if tight:
            if isinstance(self.tight_padding, (tuple, list)):
                wpad, hpad = self.tight_padding
                padding = dict(w_pad=wpad, h_pad=hpad)
            else:
                padding = dict(w_pad=self.tight_padding, h_pad=self.tight_padding)
            self.gs.tight_layout(self.handles['fig'], rect=self.fig_bounds, **padding)

        # Create title handle
        if self.show_title and len(self.coords) > 1:
            title = self.handles['fig'].suptitle('', **self._fontsize('title'))
            self.handles['title'] = title

        return layout_subplots, layout_axes, collapsed_layout
def grid_situate(self, current_idx, layout_type, subgrid_width):
"""
Situate the current AdjointLayoutPlot in a LayoutPlot. The
LayoutPlot specifies a layout_type into which the AdjointLayoutPlot
must be embedded. This enclosing layout is guaranteed to have
enough cells to display all the views.
Based on this enforced layout format, a starting index
supplied by LayoutPlot (indexing into a large gridspec
arrangement) is updated to the appropriate embedded value. It
will also return a list of gridspec indices associated with
the all the required layout axes.
"""
# Set the layout configuration as situated in a NdLayout
if layout_type == 'Single':
start, inds = current_idx+1, [current_idx]
elif layout_type == 'Dual':
start, inds = current_idx+2, [current_idx, current_idx+1]
bottom_idx = current_idx + subgrid_width
if layout_type == 'Embedded Dual':
bottom = ((current_idx+1) % subgrid_width) == 0
grid_idx = (bottom_idx if bottom else current_idx)+1
start, inds = grid_idx, [current_idx, bottom_idx]
elif layout_type == 'Triple':
bottom = ((current_idx+2) % subgrid_width) == 0
grid_idx = (bottom_idx if bottom else current_idx) + 2
start, inds = grid_idx, [current_idx, current_idx+1,
bottom_idx, bottom_idx+1]
return start, inds
def _create_subplots(self, layout, positions, layout_dimensions, ranges, axes={}, num=1, create=True):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
subplots = {}
projections = []
adjoint_clone = layout.clone(shared_data=False, id=layout.id)
subplot_opts = dict(show_title=False, adjoined=layout)
for pos in positions:
# Pos will be one of 'main', 'top' or 'right' or None
view = layout.get(pos, None)
ax = axes.get(pos, None)
if view is None:
projections.append(None)
continue
# Determine projection type for plot
components = view.traverse(lambda x: x)
projs = ['3d' if isinstance(c, Element3D) else
self.lookup_options(c, 'plot').options.get('projection', None)
for c in components]
projs = [p for p in projs if p is not None]
if len(set(projs)) > 1:
raise Exception("A single axis may only be assigned one projection type")
elif projs:
projections.append(projs[0])
else:
projections.append(None)
if not create:
continue
# Customize plotopts depending on position.
plotopts = self.lookup_options(view, 'plot').options
# Options common for any subplot
override_opts = {}
sublabel_opts = {}
if pos == 'main':
own_params = self.get_param_values(onlychanged=True)
sublabel_opts = {k: v for k, v in own_params
if 'sublabel_' in k}
if not isinstance(view, GridSpace):
override_opts = dict(aspect='square')
elif pos == 'right':
right_opts = dict(orientation='vertical',
xaxis=None, yaxis='left')
override_opts = dict(subplot_opts, **right_opts)
elif pos == 'top':
top_opts = dict(xaxis='bottom', yaxis=None)
override_opts = dict(subplot_opts, **top_opts)
# Override the plotopts as required
plotopts = dict(sublabel_opts, **plotopts)
plotopts.update(override_opts, fig=self.handles['fig'])
vtype = view.type if isinstance(view, HoloMap) else view.__class__
if isinstance(view, GridSpace):
plotopts['create_axes'] = ax is not None
if pos == 'main':
plot_type = Store.registry['matplotlib'][vtype]
else:
plot_type = MPLPlot.sideplots[vtype]
num = num if len(self.coords) > 1 else 0
subplots[pos] = plot_type(view, axis=ax, keys=self.keys,
dimensions=self.dimensions,
layout_dimensions=layout_dimensions,
ranges=ranges, subplot=True,
uniform=self.uniform, layout_num=num,
**plotopts)
if isinstance(view, (Element, HoloMap, Collator, CompositeOverlay)):
adjoint_clone[pos] = subplots[pos].hmap
else:
adjoint_clone[pos] = subplots[pos].layout
return subplots, adjoint_clone, projections
def update_handles(self, axis, view, key, ranges=None):
"""
Should be called by the update_frame class to update
any handles on the plot.
"""
if self.show_title and 'title' in self.handles and len(self.coords) > 1:
self.handles['title'].set_text(self._format_title(key))
def initialize_plot(self):
axis = self.handles['axis']
self.update_handles(axis, None, self.keys[-1])
ranges = self.compute_ranges(self.layout, self.keys[-1], None)
for subplot in self.subplots.values():
subplot.initialize_plot(ranges=ranges)
return self._finalize_axis(None)
| bsd-3-clause |
larsoner/mne-python | mne/externals/tqdm/_tqdm/gui.py | 14 | 11601 | """
GUI progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm.gui import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
from .utils import _range
# to inherit from the tqdm class
from .std import tqdm as std_tqdm
from .std import TqdmExperimentalWarning
from warnings import warn
__author__ = {"github.com/": ["casperdcl", "lrq3000"]}
__all__ = ['tqdm_gui', 'tgrange', 'tqdm', 'trange']
class tqdm_gui(std_tqdm): # pragma: no cover
    """
    Experimental GUI version of tqdm!

    Renders the progress bar as a live matplotlib figure (rate curves
    plus a progress span) instead of writing to the console.
    """

    # TODO: @classmethod: write() on GUI?
    def __init__(self, *args, **kwargs):
        """
        Accepts the same arguments as `tqdm.std.tqdm`; forces gui=True
        and builds the matplotlib figure used for display.
        """
        # Imported lazily so importing this module does not require
        # matplotlib unless a GUI bar is actually created.
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        from collections import deque
        kwargs['gui'] = True

        super(tqdm_gui, self).__init__(*args, **kwargs)

        # Initialize the GUI display
        if self.disable or not kwargs['gui']:
            return

        warn('GUI is experimental/alpha', TqdmExperimentalWarning, stacklevel=2)
        self.mpl = mpl
        self.plt = plt
        self.sp = None

        # Remember if external environment uses toolbars
        self.toolbar = self.mpl.rcParams['toolbar']
        self.mpl.rcParams['toolbar'] = 'None'

        # GUI redraws are expensive, so never refresh more than twice a second
        self.mininterval = max(self.mininterval, 0.5)
        self.fig, ax = plt.subplots(figsize=(9, 2.2))
        # self.fig.subplots_adjust(bottom=0.2)
        # NOTE(review): len() can only return an int or raise, so the
        # `total is not None` branches below presumably guard against a
        # historical __len__ contract -- confirm against std_tqdm.
        total = len(self)
        if total is not None:
            self.xdata = []
            self.ydata = []
            self.zdata = []
        else:
            # Unbounded bars keep a rolling window, so use deques for
            # cheap popleft of expired samples.
            self.xdata = deque([])
            self.ydata = deque([])
            self.zdata = deque([])
        self.line1, = ax.plot(self.xdata, self.ydata, color='b')
        self.line2, = ax.plot(self.xdata, self.zdata, color='k')
        ax.set_ylim(0, 0.001)
        if total is not None:
            ax.set_xlim(0, 100)
            ax.set_xlabel('percent')
            self.fig.legend((self.line1, self.line2), ('cur', 'est'),
                            loc='center right')
            # progressbar
            self.hspan = plt.axhspan(0, 0.001,
                                     xmin=0, xmax=0, color='g')
        else:
            # ax.set_xlim(-60, 0)
            ax.set_xlim(0, 60)
            ax.invert_xaxis()
            ax.set_xlabel('seconds')
            ax.legend(('cur', 'est'), loc='lower left')
        ax.grid()
        # ax.set_xlabel('seconds')
        ax.set_ylabel((self.unit if self.unit else 'it') + '/s')
        if self.unit_scale:
            plt.ticklabel_format(style='sci', axis='y',
                                 scilimits=(0, 0))
            ax.yaxis.get_offset_text().set_x(-0.15)

        # Remember if external environment is interactive
        self.wasion = plt.isinteractive()
        plt.ion()
        self.ax = ax
    def __iter__(self):
        """
        Yield items from the wrapped iterable, updating the GUI at most
        once per `mininterval`.  Hot-path state is bound to locals and
        written back to the instance only on refresh and at the end.
        """
        # TODO: somehow allow the following:
        # if not self.gui:
        #   return super(tqdm_gui, self).__iter__()
        iterable = self.iterable
        if self.disable:
            for obj in iterable:
                yield obj
            return

        # ncols = self.ncols
        mininterval = self.mininterval
        maxinterval = self.maxinterval
        miniters = self.miniters
        dynamic_miniters = self.dynamic_miniters
        last_print_t = self.last_print_t
        last_print_n = self.last_print_n
        n = self.n
        # dynamic_ncols = self.dynamic_ncols
        smoothing = self.smoothing
        avg_time = self.avg_time
        time = self._time

        for obj in iterable:
            yield obj
            # Update and possibly print the progressbar.
            # Note: does not call self.update(1) for speed optimisation.
            n += 1
            # check counter first to avoid calls to time()
            if n - last_print_n >= self.miniters:
                miniters = self.miniters  # watch monitoring thread changes
                delta_t = time() - last_print_t
                if delta_t >= mininterval:
                    cur_t = time()
                    delta_it = n - last_print_n
                    # EMA (not just overall average)
                    if smoothing and delta_t and delta_it:
                        rate = delta_t / delta_it
                        avg_time = self.ema(rate, avg_time, smoothing)
                        self.avg_time = avg_time

                    self.n = n
                    self.display()

                    # If no `miniters` was specified, adjust automatically
                    # to the max iteration rate seen so far between 2 prints
                    if dynamic_miniters:
                        if maxinterval and delta_t >= maxinterval:
                            # Adjust miniters to time interval by rule of 3
                            if mininterval:
                                # Set miniters to correspond to mininterval
                                miniters = delta_it * mininterval / delta_t
                            else:
                                # Set miniters to correspond to maxinterval
                                miniters = delta_it * maxinterval / delta_t
                        elif smoothing:
                            # EMA-weight miniters to converge
                            # towards the timeframe of mininterval
                            rate = delta_it
                            if mininterval and delta_t:
                                rate *= mininterval / delta_t
                            miniters = self.ema(rate, miniters, smoothing)
                        else:
                            # Maximum nb of iterations between 2 prints
                            miniters = max(miniters, delta_it)

                    # Store old values for next call
                    self.n = self.last_print_n = last_print_n = n
                    self.last_print_t = last_print_t = cur_t
                    self.miniters = miniters

        # Closing the progress bar.
        # Update some internal variables for close().
        self.last_print_n = last_print_n
        self.n = n
        self.miniters = miniters
        self.close()
    def update(self, n=1):
        """
        Manually advance the bar by `n` iterations (mirrors
        `std_tqdm.update` but refreshes the matplotlib display).
        """
        # if not self.gui:
        #   return super(tqdm_gui, self).close()
        if self.disable:
            return

        if n < 0:
            self.last_print_n += n  # for auto-refresh logic to work
        self.n += n

        # check counter first to reduce calls to time()
        if self.n - self.last_print_n >= self.miniters:
            delta_t = self._time() - self.last_print_t
            if delta_t >= self.mininterval:
                cur_t = self._time()
                delta_it = self.n - self.last_print_n  # >= n
                # elapsed = cur_t - self.start_t
                # EMA (not just overall average)
                if self.smoothing and delta_t and delta_it:
                    rate = delta_t / delta_it
                    self.avg_time = self.ema(
                        rate, self.avg_time, self.smoothing)

                self.display()

                # If no `miniters` was specified, adjust automatically to the
                # maximum iteration rate seen so far between two prints.
                # e.g.: After running `tqdm.update(5)`, subsequent
                # calls to `tqdm.update()` will only cause an update after
                # at least 5 more iterations.
                if self.dynamic_miniters:
                    if self.maxinterval and delta_t >= self.maxinterval:
                        if self.mininterval:
                            self.miniters = delta_it * self.mininterval \
                                / delta_t
                        else:
                            self.miniters = delta_it * self.maxinterval \
                                / delta_t
                    elif self.smoothing:
                        self.miniters = self.smoothing * delta_it * \
                            (self.mininterval / delta_t
                             if self.mininterval and delta_t
                             else 1) + \
                            (1 - self.smoothing) * self.miniters
                    else:
                        self.miniters = max(self.miniters, delta_it)

                # Store old values for next call
                self.last_print_n = self.n
                self.last_print_t = cur_t
    def close(self):
        """
        Deregister the bar and restore the matplotlib environment
        (toolbar setting and interactive mode) to its prior state.
        """
        # if not self.gui:
        #   return super(tqdm_gui, self).close()
        if self.disable:
            return

        # Prevent any further updates/redraws
        self.disable = True

        with self.get_lock():
            self._instances.remove(self)

        # Restore toolbars
        self.mpl.rcParams['toolbar'] = self.toolbar
        # Return to non-interactive mode
        if not self.wasion:
            self.plt.ioff()
        if not self.leave:
            self.plt.close(self.fig)
    def display(self):
        """
        Redraw the figure: append the latest instantaneous and overall
        rates, update the progress span (bounded bars) or the rolling
        time window (unbounded bars), and refresh the title.
        """
        n = self.n
        cur_t = self._time()
        elapsed = cur_t - self.start_t
        delta_it = n - self.last_print_n
        delta_t = cur_t - self.last_print_t

        # Inline due to multiple calls
        total = self.total
        xdata = self.xdata
        ydata = self.ydata
        zdata = self.zdata
        ax = self.ax
        line1 = self.line1
        line2 = self.line2
        # instantaneous rate
        # NOTE(review): divides by delta_t with no zero guard -- callers
        # presumably only invoke display() after a nonzero interval.
        y = delta_it / delta_t
        # overall rate
        z = n / elapsed
        # update line data
        xdata.append(n * 100.0 / total if total else cur_t)
        ydata.append(y)
        zdata.append(z)

        # Discard old values
        # xmin, xmax = ax.get_xlim()
        # if (not total) and elapsed > xmin * 1.1:
        if (not total) and elapsed > 66:
            xdata.popleft()
            ydata.popleft()
            zdata.popleft()

        ymin, ymax = ax.get_ylim()
        if y > ymax or z > ymax:
            ymax = 1.1 * y
            ax.set_ylim(ymin, ymax)
            ax.figure.canvas.draw()

        if total:
            line1.set_data(xdata, ydata)
            line2.set_data(xdata, zdata)
            # Lazily (re)create the progress span if it does not exist yet
            try:
                poly_lims = self.hspan.get_xy()
            except AttributeError:
                self.hspan = self.plt.axhspan(
                    0, 0.001, xmin=0, xmax=0, color='g')
                poly_lims = self.hspan.get_xy()
            poly_lims[0, 1] = ymin
            poly_lims[1, 1] = ymax
            poly_lims[2] = [n / total, ymax]
            poly_lims[3] = [poly_lims[2, 0], ymin]
            if len(poly_lims) > 4:
                poly_lims[4, 1] = ymin
            self.hspan.set_xy(poly_lims)
        else:
            # Unbounded: plot against seconds-ago instead of percent
            t_ago = [cur_t - i for i in xdata]
            line1.set_data(t_ago, ydata)
            line2.set_data(t_ago, zdata)

        ax.set_title(self.format_meter(
            n, total, elapsed, 0,
            self.desc, self.ascii, self.unit, self.unit_scale,
            1 / self.avg_time if self.avg_time else None, self.bar_format,
            self.postfix, self.unit_divisor),
            fontname="DejaVu Sans Mono", fontsize=11)
        self.plt.pause(1e-9)
def tgrange(*args, **kwargs):
    """
    A shortcut for `tqdm.gui.tqdm(xrange(*args), **kwargs)`.
    On Python3+, `range` is used instead of `xrange`.
    """
    numbers = _range(*args)
    return tqdm_gui(numbers, **kwargs)
# Aliases so that `from tqdm.gui import tqdm, trange` mirrors the
# standard (console) tqdm API.
tqdm = tqdm_gui
trange = tgrange
| bsd-3-clause |
theoryno3/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)

iris = datasets.load_iris()
X = iris.data
y = iris.target

# Three estimators: the correct k, too many clusters, and a deliberately
# poor single random initialization (n_init=1) to show init sensitivity.
# (An unused `centers` list from the original example was removed.)
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
              'k_means_iris_8': KMeans(n_clusters=8),
              'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
                                              init='random')}

fignum = 1
for name, est in estimators.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

    plt.cla()
    est.fit(X)
    labels = est.labels_

    # Fix: `np.float` was a deprecated alias of the builtin `float` and
    # was removed in NumPy 1.24; use the builtin directly (same values).
    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(float))

    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    fignum = fignum + 1

# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

plt.cla()

for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
ldirer/scikit-learn | examples/cluster/plot_face_segmentation.py | 26 | 2561 | """
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
# load the raccoon face as a numpy array
# load the raccoon face as a numpy array
try: # SciPy >= 0.16 have face in misc
    from scipy.misc import face
    face = face(gray=True)
except ImportError:
    # NOTE(review): `sp.face` is not a top-level scipy attribute; this
    # fallback presumably targeted a very old API -- confirm/replace.
    face = sp.face(gray=True)

# Resize it to 10% of the original size to speed up the processing
# NOTE(review): `sp.misc.imresize` was removed in SciPy 1.3 -- confirm
# the pinned SciPy version or port to PIL/skimage resizing.
face = sp.misc.imresize(face, 0.10) / 255.

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps

# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25

#############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels, random_state=1)
    t1 = time.time()
    labels = labels.reshape(face.shape)

    plt.figure(figsize=(5, 5))
    plt.imshow(face, cmap=plt.cm.gray)
    for l in range(N_REGIONS):
        # NOTE(review): `contours` is not a documented `plt.contour`
        # keyword, and `plt.cm.spectral` was removed in Matplotlib 2.2
        # (now 'nipy_spectral') -- confirm the pinned versions.
        plt.contour(labels == l, contours=1,
                    colors=[plt.cm.spectral(l / float(N_REGIONS))])
    plt.xticks(())
    plt.yticks(())
    title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
    print(title)
    plt.title(title)
plt.show()
| bsd-3-clause |
algui91/GraphicNetworkMonitoring | src/test.py | 1 | 1286 | #!/usr/bin/env python3.2
"""
Graphic Network Monitoring tool
"""
__author__ = """Alejandro Alcalde (algui91@gmail.com)"""
try:
import matplotlib.pyplot as plt
except:
raise
import json
import networkx as nx
from networkx.algorithms import bipartite
import gnm
def pretty(d, indent=0):
    """
    Draw a circular graph linking local and remote addresses and dump
    the raw data as JSON.

    Parameters
    ----------
    d : dict
        Mapping with 'Dir Local.' and 'Dir Remota.' keys, each a list
        of address strings.
    indent : int, optional
        Unused; kept for backward compatibility with existing callers.
    """
    lTam = len(d['Dir Local.'])
    rTam = len(d['Dir Remota.'])
    #G=nx.cycle_graph(lTam + rTam)
    #pos=nx.spring_layout(G,iterations=200)
    G = nx.path_graph(lTam + rTam)
    pos = nx.circular_layout(G)
    labels = {}
    edges = []
    # NOTE(review): the first lTam labels are taken from 'Dir Remota.'
    # and the next rTam from 'Dir Local.'; this mixing (and the indexing
    # below) assumes both lists have compatible lengths -- confirm.
    for i in range(lTam):
        labels[i] = d['Dir Remota.'][i]
    #print(json.dumps(labels, sort_keys=True, indent=4))
    for i in range(rTam):
        labels[i + lTam] = d['Dir Local.'][i]
    for i in range(rTam):
        edges.append((i, i + rTam))
    #print(json.dumps(labels, sort_keys=True, indent=4))
    nx.draw(G, pos, node_color=range(lTam + rTam), node_size=800, cmap=plt.cm.Blues, labels=labels)
    nx.draw_networkx_edges(G, pos, edgelist=edges, width=1, alpha=0.5, edge_color='r')
    #nx.draw_networkx_labels(G,labels,font_size=16)
    plt.axis('off')
    plt.show() # display
    # Fix: the original used the Python 2 print *statement*, which is a
    # SyntaxError under this file's python3.2 shebang.
    print(json.dumps(d, sort_keys=True, indent=4))
# Fetch the current connection table from the gnm C helper and plot it.
pretty(gnm.helloC())
| gpl-3.0 |
annoviko/pyclustering | pyclustering/nnet/tests/unit/ut_sync.py | 1 | 8703 | """!
@brief Unit-tests for Oscillatory Neural Network based on Kuramoto model.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest;
# Generate images without having a window appear.
import matplotlib;
matplotlib.use('Agg');
from pyclustering.nnet.tests.sync_templates import SyncTestTemplates;
from pyclustering.nnet import solve_type, conn_type;
from pyclustering.nnet.sync import sync_network, sync_dynamic, sync_visualizer;
from pyclustering.utils import pi;
class SyncUnitTest(unittest.TestCase):
    def testCreateNetwork(self):
        # Networks of various sizes should construct without error
        # (the False flag selects the pure-Python implementation).
        SyncTestTemplates.templateCreateNetwork(1, False);
        SyncTestTemplates.templateCreateNetwork(10, False);
        SyncTestTemplates.templateCreateNetwork(55, False);

    def testConnectionsApi(self):
        # The connection-inspection API should work for several sizes.
        SyncTestTemplates.templateConnectionsApi(1, False);
        SyncTestTemplates.templateConnectionsApi(5, False);
        SyncTestTemplates.templateConnectionsApi(10, False);
    def testSyncOrderSingleOscillator(self):
        # Check for order parameter of network with one oscillator:
        # a lone oscillator is trivially fully synchronized.
        network = sync_network(1, 1, ccore=False);
        assert network.sync_order() == 1;

    def testSyncOrderNetwork(self):
        # Check for order parameter of network with several oscillators:
        # after simulation the order should approach full synchrony.
        network = sync_network(2, 1, ccore=False);

        sync_state = 1;
        tolerance = 0.1;

        network.simulate(50, 20, solve_type.RK4);
        assert (abs(network.sync_order() - sync_state) < tolerance) == True;

    def testSyncLocalOrderSingleOscillator(self):
        # A single oscillator has no neighbours, so local order is zero.
        network = sync_network(1, 1);
        assert network.sync_local_order() == 0;
    def testOutputNormalization(self):
        # Every simulated phase should stay normalized to [0, 2*pi].
        network = sync_network(20, 1, ccore=False);

        output_dynamic = network.simulate(50, 20, solve_type.RK4);

        t = output_dynamic.time;
        dyn = output_dynamic.output;

        for iteration in range(len(dyn)):
            for index_oscillator in range(len(dyn[iteration])):
                assert (dyn[iteration][index_oscillator] >= 0);
                assert (dyn[iteration][index_oscillator] <= 2.0 * pi);
    def testFastSolution(self):
        # Check for convergence when solution using fast way of calculation of derivative
        SyncTestTemplates.templateSimulateTest(10, 1, solve_type.FAST, False);

    def testRK4Solution(self):
        # Check for convergence when solution using RK4 function of calculation of derivative
        SyncTestTemplates.templateSimulateTest(10, 1, solve_type.RK4, False);

    def testLargeNetwork(self):
        # Check for convergence of phases in large network - network that contains large number of oscillators
        SyncTestTemplates.templateSimulateTest(128, 1, solve_type.FAST, False);
    def testOutputDynamicAroundZero(self):
        # Phases clustered around the 0 / 2*pi wrap-around point should
        # still be allocated to a single synchronous ensemble.
        phases = [ [ 0.01, 0.02, 0.04, 6.27, 6.28, 6.25, 0.03] ];
        time = [ 10.0 ];

        output_sync_dynamic = sync_dynamic(phases, time, None);
        assert len(output_sync_dynamic.allocate_sync_ensembles(0.2)) == 1;
        assert len(output_sync_dynamic.allocate_sync_ensembles(0.1)) == 1;

        # Same expectation with a wider spread and larger tolerances.
        phases = [ [ 1.02, 1.05, 1.52, 5.87, 5.98, 5.14] ];

        output_sync_dynamic = sync_dynamic(phases, time, None);
        assert len(output_sync_dynamic.allocate_sync_ensembles(3.0)) == 1;
        assert len(output_sync_dynamic.allocate_sync_ensembles(2.0)) == 1;
    def testDynamicSimulationAllToAll(self):
        # Convergence on the fully connected topology.
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(10, 1, conn_type.ALL_TO_ALL, False);
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(50, 1, conn_type.ALL_TO_ALL, False);

    def testDynamicSimulationGridFour(self):
        # Convergence on the four-neighbour grid (sizes are perfect squares).
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(9, 1, conn_type.GRID_FOUR, False);
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(25, 1, conn_type.GRID_FOUR, False);
def testDynamicSimulationGridEight(self):
SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(9, 1, conn_type.GRID_FOUR, False);
SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(25, 1, conn_type.GRID_FOUR, False);
    def testDynamicSimulationBidir(self):
        # Dynamic-length simulation on bidirectional list (chain) topologies.
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(5, 1, conn_type.LIST_BIDIR, False);
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(10, 1, conn_type.LIST_BIDIR, False);

    # Convergence of fully connected networks of increasing size (2..7).
    def testTwoOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(2, 1, conn_type.ALL_TO_ALL, False);

    def testThreeOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(3, 1, conn_type.ALL_TO_ALL, False);

    def testFourOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(4, 1, conn_type.ALL_TO_ALL, False);

    def testFiveOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(5, 1, conn_type.ALL_TO_ALL, False);

    def testSixOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(6, 1, conn_type.ALL_TO_ALL, False);

    def testSevenOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(7, 1, conn_type.ALL_TO_ALL, False);
    def testOutputDynamicLengthSimulation(self):
        net = sync_network(5, ccore=False);
        output_dynamic = net.simulate(10, 10, solution = solve_type.FAST, collect_dynamic = True);

        assert len(output_dynamic) == 11; # 10 simulation steps plus the initial state.

    def testOutputDynamicLengthStaticSimulation(self):
        net = sync_network(5, ccore=False);
        output_dynamic = net.simulate_static(10, 10, solution = solve_type.FAST, collect_dynamic = True);

        assert len(output_dynamic) == 11; # 10 simulation steps plus the initial state.

    def testOutputDynamicLengthStaticSimulationWithouCollecting(self):
        net = sync_network(5, ccore=False);
        output_dynamic = net.simulate_static(10, 10, solution = solve_type.FAST, collect_dynamic = False);

        assert len(output_dynamic) == 1; # Only the final state is kept when not collecting.

    def testOutputDynamicLengthDynamicSimulation(self):
        net = sync_network(5, ccore=False);
        output_dynamic = net.simulate_dynamic(solution = solve_type.FAST, collect_dynamic = True);

        # Dynamic simulation runs until convergence, so the exact length is unknown.
        assert len(output_dynamic) > 1;

    def testOutputDynamicLengthDynamicSimulationWithoutCollecting(self):
        net = sync_network(5, ccore=False);
        output_dynamic = net.simulate_dynamic(solution = solve_type.FAST, collect_dynamic = False);

        assert len(output_dynamic) == 1;

    def testInfoAllicationWithNoSimulation(self):
        # NOTE(review): method name has a typo ("Allication"); kept unchanged
        # to avoid altering the discovered test identifier.
        output_dynamic = sync_dynamic(None, None, None);
        ensembles = output_dynamic.allocate_sync_ensembles();
        assert ensembles == [];

        matrix = output_dynamic.allocate_correlation_matrix();
        assert matrix == [];
    def testOutputDynamicCalculateOrderParameter(self):
        SyncTestTemplates.templateOutputDynamicCalculateOrderParameter(False);

    def testOutputDynamicCalculateLocalOrderParameter(self):
        SyncTestTemplates.templateOutputDynamicCalculateLocalOrderParameter(False);

    def testVisualizerOrderParameterNoFailures(self):
        # Smoke test: rendering must not raise for various start/stop windows.
        net = sync_network(10, ccore = False);
        output_dynamic = net.simulate_static(20, 10, solution = solve_type.FAST, collect_dynamic = True);

        sync_visualizer.show_order_parameter(output_dynamic);
        sync_visualizer.show_order_parameter(output_dynamic, 0);
        sync_visualizer.show_order_parameter(output_dynamic, 5);
        sync_visualizer.show_order_parameter(output_dynamic, 5, 20);

    def testVisualizeLocalOrderParameterNoFailures(self):
        # Smoke test: local-order rendering must not raise.
        net = sync_network(10, ccore = False);
        output_dynamic = net.simulate_static(20, 10, solution = solve_type.FAST, collect_dynamic = True);

        sync_visualizer.show_local_order_parameter(output_dynamic, net);
        sync_visualizer.show_local_order_parameter(output_dynamic, net, 0);
        sync_visualizer.show_local_order_parameter(output_dynamic, net, 5);
        sync_visualizer.show_local_order_parameter(output_dynamic, net, 5, 20);

    def testVisualizerNoFailures(self):
        SyncTestTemplates.templateVisualizerNoFailures(5, 10, False);
| gpl-3.0 |
c11/yatsm | yatsm/regression/pickles/serialize.py | 3 | 1859 | """ Setup script to pickle various statistical estimators for distribution
Available pickles to build:
* glmnet_Lasso20.pkl
* sklearn_Lasso20.pkl
"""
from __future__ import print_function
import json
import logging
import os
import traceback

# Don't alias to ``np``: https://github.com/numba/numba/issues/1559
import numpy

import sklearn.linear_model
from sklearn.externals import joblib as jl
import six

logger = logging.getLogger()

# GLMNET pickles
try:
    import glmnet
    _glmnet_pickles = {
        'glmnet_Lasso20.pkl': glmnet.Lasso(lambdas=20),
        'glmnet_LassoCV_n50.pkl': glmnet.LassoCV(
            lambdas=numpy.logspace(1e-4, 35, 50)),
    }
except Exception:
    # Fix: was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt.  Any failure importing glmnet or constructing its
    # estimators is reported and the glmnet pickles are simply skipped.
    logger.error('Could not produce pickles from package "glmnet". '
                 'Check if it is installed')
    print(traceback.format_exc())
    _glmnet_pickles = {}

# scikit-learn pickles
# NOTE(review): ``numpy.logspace(1e-4, 35, 50)`` spans 10**1e-4 .. 10**35;
# the intent was presumably exponents like ``logspace(-4, 3.5, 50)`` --
# left unchanged pending confirmation (same constant is used above).
_sklearn_pickles = {
    'OLS.pkl': sklearn.linear_model.LinearRegression(),
    'sklearn_Lasso20.pkl': sklearn.linear_model.Lasso(alpha=20.0),
    'sklearn_LassoCV_n50.pkl': sklearn.linear_model.LassoCV(
        alphas=numpy.logspace(1e-4, 35, 50)),
}

# YATSM pickles
from ..robust_fit import RLM  # flake8: noqa
_yatsm_pickles = {
    'rlm_maxiter10.pkl': RLM(maxiter=10)
}

# All estimator groups to serialize, and where the manifest is written.
pickles = [_glmnet_pickles, _sklearn_pickles, _yatsm_pickles]
here = os.path.dirname(__file__)
pickles_json = os.path.join(here, 'pickles.json')
def make_pickles():
    """Dump every configured estimator to a pickle file and record the
    pickle-name -> estimator-class mapping in ``pickles.json``."""
    logger.info('Serializing estimators to pickles...')
    manifest = {}
    for group in pickles:
        for filename, estimator in six.iteritems(group):
            jl.dump(estimator, os.path.join(here, filename), compress=5)
            stem = os.path.splitext(filename)[0]
            manifest[stem] = estimator.__class__.__name__

    with open(pickles_json, 'w') as fh:
        json.dump(manifest, fh, indent=4)
    logger.info('Wrote pickles.json to %s' % pickles_json)
| mit |
github4ry/pathomx | pathomx/figures.py | 2 | 21263 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from collections import OrderedDict
from . import utils
# from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.patches import BoxStyle, Ellipse, Rectangle
from matplotlib.transforms import Affine2D, Bbox, BboxBase
import matplotlib.cm as cm
import matplotlib.pyplot as plt
Figure = plt.figure
FIGURE_SIZE = (5, 5)
FIGURE_DPI = 300
class EntityBoxStyle(BoxStyle._Base):
    """
    A simple box.

    Custom matplotlib box style: a padded rectangle with a small pointer
    vertex protruding from the middle of its left edge.
    """

    def __init__(self, pad=0.1):
        """
        The arguments need to be floating numbers and need to have
        default values.

        *pad*
          amount of padding
        """
        self.pad = pad
        super(EntityBoxStyle, self).__init__()

    def transmute(self, x0, y0, width, height, mutation_size):
        """
        Given the location and size of the box, return the path of
        the box around it.

        - *x0*, *y0*, *width*, *height* : location and size of the box
        - *mutation_size* : a reference scale for the mutation.

        Often, the *mutation_size* is the font size of the text.
        You don't need to worry about the rotation as it is
        automatically taken care of.
        """
        # padding
        pad = mutation_size * self.pad

        # width and height with padding added.
        width, height = width + 2. * pad, \
            height + 2. * pad,

        # boundary of the padded box
        x0, y0 = x0 - pad, y0 - pad,
        x1, y1 = x0 + width, y0 + height

        # Rectangle vertices plus the left-edge tip at (x0 - pad, mid-height);
        # the repeated (x0, y0) closes the polygon.
        cp = [(x0, y0),
              (x1, y0), (x1, y1), (x0, y1),
              (x0 - pad, (y0 + y1) / 2.), (x0, y0),
              (x0, y0)]

        com = [Path.MOVETO,
               Path.LINETO, Path.LINETO, Path.LINETO,
               Path.LINETO, Path.LINETO,
               Path.CLOSEPOLY]

        path = Path(cp, com)

        return path

# register the custom style
BoxStyle._style_list["entity-tip"] = EntityBoxStyle
def get_text_bbox_screen_coords(fig, t):
    """Return the corner points of text artist *t* in screen (pixel) coords."""
    extent = t.get_window_extent(fig.canvas.get_renderer())
    return extent.get_points()

def get_text_bbox_data_coords(fig, ax, t):
    """Return the corner points of text artist *t* in *ax* data coordinates."""
    extent = t.get_window_extent(fig.canvas.get_renderer())
    return extent.transformed(ax.transData.inverted()).get_points()
def extend_limits(a, b):
    """Extend axis limits *a* = ((xmin, xmax), (ymin, ymax)) so they enclose
    the bounding-box points *b* (2x2 array: first column x, second column y).

    Returns fresh ``[xs, ys]`` lists; *a* itself is not mutated.
    """
    xs, ys = list(a[0]), list(a[1])
    bx, by = b[:, 0], b[:, 1]

    xs[0] = min(xs[0], bx[0])
    xs[1] = max(xs[1], bx[1])
    ys[0] = min(ys[0], by[0])
    ys[1] = max(ys[1], by[1])

    return [xs, ys]
def find_linear_scale(data):
    """Find a numeric x-axis scale in *data*'s column index.

    For a ``MultiIndex`` the first integer/float level supplies the scale
    (float levels are preferred and stop the search); for a flat index the
    column labels are coerced to floats when possible.

    :param data: pandas DataFrame whose columns are inspected.
    :return: tuple ``(scale, linear_scale, scale_name)`` -- the numeric
        positions (empty list when none found), a bool saying whether the
        scale is usable, and the index level name (stays ``[]`` for flat
        columns, matching the historical behaviour callers rely on).
    """
    scale = []
    scale_name = []
    linear_scale = False
    longest = None

    if isinstance(data.columns, pd.MultiIndex):
        for n, l in enumerate(data.columns.levels):
            if l.dtype == np.dtype('O'):  # Object; maybe str?
                if longest is None or len(l) > longest:
                    longest = len(l)

            # Fix: ``np.float`` (a deprecated alias of builtin float) was
            # removed in NumPy 1.24 -- use the abstract ``np.floating`` type.
            elif np.issubdtype(l.dtype, np.integer) or np.issubdtype(l.dtype, np.floating):
                linear_scale = True
                scale = [v[n] for v in data.columns.values]
                scale_name = data.columns.names[n]

                if np.issubdtype(l.dtype, np.floating):
                    # Prefer float scales, assume more accurate
                    break
    else:
        scale = []
        linear_scale = True
        for x in data.columns.values:
            try:
                scale.append(float(x))
            except (TypeError, ValueError):
                # Fix: was a bare ``except:``; only coercion failures should
                # mark the scale as non-linear.
                linear_scale = False
                break

    return scale, linear_scale, scale_name
def spectra(data, figure=None, ax=None, styles=None, regions=None):
    """Plot spectra: one line per class, or each row plus the mean when only
    one class exists; optionally shades (x0, y0, x1, y1) *regions*.

    :param data: pandas DataFrame; rows are samples (optionally carrying a
        'Class' index level), columns provide the x-axis scale.
    :param figure: Figure to draw on (a new one is created when None).
    :param ax: axes argument -- see NOTE below, effectively ignored.
    :param styles: per-class style lookup (``get_style_for_class``).
    :param regions: iterable of rectangles to highlight.
    :return: the matplotlib Figure.
    """
    if figure is None:
        figure = Figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)

    if ax is None:
        ax = figure.add_subplot(111)

    # NOTE(review): this overwrites both the freshly created axes and any
    # caller-supplied ``ax`` with the figure's first axes.
    ax = figure.axes[0]
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    ax.cla()

    if data is None:
        assert False

    #if not float in [type(t) for t in dso.scales[1]]:
    #    # Add fake axis scale for plotting
    #    dso.scales[1] = list(range( len(dso.scales[1])))

    if 'Class' in data.index.names and len(data.index.levels[data.index.names.index('Class')]) > 1:
        class_idx = data.index.names.index('Class')
        classes = list(data.index.levels[class_idx])
    else:
        class_idx = None
        classes = False

    if class_idx is not None and len(classes) > 1:  # We have (more than one) classes
        # More than one data row (class) so plot each class
        # Calculate a mean for each class
        # NOTE(review): DataFrame.mean(level=...) is removed in modern pandas;
        # groupby(level=...).mean() is the equivalent.
        data_mean = data.mean(level=class_idx)
        data_max = data_mean.max()
        data_abs_max = data_mean.abs().max()
    else:
        data_mean = data.mean()
        data_max = data.max()
        data_abs_max = data.abs().max()

    # Annotate using the most non-numeric column index that is most complete
    # NOTE(review): ``longest`` / ``longest_level`` are never assigned below,
    # so ``data_headers`` always stays None and the annotation branch further
    # down is effectively dead code -- presumably lost in a refactor when the
    # level search moved into find_linear_scale().
    data_headers = None
    longest_level = None
    longest = None
    linear_scale = False
    linear_scale_idx = None

    scale, linear_scale, scale_name = find_linear_scale(data)

    if longest:
        data_headers = np.array([v[longest_level] for v in data.columns.values])

    # Temporary scale
    if linear_scale:
        scale = np.array(scale)
        is_scale_reversed = scale[0] > scale[-1]
    else:
        # No numeric scale available: fall back to column positions.
        scale = np.arange(0, data.shape[1])
        is_scale_reversed = False

    if is_scale_reversed:
        ax.invert_xaxis()

    if classes:
        # More than one data row (class) so plot each class
        # Calculate a mean for each class
        plots = OrderedDict()
        for n, c in enumerate(classes):
            if styles:
                ls = styles.get_style_for_class(c).line_kwargs
            else:
                ls = {}

            # NOTE(review): DataFrame.ix was removed in pandas 1.0; .loc is
            # the modern equivalent.
            row = data_mean.ix[c]
            plots[c], = ax.plot(scale, row, **ls)

        legend = ax.legend(list(plots.values()),
                           list(plots.keys()),
                           loc='best')  #, bbox_to_anchor=(1, 1))
        legend.get_frame().set_facecolor('k')
        legend.get_frame().set_alpha(0.05)

    else:
        # Only one data row (class) so plot individual data; with a mean line
        data_mean = np.mean(data, axis=0)
        data_individual = data

        for n in range(0, data_individual.shape[0]):
            row = data_individual.iloc[n]
            ax.plot(scale, row.values, linewidth=0.75, alpha=0.25, color=utils.category10[0])

        ax.plot(scale, data_mean.values, linewidth=0.75, color=utils.category10[0])

    axlimits = ( ax.get_xlim(), ax.get_ylim() )

    if data_headers is not None:
        # Annotate the ten columns with the largest mean magnitude.
        mask = np.isnan(data_abs_max)
        data_abs_max_ma = np.ma.masked_array(data_abs_max, mask=mask)

        idx = list(np.argsort(data_abs_max_ma))[-10:]

        anno_label = data_headers[idx]
        anno_scale = scale[idx]
        anno_y = data_max[idx]

        for x, y, l in zip(anno_scale, anno_y, anno_label):
            # Rotate labels away from the curve, direction depending on sign.
            if y >= 0:
                r = '60'
            else:
                r = '-60'

            if l:
                t = ax.text(x, y, l, rotation=r, rotation_mode='anchor', size=6.5,
                            bbox=dict(boxstyle="round,pad=0.1", fc="#eeeeee", ec="none"))

                # Grow the axis limits so the label (plus 25% headroom) fits.
                bounds = get_text_bbox_data_coords(figure, ax, t)
                if y >= 0:
                    bounds[1, 1] = bounds[1, 1] * 1.25
                else:
                    bounds[0, 1] = bounds[0, 1] * 1.25
                axlimits = extend_limits(axlimits, bounds)

    #ax.set_xlim( axlimits[0] )
    ax.set_ylim(axlimits[1])

    if scale_name:
        ax.set_xlabel(scale_name)

    if regions:  # Plot defined x0, y0, x1, y2 regions onto the plot
        for x0, y0, x1, y1 in regions:
            ax.add_patch( Rectangle( (x0, y0), x1-x0, y1-y0, facecolor="grey", alpha=0.3))

    return figure
def category_bar(data, figure=None, styles=None):
    """Grouped bar chart: one bar group per entity, one bar per class.

    FIXME(review): this function appears only partially ported from an older
    DataSet ("dso") implementation and cannot run as written:
      - ``ax`` is tested below but is neither a parameter nor assigned first
        (NameError on the first use);
      - ``dso`` is referenced throughout but never defined;
      - ``self.ax`` is used although this is a module-level function.
    Left unchanged pending a decision on the intended data API.
    """
    if figure is None:
        figure = Figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)

    if ax is None:
        ax = figure.add_subplot(111)

    ax = figure.axes[0]
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    if data is None:
        assert False

    # Build x positions; we're grouping by X (entity) then plotting the classes
    ax.cla()

    limit_to = 10

    # FIXME: Remove this once UI allows selection of data to plot
    fd = np.mean(dso.data, axis=0)
    fdm = list(zip(dso.labels[1], fd))
    sms = sorted(fdm, key=lambda x: abs(x[1]), reverse=True)
    labels = [m for m, s in sms]

    plots = OrderedDict()

    classes = dso.classes[0]
    #labels = [e if e is not None else dso.labels[1][n] for n,e in enumerate(dso.entities[1][0:limit_to]) ]
    #data = dso.data[:,0:limit_to]
    data = np.array([dso.data[:, dso.labels[1].index(l)] for l in labels]).T[:, :limit_to]

    #0,1,-,4,5,-,6,7
    # 2 classes
    # 3 data points
    # 3*2 = 6; 3*(2+1) = 9

    # Build spaced sets (around middle value)
    # 0 -0.5->+0.5,
    xa = []
    for n, ag in enumerate(data.T):  # Axis groups (can reverse with classes; later)
        xa.append(np.arange(0, len(classes)) + n * (len(classes) + 1))  # Build table

    x = np.array(xa).reshape(len(data.T), len(classes))

    ax.set_xlim(np.min(x) - 1, np.max(x) + 1)

    for n, c in enumerate(classes):
        cdata = data[n]

        if 'error' in dso.statistics:
            # One-sided error bars: positive bars get an upper whisker,
            # negative bars a lower one.
            err = dso.statistics['error']['stddev'][:, :limit_to][n]
            yperr = [(0, 1)[e > 0] for e in cdata]
            ynerr = [(0, 1)[e < 0] for e in cdata]

            yperr = np.array(yperr) * err
            ynerr = np.array(ynerr) * err
            yerr = (ynerr, yperr)
        else:
            yerr = None

        ls = styles.get_style_for_class(c)
        plots[c] = self.ax.bar(x[:, n], cdata, align='center', yerr=yerr, **ls.bar_kwargs)

    xticks = np.mean(x, axis=1)
    ax.set_xticks(xticks)
    ax.set_xticklabels(labels, rotation=45, ha='right', rotation_mode='anchor')

    legend = self.ax.legend(list(plots.values()),
                            list(plots.keys()),
                            loc='best')  #, bbox_to_anchor=(1, 1))
    legend.get_frame().set_facecolor('k')
    legend.get_frame().set_alpha(0.05)

    #if options.title:
    #    self.ax.title(options.title)
    #else:
    #    self.ax..title(metabolite)

    #plt.gca().xaxis.set_label_text(options.xlabel)
    #plt.gca().yaxis.set_label_text(options.ylabel)

    # Add some padding either side of graphs
    #plt.xlim( ind[0]-1, ind[-1]+1)

    return figure
# Add ellipses for confidence intervals, with thanks to Joe Kington
# http://stackoverflow.com/questions/12301071/multidimensional-confidence-intervals
def plot_point_cov(points, nstd=2, **kwargs):
    """
    Plots an `nstd` sigma ellipse based on the mean and covariance of a point
    "cloud" (points, an Nx2 array).

    Parameters
    ----------
        points : An Nx2 array of the data points.
        nstd : The radius of the ellipse in numbers of standard deviations.
            Defaults to 2 standard deviations.
        Additional keyword arguments are pass on to the ellipse patch.

    Returns
    -------
        A matplotlib ellipse artist
    """
    centre = points.mean(axis=0)
    covariance = np.cov(points, rowvar=False)
    return plot_cov_ellipse(covariance, centre, nstd, **kwargs)

def plot_cov_ellipse(cov, pos, nstd=2, **kwargs):
    """
    Plots an `nstd` sigma error ellipse based on the specified covariance
    matrix (`cov`). Additional keyword arguments are passed on to the
    ellipse patch artist.

    Parameters
    ----------
        cov : The 2x2 covariance matrix to base the ellipse on
        pos : The location of the center of the ellipse. Expects a 2-element
            sequence of [x0, y0].
        nstd : The radius of the ellipse in numbers of standard deviations.
            Defaults to 2 standard deviations.
        Additional keyword arguments are pass on to the ellipse patch.

    Returns
    -------
        A matplotlib ellipse artist
    """
    # Eigen-decompose the covariance and order by descending eigenvalue so
    # the major axis comes first.
    eigvals, eigvecs = np.linalg.eigh(cov)
    descending = eigvals.argsort()[::-1]
    eigvals = eigvals[descending]
    eigvecs = eigvecs[:, descending]

    # Orientation of the major axis, in degrees.
    angle = np.degrees(np.arctan2(*eigvecs[:, 0][::-1]))

    # Width and height are "full" widths, not radius
    diam_x, diam_y = 2 * nstd * np.sqrt(eigvals)
    return Ellipse(xy=pos, width=diam_x, height=diam_y, angle=angle, fill=False, **kwargs)
def scatterplot(data, figure=None, ax=None, styles=None, lines=[], label_index=None):
    """2D scatter plot of the first two columns of *data*, coloured per class,
    with 95% confidence ellipses, optional overlay *lines* and point labels.

    NOTE(review): ``lines=[]`` is a mutable default argument (shared between
    calls); harmless here because it is only iterated, but fragile.
    """
    if figure is None:
        figure = Figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)

    if ax is None:
        ax = figure.add_subplot(111)

    # NOTE(review): overwrites any caller-supplied ``ax`` with the figure's
    # first axes (same pattern as spectra()).
    ax = figure.axes[0]
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    ax.cla()

    if data is None:
        assert False

    if 'Class' in data.index.names and len(data.index.levels[data.index.names.index('Class')]) > 1:
        class_idx = data.index.names.index('Class')
        classes = list(data.index.levels[class_idx])
    else:
        class_idx = None
        classes = [None]

    plots = OrderedDict()
    for c in classes:
        if styles:
            ls = styles.get_style_for_class(c)
        else:
            # NOTE(review): when no styles are supplied ``ls`` stays None and
            # the ``ls.markersize`` access below raises AttributeError.
            ls = None

        if c is not None:
            df = data.xs(c, level=class_idx)
        else:
            df = data

        s = ls.markersize ** 2 if ls.markersize is not None else 20  #default
        plots[c] = ax.scatter(df.iloc[:, 0].values, df.iloc[:, 1].values, color=ls.markerfacecolor, marker=ls.marker, s=s)

        # Calculate 95% confidence interval for data but only if points >1
        if df.values.shape[0] > 1:
            ellip = plot_point_cov(df.values, nstd=2, linestyle='dashed', linewidth=0.5, edgecolor=ls.color,
                                   alpha=0.5)  #**kwargs for ellipse styling
            ax.add_artist(ellip)

    # If overlay lines are defined; plot + annotation
    for x, y, label in lines:
        ls = styles.get_style_for_class(None)  # Blank for now; need to replace with general 'info lines' settings
        ax.plot(x, y, **ls.line_kwargs)
        ax.annotate(label, xy=(x[-1], y[-1]))

    if len(plots.keys()) > 1:
        # Only show a legend if there is >1 class (?)
        legend = ax.legend(list(plots.values()),
                           list(plots.keys()),
                           scatterpoints=1,
                           loc='upper left', bbox_to_anchor=(1, 1))
        legend.get_frame().set_facecolor('k')
        legend.get_frame().set_alpha(0.05)

    #ax.set_xlabel(dso.labels[1][0])
    #ax.set_ylabel(dso.labels[1][1])

    # Square the plot
    x0, x1 = ax.get_xlim()
    y0, y1 = ax.get_ylim()
    ax.set_aspect((x1 - x0) / (y1 - y0))

    if label_index is not None and label_index in data.index.names:
        # Annotate every point with the value from the requested index level.
        idx = data.index.names.index(label_index)
        labels = [v[idx] for v in data.index.values]
        for label, x, y in zip(labels, data.iloc[:, 0], data.iloc[:, 1]):
            ax.annotate(label, xy=(x, y), xytext=(-1, 1), textcoords='offset points', ha='right', va='bottom',
                        size='small')

    return figure
def heatmap(data, figure=None, ax=None, styles=None):
    """Render *data* (transposed) as a log2-scaled heat map.

    NOTE(review): np.log2 of zero/negative values yields -inf/NaN (with a
    runtime warning); ``ylim`` is computed but never used (the commented-out
    vmin/vmax hint suggests it was meant for imshow).
    """
    if figure is None:
        figure = Figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)

    if ax is None:
        ax = figure.add_subplot(111)

    ylim = np.abs(data.max().max())

    # Plot it out
    datav = np.float64(data.values.T)
    log2data = np.log2(datav)
    ax.imshow(log2data, interpolation='none', aspect='auto', cmap=cm.RdBu_r)  # vmin=-ylim, vmax=+ylim, )

    # turn off the frame
    ax.set_frame_on(False)

    # Column labels of the image come from the 'Class' level of the row index
    # (rows become image columns after the transpose above).
    labels_x = [v[data.index.names.index('Class')] for v in data.index.values]
    # print labels_x

    ax.set_xticklabels(labels_x, minor=False)
    ax.set_xticks(np.arange(len(labels_x)), minor=False)
    ax.xaxis.tick_top()

    '''
    # put the major ticks at the middle of each cell
    ax.set_yticks(np.arange(data.values.shape[0])+0.5, minor=False)

    # want a more natural, table-like display
    # Set the labels

    # note I could have used nba_sort.columns but made "labels" instead
    labels_x = [ v[ data.columns.names.index('Label') ] for v in data.columns.values]
    ax.set_xticklabels(labels_x, minor=False)

    # rotate the
    plt.xticks(rotation=90)

    for t in ax.xaxis.get_major_ticks():
        t.tick1On = False
        t.tick2On = False
    for t in ax.yaxis.get_major_ticks():
        t.tick1On = False
        t.tick2On = False
    '''

    ax.grid(False)

    return figure
def difference(data1, data2, figure=None, ax=None, styles=None):
    """Plot the difference between the (row-mean) spectra of two frames,
    shading the region between them; returns ``None`` when either frame
    lacks a numeric x-scale (interpolation onto a common scale would then
    be impossible)."""
    if figure is None:
        figure = Figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)

    if ax is None:
        ax = figure.add_subplot(111)

    ax = figure.axes[0]
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    ax.cla()

    # Get common scales
    data1v = np.mean(data1.values, 0)  # Mean flatten
    data2v = np.mean(data2.values, 0)  # Mean flatten

    scale1, linear_scale1, scale_name1 = find_linear_scale(data1)
    scale2, linear_scale2, scale_name2 = find_linear_scale(data2)

    if not linear_scale1 or not linear_scale2:
        return None  # Can't interpolate with non-linear scale

    is_reversed = False

    scale1 = np.array(scale1)
    scale2 = np.array(scale2)

    # np.interp requires ascending x; flip descending scales first and
    # remember the orientation so the plot can be flipped back afterwards.
    if scale1[0] > scale1[-1]:
        # Reverse direction
        is_reversed = True
        # Flip to increasing for interpolation
        scale1 = scale1[::-1]
        data1v = data1v[::-1]

    if scale2[0] > scale2[-1]:
        scale2 = scale2[::-1]
        data2v = data2v[::-1]

    # Interpolate the data for shorter set
    if len(scale1) < len(scale2):
        data1v = np.interp(np.array(scale2), np.array(sorted(scale1)), data1v)
        x = scale2

    elif len(scale1) > len(scale2):
        data2v = np.interp(np.array(scale1), np.array(sorted(scale2)), data2v)
        x = scale1

    else:
        x = scale1

    # Return to original order (not we must sort both arrays the same direction)
    if is_reversed:
        x = x[::-1]
        data1v = data1v[::-1]
        data2v = data2v[::-1]

    y1 = data1v
    y2 = data2v

    ax.cla()

    # Reference curve plus two one-sided fills: one colour where y2 >= y1,
    # the other where y2 <= y1.
    ax.plot(x, y2, color='black', linewidth=0.25)
    ax.fill_between(x, y1, y2, where=y2 >= y1, facecolor=utils.category10[0], interpolate=False)
    ax.fill_between(x, y1, y2, where=y2 <= y1, facecolor=utils.category10[1], interpolate=False)

    if scale_name1:
        ax.set_xlabel(scale_name1)

    return figure
def histogram(data, bins=100, figure=None, ax=None, styles=None, regions=None):
    """Histogram of *data* values (one histogram per class when a multi-valued
    'Class' index level exists), with mean and +/-1,2 sigma reference lines."""
    if figure is None:
        figure = Figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)

    if ax is None:
        ax = figure.add_subplot(111)

    ax = figure.axes[0]
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    ax.cla()

    if data is None:
        assert False

    # Global statistics over every value, ignoring NaNs.
    mean = np.nanmean( data.values.flatten() )
    std = np.nanstd( data.values.flatten() )

    if 'Class' in data.index.names and len(data.index.levels[data.index.names.index('Class')]) > 1:
        class_idx = data.index.names.index('Class')
        classes = list(data.index.levels[class_idx])
    else:
        class_idx = None
        classes = False

    # Reference lines: mean (black), +/-1 sigma (red), +/-2 sigma (green).
    ax.axvline(mean, c='k')
    ax.axvline(mean+std, c='r')
    ax.axvline(mean-std, c='r')
    ax.axvline(mean+std*2, c='g')
    ax.axvline(mean-std*2, c='g')

    if classes:
        # More than one data row (class) so plot each class
        # Calculate a mean for each class
        plots = OrderedDict()
        for n, c in enumerate(classes):
            # FIXME: Need to define a subset of style features for histograms (mplstyler)
            if styles:
                ls = styles.get_style_for_class(c).line_kwargs
            else:
                ls = {}

            row = np.nanmean( data.xs(c, level=class_idx).values, axis=0 )
            row = row[ ~np.isnan(row) ]
            # NOTE(review): the enumerate loop variable ``n`` is shadowed here
            # by the bin-counts return value of ax.hist.
            n, b, plots[c] = ax.hist(row, bins=bins, alpha=0.5)  #, **ls)

        legend = ax.legend(list(plots.values()),
                           list(plots.keys()),
                           loc='best')  #, bbox_to_anchor=(1, 1))
        legend.get_frame().set_facecolor('k')
        legend.get_frame().set_alpha(0.05)

    else:
        # Only one data row (class) so plot all
        row = np.nanmean(data.values, axis=0)
        row = row[ ~np.isnan(row) ]
        # NOTE(review): np.nanmean(row, axis=0) reduces the already-1D ``row``
        # to a scalar, so this histograms a single value -- presumably
        # ``ax.hist(row, ...)`` was intended; confirm before changing.
        ax.hist( np.nanmean( row, axis=0), bins=bins, alpha=0.5, color=utils.category10[0])

    if regions:  # Plot defined x0, y0, x1, y2 regions onto the plot
        for x0, y0, x1, y1 in regions:
            ax.add_patch( Rectangle( (x0, y0), x1-x0, y1-y0, facecolor="grey", alpha=0.3))

    return figure
| gpl-3.0 |
ondrolexa/pypsbuilder | setup.py | 1 | 1751 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import path
from setuptools import setup, find_packages

# Read long-description sources relative to this file, not the CWD.
CURRENT_PATH = path.abspath(path.dirname(__file__))

with open(path.join(CURRENT_PATH, 'README.md')) as readme_file:
    readme = readme_file.read()

with open(path.join(CURRENT_PATH, 'CHANGELOG.md')) as changelog_file:
    changelog = changelog_file.read()

# Runtime dependencies installed alongside the package.
requirements = [
    'numpy',
    'matplotlib',
    'scipy',
    'networkx',
    'shapely',
    'descartes',
    'tqdm'
]

setup(
    name='pypsbuilder',
    version='2.3.0',
    description="THERMOCALC front-end for constructing and analyzing PT pseudosections",
    long_description=readme + '\n\n' + changelog,
    long_description_content_type="text/markdown",
    author="Ondrej Lexa",
    author_email='lexa.ondrej@gmail.com',
    url='https://github.com/ondrolexa/pypsbuilder',
    # Fix: the license field said "MIT" while the trove classifier below (and
    # the project's LICENSE file) declare GPLv3 -- keep the metadata consistent.
    license="GPL-3.0",
    python_requires=">=3.6",
    packages=find_packages(),
    package_data={'pypsbuilder.images': ['*.png']},
    # Console-script entry points for the builders and explorer utilities.
    entry_points="""
    [console_scripts]
    ptbuilder=pypsbuilder.psbuilders:ptbuilder
    txbuilder=pypsbuilder.psbuilders:txbuilder
    pxbuilder=pypsbuilder.psbuilders:pxbuilder
    psshow=pypsbuilder.psexplorer:ps_show
    psiso=pypsbuilder.psexplorer:ps_iso
    psgrid=pypsbuilder.psexplorer:ps_grid
    psdrawpd=pypsbuilder.psexplorer:ps_drawpd
    """,
    install_requires=requirements,
    zip_safe=False,
    keywords='pypsbuilder',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Scientific/Engineering',
        'Topic :: Utilities'
    ]
)
| gpl-3.0 |
soccermetrics/marcotti-events | marcottievents/etl/base/workflows.py | 1 | 2927 | from datetime import date
import pandas as pd
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from marcottievents.models.common.suppliers import Suppliers
class ETL(object):
    """
    Top-level ETL workflow.

    Receive extracted data from XML and/or CSV sources, transform/validate it,
    and load it to database.
    """

    def __init__(self, **kwargs):
        session = kwargs.get('session')
        self.supplier = kwargs.get('supplier')
        self.transformer = kwargs.get('transform')(session, self.supplier)
        self.loader = kwargs.get('load')(session, self.supplier)

    def workflow(self, entity, *data):
        """
        Implement ETL workflow for a specific data entity:

        1. Combine data extracted from data sources.
        2. Transform and validate combined data into IDs and enums in the
           Marcotti database.
        3. Load transformed data into the database if it is not already there.

        :param entity: Data model name
        :param data: Data payloads from XML and/or CSV sources, in lists of dictionaries
        """
        combined = self.combiner(*data)
        transformed = getattr(self.transformer, entity)(combined)
        getattr(self.loader, entity)(transformed)

    @staticmethod
    def combiner(*data_dicts):
        """
        Combine data from primary and supplemental data sources using the
        unique ID (``remote_id``) of the primary records.

        :param data_dicts: List of data payloads from data sources, primary source first in list.
        :return: pandas DataFrame of the combined data.
        """
        frames = [pd.DataFrame(payload) for payload in data_dicts]
        if len(frames) > 1:
            # Drop all-NaN columns before merging so empty supplemental
            # columns do not collide with populated primary ones.
            trimmed = [frame.dropna(axis=1, how='all') for frame in frames]
            return pd.merge(*trimmed, on=['remote_id'])
        return frames[0]
class WorkflowBase(object):
    """Shared helpers for transform/load workflow steps (session handling,
    record-ID lookup, date parsing)."""

    def __init__(self, session, supplier):
        self.session = session
        # Resolve the supplier name to its database ID up front; None when no
        # supplier was given.
        self.supplier_id = self.get_id(Suppliers, name=supplier) if supplier else None

    def get_id(self, model, **conditions):
        """Return the ID of the unique *model* record matching *conditions*.

        Returns ``None`` (after reporting the problem) when no record, or more
        than one record, matches.
        """
        try:
            record_id = self.session.query(model).filter_by(**conditions).one().id
        except NoResultFound:
            # Fix: converted Py2-only print statements to print() calls so the
            # module parses under both Python 2 and Python 3; the unused
            # ``as ex`` bindings were dropped.
            print("{} has no records in Marcotti database for: {}".format(model.__name__, conditions))
            return None
        except MultipleResultsFound:
            print("{} has multiple records in Marcotti database for: {}".format(model.__name__, conditions))
            return None
        return record_id

    @staticmethod
    def make_date_object(iso_date):
        """
        Convert ISO date string into datetime.date object.

        :param iso_date: Date string in ISO 8601 format (YYYY-MM-DD).
        :return: :class:`datetime.date` object, or ``None`` if *iso_date*
            cannot be parsed.
        """
        try:
            yr, mo, da = [int(x) for x in iso_date.split('-')]
            return date(yr, mo, da)
        except ValueError:
            return None
| mit |
fredhusser/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
    from scipy.special import expit  # SciPy >= 0.10
    with np.errstate(invalid='ignore', over='ignore'):
        if np.isnan(expit(1000)):  # SciPy < 0.14
            raise ImportError("no stable expit in scipy.special")
except ImportError:
    def expit(x, out=None):
        """Logistic sigmoid function, ``1 / (1 + exp(-x))``.

        See sklearn.utils.extmath.log_logistic for the log of this function.
        """
        if out is None:
            out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
        out[:] = x

        # 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
        # This way of computing the logistic is both fast and stable.
        out *= .5
        np.tanh(out, out)
        out += 1
        out *= .5

        return out.reshape(np.shape(x))

# little danse to see if np.copy has an 'order' keyword argument
# NOTE(review): inspect.getargspec was removed in Python 3.11; this legacy
# shim would need inspect.signature on modern interpreters.
if 'order' in inspect.getargspec(np.copy)[0]:
    def safe_copy(X):
        # Copy, but keep the order
        return np.copy(X, order='K')
else:
    # Before an 'order' argument was introduced, numpy wouldn't muck with
    # the ordering
    safe_copy = np.copy
# NOTE(review): ``np.float`` below is the deprecated alias removed in
# NumPy 1.24; this compatibility shim predates that removal.
try:
    if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
                        np.divide(.4, 1, casting="unsafe", dtype=np.float))
            or not np.allclose(np.divide(.4, 1), .4)):
        raise TypeError('Divide not working with dtype: '
                        'https://github.com/numpy/numpy/issues/3484')
    divide = np.divide

except TypeError:
    # Compat for old versions of np.divide that do not provide support for
    # the dtype args
    def divide(x1, x2, out=None, dtype=None):
        # Emulates the modern np.divide(x1, x2, out=..., dtype=...) signature.
        out_orig = out
        if out is None:
            out = np.asarray(x1, dtype=dtype)
            if out is x1:
                # asarray returned x1 itself; copy so x1 is not mutated.
                out = x1.copy()
        else:
            if out is not x1:
                out[:] = x1
        if dtype is not None and out.dtype != dtype:
            out = out.astype(dtype)
        out /= x2
        if out_orig is None and np.isscalar(x1):
            # Scalar in, scalar out -- mirror np.divide's behaviour.
            out = np.asscalar(out)
        return out

try:
    np.array(5).astype(float, copy=False)
except TypeError:
    # Compat where astype accepted no copy argument
    def astype(array, dtype, copy=True):
        # Skip the copy (and the TypeError) when it is not needed.
        if not copy and array.dtype == dtype:
            return array
        return array.astype(dtype)
else:
    astype = np.ndarray.astype
try:
    with warnings.catch_warnings(record=True):
        # Don't raise the numpy deprecation warnings that appear in
        # 1.9, but avoid Python bug due to simplefilter('ignore')
        warnings.simplefilter('always')
        sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument
    # the following code is taken from the scipy 0.14 codebase

    def _minor_reduce(X, ufunc):
        # Reduce over the minor axis of a compressed sparse matrix: returns
        # the indices of non-empty major rows and the per-row reduced values.
        major_index = np.flatnonzero(np.diff(X.indptr))
        if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 don't handle empty arrays in reduceat
            value = np.zeros_like(X.data)
        else:
            value = ufunc.reduceat(X.data, X.indptr[major_index])
        return major_index, value

    def _min_or_max_axis(X, axis, min_or_max):
        # Per-axis min/max of sparse matrix X using ufunc min_or_max.
        N = X.shape[axis]
        if N == 0:
            raise ValueError("zero-size array to reduction operation")
        M = X.shape[1 - axis]
        # Compress along the reduction axis so rows of the compressed form
        # correspond to output entries.
        mat = X.tocsc() if axis == 0 else X.tocsr()
        mat.sum_duplicates()
        major_index, value = _minor_reduce(mat, min_or_max)
        # Rows with fewer than N stored entries implicitly contain zeros,
        # which must also participate in the reduction.
        not_full = np.diff(mat.indptr)[major_index] < N
        value[not_full] = min_or_max(value[not_full], 0)
        mask = value != 0
        major_index = np.compress(mask, major_index)
        value = np.compress(mask, value)

        from scipy.sparse import coo_matrix
        if axis == 0:
            res = coo_matrix((value, (np.zeros(len(value)), major_index)),
                             dtype=X.dtype, shape=(1, M))
        else:
            res = coo_matrix((value, (major_index, np.zeros(len(value)))),
                             dtype=X.dtype, shape=(M, 1))
        return res.A.ravel()

    def _sparse_min_or_max(X, axis, min_or_max):
        if axis is None:
            # Reduction over the whole matrix to a single scalar.
            if 0 in X.shape:
                raise ValueError("zero-size array to reduction operation")
            zero = X.dtype.type(0)
            if X.nnz == 0:
                return zero
            m = min_or_max.reduce(X.data.ravel())
            if X.nnz != np.product(X.shape):
                # Implicit zeros present: include them in the reduction.
                m = min_or_max(zero, m)
            return m
        if axis < 0:
            axis += 2
        if (axis == 0) or (axis == 1):
            return _min_or_max_axis(X, axis, min_or_max)
        else:
            raise ValueError("invalid axis, use 0 for rows, or 1 for columns")

    def sparse_min_max(X, axis):
        # Backported (min, max) pair along the requested axis.
        return (_sparse_min_or_max(X, axis, np.minimum),
                _sparse_min_or_max(X, axis, np.maximum))

else:
    def sparse_min_max(X, axis):
        # Modern scipy: delegate to the built-in axis-aware min/max.
        return (X.min(axis=axis).toarray().ravel(),
                X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
understar/imgcls4wmts | utils/train.py | 1 | 1060 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 04 20:34:11 2014
@author: Administrator
"""
import logging
import numpy as np
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix
logging.getLogger().setLevel(logging.INFO)
logging.info('Loading training data and labels.')
X = np.loadtxt('420_X.txt', delimiter=',')
y = np.loadtxt('420_Y.txt', delimiter=',')
logging.info('Split dataset for training and testing.')
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
logging.info('Training the model.')
classifer = OneVsRestClassifier(LinearSVC(random_state=0))
classifer.fit(X_train, y_train)
logging.info('Testing the model')
y_hat = classifer.predict(X_test)
cm = confusion_matrix(y_test, y_hat)
print cm
logging.info('Save the model')
from sklearn.externals import joblib
filename = '420.pkl'
joblib.dump(classifer, filename, compress = 9) | bsd-2-clause |
neerajhirani/BDA_py_demos | demos_ch11/demo11_1.py | 19 | 14478 | """Bayesian data analysis
Chapter 11, demo 1
Gibbs sampling demonstration
"""
from __future__ import division
import threading
import numpy as np
import scipy.io # For importing a matlab file
from scipy import linalg, stats
import matplotlib as mpl
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2, markeredgewidth=1.5,
markersize=8)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# Parameters of a Normal distribution used as a toy target distribution
y1 = 0
y2 = 0
r = 0.8
S = np.array([[1.0, r], [r, 1.0]])
# Starting value of the chain
t1 = -2.5
t2 = 2.5
# Number of iterations.
M = 2*1000
# N.B. In this implementation one iteration updates only one parameter and one
# complete iteration updating both parameters takes two basic iterations. This
# implementation was used to make plotting of Gibbs sampler's zig-zagging. In
# plots You can implement this also by saving only the final state of complete
# iteration updating all parameters.
# ====== Gibbs sampling here
# Allocate memory for the samples
tt = np.empty((M,2))
tt[0] = [t1, t2] # Save starting point
# For demonstration load pre-computed values
# Replace this with your algorithm!
# tt is a M x 2 array, with M samples of both theta_1 and theta_2
res_path = '../utilities_and_data/demo11_2.mat'
res = scipy.io.loadmat(res_path)
''' Content information of the precalculated results:
>>> scipy.io.whosmat(res_path)
[('tt', (2001, 2), 'double')]
'''
tt = res['tt']
# ====== The rest is just for illustration
# Grid
Y1 = np.linspace(-4.5, 4.5, 150)
Y2 = np.linspace(-4.5, 4.5, 150)
# Plot 90% HPD.
# In 2d-case contour for 90% HPD is an ellipse, whose semimajor
# axes can be computed from the eigenvalues of the covariance
# matrix scaled by a value selected to get ellipse match the
# density at the edge of 90% HPD. Angle of the ellipse could be
# computed from the eigenvectors, but since marginals are same
# we know that angle is 45 degrees.
q = np.sort(np.sqrt(linalg.eigh(S, eigvals_only=True)) * 2.147)
el = mpl.patches.Ellipse(
xy = (y1,y2),
width = 2 * q[1],
height = 2 * q[0],
angle = 45,
facecolor = 'none',
edgecolor = '#e41a1c'
)
el_legend = mpl.lines.Line2D([], [], color='#e41a1c', linewidth=1)
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(111, aspect='equal')
ax.add_artist(el)
samp_legend, = ax.plot(
tt[0,0], tt[0,1], 'o', markerfacecolor='none', markeredgecolor='#377eb8')
ax.set_xlim([-4.5, 4.5])
ax.set_ylim([-4.5, 4.5])
ax.set_xlabel(r'$\theta_1$', fontsize=18)
ax.set_ylabel(r'$\theta_2$', fontsize=18)
htext = ax.set_title('Gibbs sampling\npress any key to continue...',
fontsize=18)
ax.legend((el_legend, samp_legend), ('90% HPD', 'Starting point'), numpoints=1,
loc='lower right')
pdfline_legend = mpl.lines.Line2D([], [], color='#377eb8')
chain_legend = mpl.lines.Line2D(
[], [], color='#377eb8', marker='o',
markerfacecolor='none', markeredgecolor='#377eb8'
)
burnchain_legend = mpl.lines.Line2D(
[], [], color='m', marker='o',
markerfacecolor='none', markeredgecolor='m'
)
# function for interactively updating the figure
def update_figure(event):
if icontainer.stage == 0 and icontainer.i < 7 and icontainer.drawdist:
i = icontainer.i
icontainer.drawdist = False
# Remove previous lines
for l in icontainer.remove_lines:
ax.lines.remove(l)
icontainer.remove_lines = []
if i % 2 == 0:
line = ax.axhline(y=tt[i,1], linestyle='--', color='k')
icontainer.remove_lines.append(line)
line, = ax.plot(
Y1,
tt[i,1] + stats.norm.pdf(
Y1,
loc = y1 + r*(tt[i,1] - y2),
scale = np.sqrt((1 - r**2))
),
color = '#377eb8'
)
icontainer.remove_lines.append(line)
if i == 0:
ax.legend(
(el_legend, samp_legend, pdfline_legend),
( '90% HPD',
'Starting point',
r'Conditional density given $\theta_2$'
),
numpoints=1,
loc='lower right'
)
else:
ax.legend(
(el_legend, samp_legend, pdfline_legend),
( '90% HPD',
'Samples from the chain',
r'Conditional density given $\theta_2$'
),
loc='lower right'
)
else:
line = ax.axvline(x=tt[i,0], linestyle='--', color='k')
icontainer.remove_lines.append(line)
line, = ax.plot(
tt[i,0] + stats.norm.pdf(
Y2,
loc = y2 + r*(tt[i,0] - y1),
scale = np.sqrt((1 - r**2))
),
Y2,
color = '#377eb8'
)
icontainer.remove_lines.append(line)
ax.legend(
(el_legend, samp_legend, pdfline_legend),
( '90% HPD',
'Samples from the chain',
r'Conditional density given $\theta_1$'
),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 0 and icontainer.i < 7 and not icontainer.drawdist:
icontainer.i += 1
i = icontainer.i
if i == 6:
icontainer.stage += 1
icontainer.drawdist = True
sampi, = ax.plot(tt[i,0], tt[i,1], 'o', markerfacecolor='none',
markeredgecolor='#377eb8')
icontainer.samps.append(sampi)
if i == 1:
ax.legend(
(el_legend, samp_legend, pdfline_legend),
( '90% HPD',
'Samples from the chain',
r'Conditional density given $\theta_2$'
),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 1:
icontainer.stage += 1
for l in icontainer.remove_lines:
ax.lines.remove(l)
icontainer.remove_lines = []
ax.legend(
(el_legend, samp_legend),
('90% HPD', 'Samples from the chain'),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 2:
icontainer.stage += 1
for s in icontainer.samps:
ax.lines.remove(s)
icontainer.samps = []
line, = ax.plot(
tt[:icontainer.i+1,0], tt[:icontainer.i+1,1], color='#377eb8')
icontainer.samps.append(line)
line, = ax.plot(
tt[:icontainer.i+1:2,0], tt[:icontainer.i+1:2,1],
'o', markerfacecolor='none', markeredgecolor='#377eb8')
icontainer.samps.append(line)
ax.legend((el_legend, chain_legend), ('90% HPD', 'Markov chain'),
loc='lower right')
fig.canvas.draw()
elif icontainer.stage == 3:
icontainer.stage += 1
# modify helper text
htext.set_text('Gibbs sampling\npress `q` to skip animation')
# start the timer
anim_thread.start()
elif icontainer.stage == 4 and event.key == 'q':
# stop the animation
stop_anim.set()
elif icontainer.stage == 5:
icontainer.stage += 1
for s in icontainer.samps:
ax.lines.remove(s)
icontainer.samps = []
# remove helper text
icontainer.itertext.remove()
line, = ax.plot(tt[:burnin,0], tt[:burnin,1], color='m')
icontainer.samps.append(line)
line, = ax.plot(tt[:burnin:2,0], tt[:burnin:2,1], 'o',
markerfacecolor='none', markeredgecolor='m')
icontainer.samps.append(line)
line, = ax.plot(
tt[burnin:nanim+1,0], tt[burnin:nanim+1,1], color='#377eb8')
icontainer.samps.append(line)
line, = ax.plot(tt[burnin:nanim+1:2,0], tt[burnin:nanim+1:2,1], 'o',
markerfacecolor='none', markeredgecolor='#377eb8')
icontainer.samps.append(line)
ax.legend(
(el_legend, chain_legend, burnchain_legend),
('90% HPD', 'Markov chain', 'burn-in'),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 6:
icontainer.stage += 1
for s in icontainer.samps:
ax.lines.remove(s)
icontainer.samps = []
line, = ax.plot(tt[burnin:nanim+1:2,0], tt[burnin:nanim+1:2,1], 'o',
markerfacecolor='none', markeredgecolor='#377eb8')
icontainer.samps.append(line)
ax.legend(
(el_legend, samp_legend),
('90% HPD', 'samples from the chain after burn-in'),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 7:
icontainer.stage += 1
for s in icontainer.samps:
ax.lines.remove(s)
icontainer.samps = []
points = ax.scatter(
tt[burnin::2,0], tt[burnin::2,1], 10, alpha=0.5, color='#377eb8')
icontainer.samps.append(points)
ax.legend(
(el_legend, points),
('90% HPD', '950 samples from the chain'),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 8:
icontainer.stage += 1
fig.clear()
indexes = np.arange(burnin,M,2)
samps = tt[indexes]
ax1 = fig.add_subplot(3,1,1)
ax1.axhline(y=0, linewidth=1, color='gray')
line1, line2, = ax1.plot(indexes/2, samps, linewidth=1)
ax1.legend((line1, line2), (r'$\theta_1$', r'$\theta_2$'))
ax1.set_xlabel('iteration')
ax1.set_title('trends')
ax1.set_xlim([burnin/2, 1000])
ax2 = fig.add_subplot(3,1,2)
ax2.axhline(y=0, linewidth=1, color='gray')
ax2.plot(
indexes/2,
np.cumsum(samps, axis=0)/np.arange(1,len(samps)+1)[:,None],
linewidth=1.5
)
ax2.set_xlabel('iteration')
ax2.set_title('cumulative average')
ax2.set_xlim([burnin/2, 1000])
ax3 = fig.add_subplot(3,1,3)
maxlag = 20
sampsc = samps - np.mean(samps, axis=0)
acorlags = np.arange(maxlag+1)
ax3.axhline(y=0, linewidth=1, color='gray')
for i in [0,1]:
t = np.correlate(sampsc[:,i], sampsc[:,i], 'full')
t = t[-len(sampsc):-len(sampsc)+maxlag+1] / t[-len(sampsc)]
ax3.plot(acorlags, t)
ax3.set_xlabel('lag')
ax3.set_title('estimate of the autocorrelation function')
fig.suptitle('Gibbs sampling - press any key to continue...',
fontsize=18)
fig.subplots_adjust(hspace=0.6)
fig.canvas.draw()
elif icontainer.stage == 9:
icontainer.stage += 1
fig.clear()
indexes = np.arange(burnin,M,2)
samps = tt[indexes]
nsamps = np.arange(1,len(samps)+1)
ax1 = fig.add_subplot(1,1,1)
ax1.axhline(y=0, linewidth=1, color='gray')
line1, line2, = ax1.plot(
indexes/2,
np.cumsum(samps, axis=0)/nsamps[:,None],
linewidth=1.5
)
er1, = ax1.plot(
indexes/2, 1.96/np.sqrt(nsamps/4), 'k--', linewidth=1)
ax1.plot(indexes/2, -1.96/np.sqrt(nsamps/4), 'k--', linewidth=1)
er2, = ax1.plot(
indexes/2, 1.96/np.sqrt(nsamps), 'k:', linewidth=1)
ax1.plot(indexes/2, -1.96/np.sqrt(nsamps), 'k:', linewidth=1)
ax1.set_xlabel('iteration')
ax1.set_title('Gibbs sampling\ncumulative average')
ax1.legend(
(line1, line2, er1, er2),
(r'$\theta_1$', r'$\theta_2$',
'95% interval for MCMC error',
'95% interval for independent MC'
)
)
ax1.set_xlim([burnin/2, 1000])
ax1.set_ylim([-2, 2])
fig.canvas.draw()
# function for performing the figure animation in thread
def animation():
icontainer.itertext = ax.text(-4, 4, '', fontsize=18)
delay0 = 0.4
delayk = 0.85
while icontainer.i < nanim:
icontainer.i += 1
i = icontainer.i
icontainer.itertext.set_text('iter {}'.format(i//2))
# show next sample
line, = ax.plot(tt[i-1:i+1,0], tt[i-1:i+1,1], color='#377eb8')
icontainer.samps.append(line)
if i % 2 == 0:
line, = ax.plot(
tt[i,0], tt[i,1], 'o',
markerfacecolor='none', markeredgecolor='#377eb8')
icontainer.samps.append(line)
# update figure
fig.canvas.draw()
if i < nanim and (i < 16 or i % 2 == 0):
# wait animation delay time or until animation is cancelled
stop_anim.wait(delay0)
delay0 *= delayk
if stop_anim.isSet():
# animation cancelled
break
# skip the rest if the figure does not exist anymore
if not plt.fignum_exists(fig.number):
return
# advance stage
icontainer.stage += 1
# modify helper text
htext.set_text('Gibbs sampling\npress any key to continue...')
# plot the rest of the samples
if i < nanim:
icontainer.itertext.set_text('iter {}'.format(nanim//2))
line, = ax.plot(tt[i:nanim+1,0], tt[i:nanim+1,1], color='#377eb8')
icontainer.samps.append(line)
line, = ax.plot(tt[nanim:i-1:-2,0], tt[nanim:i-1:-2,1], 'o',
markerfacecolor='none', markeredgecolor='#377eb8')
icontainer.samps.append(line)
icontainer.i = nanim
fig.canvas.draw()
# animation related variables
stop_anim = threading.Event()
anim_thread = threading.Thread(target=animation)
nanim = 200
burnin = 50
# store the information of the current stage of the figure
class icontainer(object):
stage = 0
i = 0
drawdist = True
remove_lines = []
samps = [samp_legend]
itertext = None
# set figure to react to keypress events
fig.canvas.mpl_connect('key_press_event', update_figure)
# start blocking figure
plt.show()
| gpl-3.0 |
MonsieurV/py-findpeaks | tests/libs/findpeaks.py | 1 | 1767 | """ Searches for peaks in data
History:
-nov 2015: Janko Slavic, update
-mar 2013: janko.slavic@fs.uni-lj.si
"""
import numpy as np
def findpeaks(data, spacing=1, limit=None):
"""Finds peaks in `data` which are of `spacing` width and >=`limit`.
:param data: values
:param spacing: minimum spacing to the next peak (should be 1 or more)
:param limit: peaks should have value greater or equal
:return:
"""
len = data.size
x = np.zeros(len+2*spacing)
x[:spacing] = data[0]-1.e-6
x[-spacing:] = data[-1]-1.e-6
x[spacing:spacing+len] = data
peak_candidate = np.zeros(len)
peak_candidate[:] = True
for s in range(spacing):
start = spacing - s - 1
h_b = x[start : start + len] # before
start = spacing
h_c = x[start : start + len] # central
start = spacing + s + 1
h_a = x[start : start + len] # after
peak_candidate = np.logical_and(peak_candidate, np.logical_and(h_c > h_b, h_c > h_a))
ind = np.argwhere(peak_candidate)
ind = ind.reshape(ind.size)
if limit is not None:
ind = ind[data[ind] > limit]
return ind
if __name__ == '__main__':
import matplotlib.pyplot as plt
n = 80
m = 20
limit = 0
spacing = 3
t = np.linspace(0., 1, n)
x = np.zeros(n)
np.random.seed(0)
phase = 2 * np.pi * np.random.random(m)
for i in range(m):
x += np.sin(phase[i] + 2 * np.pi * t * i)
peaks = findpeaks(x, spacing=spacing, limit=limit)
plt.plot(t, x)
plt.axhline(limit, color='r')
plt.plot(t[peaks], x[peaks], 'ro')
plt.title('Peaks: minimum value {limit}, minimum spacing {spacing} points'.format(**{'limit': limit, 'spacing': spacing}))
plt.show()
| mit |
Ernestyj/PyStudy | finance/DaysTest/TestingFusion.py | 1 | 11643 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import talib
from pyalgotrade import strategy, plotter
from pyalgotrade.broker.backtesting import TradePercentage, Broker
from pyalgotrade.broker import Order
from pyalgotrade.barfeed import yahoofeed
from pyalgotrade.broker.slippage import NoSlippage, VolumeShareSlippage
from pyalgotrade.stratanalyzer import returns, trades
from pyalgotrade.talibext import indicator
from pyalgotrade.optimizer import server, local
import itertools
from sklearn import preprocessing, svm, cross_validation, metrics, pipeline, grid_search
from scipy.stats import sem
from DaysDataPrepare import readWSDFile, readWSDIndexFile, prepareData, optimizeSVM
def readAndReWriteCSV(baseDir, instrument, startYear, yearNum=1):
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
df = 0
for i in range(yearNum):
tempDF = pd.read_csv(baseDir + instrument + '/wsd_' + instrument + '_' + str(startYear + i) + '.csv',
index_col=0, sep='\t', usecols=[0, 2, 3, 4, 5, 6, 14], header=None,
skiprows=1, names=['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close'],
parse_dates=True, date_parser=dateparse)
if i == 0:
df = tempDF
else:
df = df.append(tempDF)
pathName = None
resultDF = None
if yearNum==1:
pathName = baseDir+str(instrument)+'_'+str(startYear)+'.csv'
resultDF = df[str(startYear)]
else:
pathName = baseDir+str(instrument)+'_'+str(startYear)+'_'+str(startYear+yearNum-1)+'.csv'
resultDF = df[str(startYear):str(startYear+yearNum-1)]
resultDF.to_csv(pathName)
return pathName, resultDF
'''
计算收益率
'''
def returnRatio(V, C=100000.0):
return V/C-1.0
'''
计算收益率(多期)
'''
def returnRatioArr1(VArr, C=100000.0):
arr = []
for v in VArr: arr.append(v/C-1.0)
return arr
def returnRatioArr(VArr, C=100000.0):
arr = []
for v in VArr:
arr.append(v / C - 1.0)
C = v
return arr
'''
计算年化收益率(多期)
'''
def annualizedReturnRatio(returnRatioArr, T=250.0, D=250.0):
import math
tmp = 1
for r in returnRatioArr: tmp *= (r+1)
return math.pow(tmp, D/T)-1
'''
计算年化收益率(单期)
'''
def annualizedReturnRatioSingle(portfolio, C=100000.0, T=250.0, D=250.0):
import math
return math.pow(portfolio/C, D/T) - 1
baseDir = '/Users/eugene/Downloads/Data/'
# baseDir = '/Users/eugene/Downloads/marketQuotationData/'
# 沪深300 上证50 中证500
instruments = ['000300.SH', '000016.SH', '000905.SH']
instrument = instruments[2]
initCapital = 100000000.0 # 一亿
startYear = 2015; yearNum = 1
# startYear = 2014; yearNum = 2
df = readWSDFile(baseDir, instrument, startYear, yearNum)
print 'Day count:', len(df)
# print df.head(5)
dfi = readWSDIndexFile(baseDir, instrument, startYear, yearNum)
X, y, actionDates = prepareData(df, dfi, win=16)
print np.shape(X), np.shape(actionDates), np.shape(y); print y
normalizer = preprocessing.Normalizer().fit(X) # fit does nothing
X_norm = normalizer.transform(X)
# gamma, C, score = optimizeSVM(X_norm, y, kFolds=10); print 'gamma=',gamma, 'C=',C, 'score=',score
# clf = svm.SVC(kernel='rbf', gamma=0.125, C=0.125)
# clf = svm.SVC(kernel='rbf', gamma=512, C=32768)
# clf = svm.SVC(kernel='rbf', gamma=2048, C=32768)
# clf = svm.SVC(kernel='rbf', gamma=2048, C=32768)
# clf = svm.SVC(kernel='rbf', gamma=0.125, C=0.125)
clf = svm.SVC(kernel='rbf', gamma=0.125, C=0.125)
from EnsembleTest import optimizeEnsemble
from AdaboostSGDTest import optimizeAdaBoostSGD
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, ExtraTreesClassifier, BaggingClassifier, VotingClassifier
from sklearn.linear_model import SGDClassifier
clf_rf = RandomForestClassifier(n_estimators=200, random_state=47)
clf_sgd = AdaBoostClassifier(base_estimator=SGDClassifier(loss='log', alpha=0.000001, random_state=47), n_estimators=200, random_state=47)
voting = VotingClassifier(estimators=[('svm', clf), ('rf', clf_rf), ('sgd', clf_sgd)], voting='hard')
pathName, df = readAndReWriteCSV(baseDir, instrument, startYear=startYear, yearNum=yearNum)
print pathName
# print df.sample(3)
feed = yahoofeed.Feed()
feed.addBarsFromCSV(instrument, pathName)
class SVMStrategy(strategy.BacktestingStrategy):
def __init__(self, feed, win=10):
super(SVMStrategy, self).__init__(feed)
self.__instrument = instrument
self.__position = None
self.getBroker().setCash(initCapital)
self.getBroker().setCommission(TradePercentage(0.003))
self.getBroker().setAllowNegativeCash(True)
self.getBroker().getFillStrategy().setVolumeLimit(1)
self.getBroker().getFillStrategy().setSlippageModel(VolumeShareSlippage(priceImpact=0.0))
self.__closeDataSeries = feed[instrument].getCloseDataSeries()
self.df = df
self.closeArr = []
self.portfolios = []
self.buys = []
self.sells = []
self.clf = voting
self.X_norm = X_norm
self.y = y
self.actionDates = actionDates
self.win = win
# print 'week count:', len(y)
self.segmentCount = 1
self.dayCount = 0
self.errorCount = 0
self.rightCount = 0
def getDF(self):
return self.df
def getBuys(self):
return self.buys
def getSells(self):
return self.sells
def getCorrectness(self):
return self.rightCount*1.0/(self.errorCount+self.rightCount)
def onEnterOk(self, position):
# execInfo = position.getEntryOrder().getExecutionInfo()
# self.info("%s BUY %.0f shares at %.3f, commission=%.3f, PnL=%.3f" %
# (execInfo.getDateTime().date(), execInfo.getQuantity(), execInfo.getPrice(), execInfo.getCommission(), position.getPnL()))
pass
def onEnterCanceled(self, position):
self.__position = None
def onExitOk(self, position):
# execInfo = position.getExitOrder().getExecutionInfo()
# self.info("%s SELL %.0f shares at %.3f, commission=%.3f, PnL=%.3f" %
# (execInfo.getDateTime().date(), execInfo.getQuantity(), execInfo.getPrice(), execInfo.getCommission(), position.getPnL()))
self.__position = None
def onExitCanceled(self, position):
# If the exit was canceled, re-submit it.
self.__position.exitMarket()
def onStart(self):
pass
def onFinish(self, bars):
self.df['closeArr'] = self.closeArr
self.df['portfolio'] = self.portfolios
# print 'dayCount=',self.dayCount, 'weekCount=',self.weekCount-1
# print 'errorCount=',self.errorCount, 'rightCount=',self.rightCount
pass
def onOrderUpdated(self, order):
execInfo = order.getExecutionInfo()
fillDate = None
if execInfo!=None:
fillDate = execInfo.getDateTime().date()
if order.getAction()==1: self.buys.append(fillDate)
else: self.sells.append(fillDate)
# print 'id=',order.getId(), 'state=',Order.State.toString(order.getState()), 'type=',order.getType(), \
# 'submitAt=',order.getSubmitDateTime().date(), 'fillAt=',fillDate, \
# 'action=',order.getAction(), 'state=',order.getState(), 'active=',order.isActive(), \
# 'quantity=',order.getQuantity(), 'Positions=',self.getBroker().getPositions(), \
# 'cash=', self.getBroker().getCash()
def onBars(self, bars):
self.closeArr.append(bars[self.__instrument].getPrice())
self.portfolios.append(self.getBroker().getEquity())
self.dayCount += 1
curDate = bars[self.__instrument].getDateTime().date()
if curDate!=self.actionDates[self.segmentCount-1]: # 非区间最后一天
return
else: # 区间最后一天
if self.segmentCount < self.win+1:
self.segmentCount += 1
return
else:
X_train = self.X_norm[self.segmentCount - self.win - 1:self.segmentCount - 1]
y_train = self.y[self.segmentCount - self.win - 1:self.segmentCount - 1]
X_test = self.X_norm[self.segmentCount - 1]
y_test = self.y[self.segmentCount - 1]
self.clf.fit(X_train, y_train)
result = self.clf.predict([X_test])[0] # 为-1表示跌,为1表示涨
if result!=y_test: self.errorCount += 1 # 分类错误
else: self.rightCount += 1 # 分类正确
# If a position was not opened, check if we should enter a long position.
if self.__position is None:
if result==1:
shares = int(self.getBroker().getCash() / bars[self.__instrument].getPrice())
hands = shares/100
# Enter a buy market order. The order is good till canceled.
self.__position = self.enterLong(self.__instrument, hands*100, False)
# Check if we have to exit the position.
elif not self.__position.exitActive() and result==-1:
self.__position.exitMarket()
self.segmentCount += 1
pass
def parameters_generator():
win = range(8, 23)
return itertools.product(win)
def testWithBestParameters(win=10):
# 用最佳参数回测
myStrategy = SVMStrategy(feed, win=win)
returnsAnalyzer = returns.Returns()
myStrategy.attachAnalyzer(returnsAnalyzer)
tradesAnalyzer = trades.Trades()
myStrategy.attachAnalyzer(tradesAnalyzer)
myStrategy.run()
df = myStrategy.getDF()
# print df[['Close', 'closeArr', 'fastSMA', 'slowSMA']].sample(5)
buys = myStrategy.getBuys()
sells = myStrategy.getSells()
# print 'TRADE INFO: ', 'count=',tradesAnalyzer.getCount(), 'allProfits=',tradesAnalyzer.getAll(), 'allReturns=',tradesAnalyzer.getAllReturns()
print "Accuracy: %.3f" % myStrategy.getCorrectness()
print "总净值: %.3f" % myStrategy.getResult()
print "总收益率: %.3f" % returnRatio(myStrategy.getResult(), C=initCapital)
print "年化收益率: %.3f" % annualizedReturnRatioSingle(myStrategy.getResult(), C=initCapital, T=250.0*yearNum, D=250.0)
# fig = plt.figure(figsize=(20,10))
# ax1 = fig.add_subplot(211)
# df[['closeArr']].plot(ax=ax1, lw=2.)
# ax1.plot(buys, df.closeArr.ix[buys], '^', markersize=10, color='m')
# ax1.plot(sells, df.closeArr.ix[sells], 'v', markersize=10, color='k')
# ax2 = fig.add_subplot(212)
# portfolio_ratio = df['portfolio']/initCapital
# portfolio_ratio.plot(ax=ax2, lw=2.)
# ax2.plot(buys, portfolio_ratio.ix[buys], '^', markersize=10, color='m')
# ax2.plot(sells, portfolio_ratio.ix[sells], 'v', markersize=10, color='k')
# # ax3 = fig.add_subplot(313)
# # df['portfolio'].plot(ax=ax3, lw=2.)
# # ax3.plot(buys, df['portfolio'].ix[buys], '^', markersize=10, color='m')
# # ax3.plot(sells, df['portfolio'].ix[sells], 'v', markersize=10, color='k')
# fig.tight_layout()
# plt.show()
def test(isOptimize=True, win=9):
if isOptimize: # 寻找最佳参数
results = local.run(SVMStrategy, feed, parameters_generator())
print 'Parameters:', results.getParameters(), 'Result:', results.getResult()
print results.getParameters()[0]
else: # 用最佳参数回测
testWithBestParameters(win=win)
test(isOptimize=False, win=10) | apache-2.0 |
Lawrence-Liu/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
lmr/autotest | frontend/tko/graphing_utils.py | 2 | 32598 | import base64
import tempfile
import pickle
import datetime
import django
import os.path
import getpass
from math import sqrt
# When you import matplotlib, it tries to write some temp files for better
# performance, and it does that to the directory in MPLCONFIGDIR, or, if that
# doesn't exist, the home directory. Problem is, the home directory is not
# writable when running under Apache, and matplotlib's not smart enough to
# handle that. It does appear smart enough to handle the files going
# away after they are written, though.
temp_dir = os.path.join(tempfile.gettempdir(),
'.matplotlib-%s' % getpass.getuser())
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
os.environ['MPLCONFIGDIR'] = temp_dir
import matplotlib
matplotlib.use('Agg')
import matplotlib.figure
import matplotlib.backends.backend_agg
import StringIO
import colorsys
import PIL.Image
import PIL.ImageChops
from autotest.frontend.afe import readonly_connection
from autotest.frontend.afe.model_logic import ValidationError
from simplejson import encoder
from autotest.client.shared import settings
from autotest.frontend.tko import models, tko_rpc_utils
# --- Figure geometry (inches / DPI) ----------------------------------------
_FIGURE_DPI = 100
_FIGURE_WIDTH_IN = 10
_FIGURE_BOTTOM_PADDING_IN = 2 # for x-axis labels
_SINGLE_PLOT_HEIGHT = 6
_MULTIPLE_PLOT_HEIGHT_PER_PLOT = 4
# --- Marker / line styling --------------------------------------------------
# "_MULTIPLE_PLOT_*" styles are applied when several series share one set of
# axes; "_SINGLE_PLOT_*" when each series gets its own subplot (see
# _create_line).
_MULTIPLE_PLOT_MARKER_TYPE = 'o'
_MULTIPLE_PLOT_MARKER_SIZE = 4
_SINGLE_PLOT_STYLE = 'bs-' # blue squares with lines connecting
_SINGLE_PLOT_ERROR_BAR_COLOR = 'r'
# --- Legend / tick-label styling --------------------------------------------
_LEGEND_FONT_SIZE = 'xx-small'
_LEGEND_HANDLE_LENGTH = 0.03
_LEGEND_NUM_POINTS = 3
_LEGEND_MARKER_TYPE = 'o'
_LINE_XTICK_LABELS_SIZE = 'x-small'
_BAR_XTICK_LABELS_SIZE = 8
# Shared JSON encoder used to serialize drilldown callback arguments into the
# generated image-map HTML (see _create_image_html).
_json_encoder = encoder.JSONEncoder()
class NoDataError(Exception):
    """Raised when a graphing query produces an empty result set."""
    pass
def _colors(n):
"""
Generator function for creating n colors. The return value is a tuple
representing the RGB of the color.
"""
for i in xrange(n):
yield colorsys.hsv_to_rgb(float(i) / n, 1.0, 1.0)
def _resort(kernel_labels, list_to_sort):
    """
    Reorder *list_to_sort* using kernel version strings as sort keys.

    kernel_labels: kernel version strings, parallel to list_to_sort
    Returns the reordered list; the kernel keys themselves are discarded.
    """
    sort_keys = [tko_rpc_utils.KernelString(s) for s in kernel_labels]
    # Sort the (key, item) pairs by kernel order, then strip the keys.
    return [item for _, item in sorted(zip(sort_keys, list_to_sort))]
def _quote(string):
return "%s%s%s" % ("'", string.replace("'", r"\'"), "'")
# Page template: embeds the rendered graph as an inline base64 PNG and wires
# it to a client-side image map named after the drilldown callback.
# Substitutions: (png_base64, map_name, map_name, concatenated_area_tags)
_HTML_TEMPLATE = """
<html><head></head><body>
<img src="data:image/png;base64,%s" usemap="#%s"
border="0" alt="graph">
<map name="%s">%s</map>
</body></html>"""
# One clickable rectangle of the image map; clicking invokes the JavaScript
# drilldown callback with the encoded arguments.
# Substitutions: (left, top, right, bottom, tooltip, callback_name, args_json)
_AREA_TEMPLATE = """
<area shape="rect" coords="%i,%i,%i,%i" title="%s"
href="#"
onclick="%s(%s); return false;">"""
class MetricsPlot(object):

    def __init__(self, query_dict, plot_type, inverted_series, normalize_to,
                 drilldown_callback):
        """
        Configuration holder for a metrics plot.

        query_dict: dictionary containing the main query and the drilldown
            queries. The main query returns a row for each x value. The
            first column contains the x-axis label. Subsequent columns
            contain data for each series, named by the column names. A
            column named 'errors-<x>' is interpreted as errors for the
            series named <x>.
        plot_type: 'Line' or 'Bar', depending on the plot type the user wants
        inverted_series: list of series that should be plotted on an
            inverted y-axis
        normalize_to:
            None - do not normalize
            'first' - normalize against the first data point
            'x__%s' - normalize against the x-axis value %s
            'series__%s' - normalize against the series %s
        drilldown_callback: name of drilldown callback method.
        """
        if plot_type not in ('Line', 'Bar'):
            raise ValidationError({'plot': 'Plot must be either Line or Bar'})
        self.query_dict = query_dict
        self.plot_type = plot_type
        self.is_line = (plot_type == 'Line')
        self.inverted_series = inverted_series
        # None is treated as "no normalization"; store it as an empty string
        # so later startswith() checks are safe.
        self.normalize_to = '' if normalize_to is None else normalize_to
        self.drilldown_callback = drilldown_callback
class QualificationHistogram(object):

    def __init__(self, query, filter_string, interval, drilldown_callback):
        """
        Configuration holder for a machine qualification histogram.

        query: main query retrieving the pass-rate information. Column 1 is
            the hostname of each machine matching the global filter; column
            2 ('total') is the number of tests that ran on that machine and
            matched the filter; column 3 ('good') is how many of those
            passed.
        filter_string: filter appended to the common global filter when
            showing the Table View drilldown of a histogram bucket
        interval: bucket width in percent, e.g. 10 gives 0-10%, 10-20%, ...
        drilldown_callback: name of the drilldown callback method.
        """
        self.query = query
        self.filter_string = filter_string
        self.interval = interval
        self.drilldown_callback = drilldown_callback
def _create_figure(height_inches):
    """
    Build a matplotlib.figure.Figure of the requested plot height (inches),
    leaving room below the axes for rotated x-tick labels.
    Returns (figure, total_height_in_pixels).
    """
    total_height_in = height_inches + _FIGURE_BOTTOM_PADDING_IN
    figure = matplotlib.figure.Figure(
        figsize=(_FIGURE_WIDTH_IN, total_height_in),
        dpi=_FIGURE_DPI, facecolor='white')
    # Reserve the padding strip at the bottom for the x-axis labels.
    figure.subplots_adjust(
        bottom=float(_FIGURE_BOTTOM_PADDING_IN) / height_inches)
    return (figure, figure.get_figheight() * _FIGURE_DPI)
def _create_line(plots, labels, plot_info):
    """
    Given all the data for the metrics, create a line plot.
    plots: list of dicts containing the plot data. Each dict contains:
        x: list of x-values for the plot
        y: list of corresponding y-values
        errors: errors for each data point, or None if no error information
            available
        label: plot title
    labels: list of x-tick labels
    plot_info: a MetricsPlot

    Returns (figure, area_data), where area_data describes a clickable
    drilldown rectangle around every plotted point.
    """
    # when we're doing any kind of normalization, all series get put into a
    # single plot
    single = bool(plot_info.normalize_to)
    area_data = []
    lines = []
    if single:
        plot_height = _SINGLE_PLOT_HEIGHT
    else:
        # one stacked subplot per series
        plot_height = _MULTIPLE_PLOT_HEIGHT_PER_PLOT * len(plots)
    figure, height = _create_figure(plot_height)
    if single:
        subplot = figure.add_subplot(1, 1, 1)
    # Plot all the data
    for plot_index, (plot, color) in enumerate(zip(plots, _colors(len(plots)))):
        needs_invert = (plot['label'] in plot_info.inverted_series)
        # Add a new subplot, if user wants multiple subplots
        # Also handle axis inversion for subplots here
        if not single:
            subplot = figure.add_subplot(len(plots), 1, plot_index + 1)
            subplot.set_title(plot['label'])
            if needs_invert:
                # for separate plots, just invert the y-axis
                subplot.set_ylim(1, 0)
        elif needs_invert:
            # for a shared plot (normalized data), need to invert the y values
            # manually, since all plots share a y-axis
            plot['y'] = [-y for y in plot['y']]
        # Plot the series
        subplot.set_xticks(range(0, len(labels)))
        subplot.set_xlim(-1, len(labels))
        # NOTE(review): the _MULTIPLE_PLOT_* constants style the shared
        # ("single" figure) case and _SINGLE_PLOT_* the per-series subplots;
        # the names refer to the number of series per axes, not per figure.
        if single:
            lines += subplot.plot(plot['x'], plot['y'], label=plot['label'],
                                  marker=_MULTIPLE_PLOT_MARKER_TYPE,
                                  markersize=_MULTIPLE_PLOT_MARKER_SIZE)
            error_bar_color = lines[-1].get_color()
        else:
            lines += subplot.plot(plot['x'], plot['y'], _SINGLE_PLOT_STYLE,
                                  label=plot['label'])
            error_bar_color = _SINGLE_PLOT_ERROR_BAR_COLOR
        if plot['errors']:
            subplot.errorbar(plot['x'], plot['y'], linestyle='None',
                             yerr=plot['errors'], color=error_bar_color)
        # tick labels are applied once after the loop (bottom-most subplot)
        subplot.set_xticklabels([])
    # Construct the information for the drilldowns.
    # We need to do this in a separate loop so that all the data is in
    # matplotlib before we start calling transform(); otherwise, it will return
    # incorrect data because it hasn't finished adjusting axis limits.
    for line in lines:
        # Get the pixel coordinates of each point on the figure
        x = line.get_xdata()
        y = line.get_ydata()
        label = line.get_label()
        icoords = line.get_transform().transform(zip(x, y))
        # Get the appropriate drilldown query
        drill = plot_info.query_dict['__' + label + '__']
        # Set the title attributes (hover-over tool-tips)
        x_labels = [labels[x_val] for x_val in x]
        titles = ['%s - %s: %f' % (label, x_label, y_val)
                  for x_label, y_val in zip(x_labels, y)]
        # Get the appropriate parameters for the drilldown query
        params = [dict(query=drill, series=line.get_label(), param=x_label)
                  for x_label in x_labels]
        # 10x10-pixel hit box centered on each point; y is flipped because
        # image-map coordinates grow downward while figure coords grow upward
        area_data += [dict(left=ix - 5, top=height - iy - 5,
                           right=ix + 5, bottom=height - iy + 5,
                           title=title,
                           callback=plot_info.drilldown_callback,
                           callback_arguments=param_dict)
                      for (ix, iy), title, param_dict
                      in zip(icoords, titles, params)]
    # 'subplot' is the last subplot created: only the bottom axes get labels
    subplot.set_xticklabels(labels, rotation=90, size=_LINE_XTICK_LABELS_SIZE)
    # Show the legend if there are not multiple subplots
    if single:
        font_properties = matplotlib.font_manager.FontProperties(
            size=_LEGEND_FONT_SIZE)
        legend = figure.legend(lines, [plot['label'] for plot in plots],
                               prop=font_properties,
                               handlelen=_LEGEND_HANDLE_LENGTH,
                               numpoints=_LEGEND_NUM_POINTS)
        # Workaround for matplotlib not keeping all line markers in the legend -
        # it seems if we don't do this, matplotlib won't keep all the line
        # markers in the legend.
        for line in legend.get_lines():
            line.set_marker(_LEGEND_MARKER_TYPE)
    return (figure, area_data)
def _get_adjusted_bar(x, bar_width, series_index, num_plots):
"""
Adjust the list 'x' to take the multiple series into account. Each series
should be shifted such that the middle series lies at the appropriate x-axis
tick with the other bars around it. For example, if we had four series
(i.e. four bars per x value), we want to shift the left edges of the bars as
such:
Bar 1: -2 * width
Bar 2: -width
Bar 3: none
Bar 4: width
"""
adjust = (-0.5 * num_plots - 1 + series_index) * bar_width
return [x_val + adjust for x_val in x]
# TODO(showard): merge much of this function with _create_line by extracting and
# parameterizing methods
def _create_bar(plots, labels, plot_info):
    """
    Given all the data for the metrics, create a bar plot.
    plots: list of dicts containing the plot data.
        x: list of x-values for the plot
        y: list of corresponding y-values
        errors: errors for each data point, or None if no error information
            available
        label: plot title
    labels: list of x-tick labels
    plot_info: a MetricsPlot

    Returns (figure, area_data), where area_data describes a clickable
    drilldown rectangle covering each bar.
    """
    area_data = []
    bars = []
    figure, height = _create_figure(_SINGLE_PLOT_HEIGHT)
    # Set up the plot
    subplot = figure.add_subplot(1, 1, 1)
    subplot.set_xticks(range(0, len(labels)))
    subplot.set_xlim(-1, len(labels))
    subplot.set_xticklabels(labels, rotation=90, size=_BAR_XTICK_LABELS_SIZE)
    # draw a bold line at y=0, making it easier to tell if bars are dipping
    # below the axis or not.
    subplot.axhline(linewidth=2, color='black')
    # width here is the width for each bar in the plot. Matplotlib default is
    # 0.8.
    width = 0.8 / len(plots)
    # Plot the data
    for plot_index, (plot, color) in enumerate(zip(plots, _colors(len(plots)))):
        # Invert the y-axis if needed
        if plot['label'] in plot_info.inverted_series:
            plot['y'] = [-y for y in plot['y']]
        # series_index is 1-based for _get_adjusted_bar
        adjusted_x = _get_adjusted_bar(plot['x'], width, plot_index + 1,
                                       len(plots))
        bar_data = subplot.bar(adjusted_x, plot['y'],
                               width=width, yerr=plot['errors'],
                               facecolor=color,
                               label=plot['label'])
        # keep one patch per series for the legend
        bars.append(bar_data[0])
    # Construct the information for the drilldowns.
    # See comment in _create_line for why we need a separate loop to do this.
    for plot_index, plot in enumerate(plots):
        adjusted_x = _get_adjusted_bar(plot['x'], width, plot_index + 1,
                                       len(plots))
        # Let matplotlib plot the data, so that we can get the data-to-image
        # coordinate transforms
        line = subplot.plot(adjusted_x, plot['y'], linestyle='None')[0]
        label = plot['label']
        # bar top-left corners in pixel coordinates...
        upper_left_coords = line.get_transform().transform(zip(adjusted_x,
                                                               plot['y']))
        # ...and bottom-right corners (bar base at y=0, one width to the right)
        bottom_right_coords = line.get_transform().transform(
            [(x + width, 0) for x in adjusted_x])
        # Get the drilldown query
        drill = plot_info.query_dict['__' + label + '__']
        # Set the title attributes
        x_labels = [labels[x] for x in plot['x']]
        titles = ['%s - %s: %f' % (plot['label'], label, y)
                  for label, y in zip(x_labels, plot['y'])]
        params = [dict(query=drill, series=plot['label'], param=x_label)
                  for x_label in x_labels]
        # y is flipped: image-map coordinates grow downward
        area_data += [dict(left=ulx, top=height - uly,
                           right=brx, bottom=height - bry,
                           title=title,
                           callback=plot_info.drilldown_callback,
                           callback_arguments=param_dict)
                      for (ulx, uly), (brx, bry), title, param_dict
                      in zip(upper_left_coords, bottom_right_coords, titles,
                             params)]
    figure.legend(bars, [plot['label'] for plot in plots])
    return (figure, area_data)
def _normalize(data_values, data_errors, base_values, base_errors):
"""
Normalize the data against a baseline.
data_values: y-values for the to-be-normalized data
data_errors: standard deviations for the to-be-normalized data
base_values: list of values normalize against
base_errors: list of standard deviations for those base values
"""
values = []
for value, base in zip(data_values, base_values):
try:
values.append(100 * (value - base) / base)
except ZeroDivisionError:
# Base is 0.0 so just simplify:
# If value < base: append -100.0;
# If value == base: append 0.0 (obvious); and
# If value > base: append 100.0.
values.append(100 * float(cmp(value, base)))
# Based on error for f(x,y) = 100 * (x - y) / y
if data_errors:
if not base_errors:
base_errors = [0] * len(data_errors)
errors = []
for data, error, base_value, base_error in zip(
data_values, data_errors, base_values, base_errors):
try:
errors.append(sqrt(error ** 2 * (100 / base_value) ** 2
+ base_error ** 2 * (100 * data / base_value ** 2) ** 2
+ error * base_error * (100 / base_value ** 2) ** 2))
except ZeroDivisionError:
# Again, base is 0.0 so do the simple thing.
errors.append(100 * abs(error))
else:
errors = None
return (values, errors)
def _create_png(figure):
    """
    Render *figure* to PNG bytes, cropping surrounding whitespace.
    Returns (png_data, bounding_box) where bounding_box is the crop region.
    """
    # Draw the image
    canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(figure)
    canvas.draw()
    size = canvas.get_renderer().get_canvas_width_height()
    rgb_string = canvas.tostring_rgb()
    image = PIL.Image.fromstring('RGB', size, rgb_string, 'raw', 'RGB', 0, 1)
    background = PIL.Image.new(image.mode, image.size,
                               figure.get_facecolor())
    # Crop out the surrounding whitespace: diff the render against a solid
    # background-colored image and keep only the region that differs.
    difference = PIL.ImageChops.difference(image, background)
    bounding_box = difference.getbbox()
    image = image.crop(bounding_box)
    png_buffer = StringIO.StringIO()
    image.save(png_buffer, format='PNG')
    return png_buffer.getvalue(), bounding_box
def _create_image_html(figure, area_data, plot_info):
    """
    Given the figure and drilldown data, construct the HTML that will render
    the graph as a PNG image, and attach the image map to that image.
    figure: figure containing the drawn plot(s)
    area_data: list of parameters for each area of the image map. See the
        definition of the template string '_AREA_TEMPLATE'
    plot_info: a MetricsPlot or QualificationHistogram
    """
    png, bbox = _create_png(figure)
    left_offset, top_offset = bbox[0], bbox[1]
    # Construct the list of image map areas, shifting every rectangle by the
    # crop offset so coordinates match the cropped PNG.
    areas = []
    for data in area_data:
        encoded_args = _json_encoder.encode(data['callback_arguments'])
        encoded_args = encoded_args.replace('"', '&quot;')
        areas.append(_AREA_TEMPLATE %
                     (data['left'] - left_offset, data['top'] - top_offset,
                      data['right'] - left_offset,
                      data['bottom'] - top_offset,
                      data['title'], data['callback'], encoded_args))
    map_name = plot_info.drilldown_callback + '_map'
    return _HTML_TEMPLATE % (base64.b64encode(png), map_name, map_name,
                             '\n'.join(areas))
def _find_plot_by_label(plots, label):
for index, plot in enumerate(plots):
if plot['label'] == label:
return index
raise ValueError('no plot labeled "%s" found' % label)
def _normalize_to_series(plots, base_series):
    """
    Normalize every series in *plots* (in place) against the series named
    *base_series*; the baseline series itself is removed from *plots*.
    Points with no matching x-value in the baseline are dropped.
    Raises NoDataError if a series shares no x-values with the baseline.
    """
    base_series_index = _find_plot_by_label(plots, base_series)
    base_plot = plots[base_series_index]
    base_xs = base_plot['x']
    base_values = base_plot['y']
    base_errors = base_plot['errors']
    # The baseline is not plotted once everything is expressed relative to it.
    del plots[base_series_index]
    for plot in plots:
        old_xs, old_values, old_errors = plot['x'], plot['y'], plot['errors']
        new_xs, new_values, new_errors = [], [], []
        new_base_values, new_base_errors = [], []
        # Select only points in the to-be-normalized data that have a
        # corresponding baseline value
        for index, x_value in enumerate(old_xs):
            try:
                base_index = base_xs.index(x_value)
            except ValueError:
                # no baseline point at this x; drop the point
                continue
            new_xs.append(x_value)
            new_values.append(old_values[index])
            new_base_values.append(base_values[base_index])
            if old_errors:
                new_errors.append(old_errors[index])
                new_base_errors.append(base_errors[base_index])
        if not new_xs:
            raise NoDataError('No normalizable data for series ' +
                              plot['label'])
        plot['x'] = new_xs
        plot['y'] = new_values
        if old_errors:
            plot['errors'] = new_errors
        # _normalize ignores the base errors when plot['errors'] is falsy
        plot['y'], plot['errors'] = _normalize(plot['y'], plot['errors'],
                                               new_base_values,
                                               new_base_errors)
def _create_metrics_plot_helper(plot_info, extra_text=None):
    """
    Create a metrics plot of the given plot data.
    plot_info: a MetricsPlot object.
    extra_text: text to show at the uppper-left of the graph

    Returns (figure, area_data), where area_data describes the clickable
    drilldown regions of the rendered image.

    Raises NoDataError if the main query returns no rows or a series has no
    usable points, and ValidationError for bad normalization settings.

    TODO(showard): move some/all of this logic into methods on MetricsPlot
    """
    query = plot_info.query_dict['__main__']
    cursor = readonly_connection.connection().cursor()
    cursor.execute(query)
    if not cursor.rowcount:
        raise NoDataError('query did not return any data')
    rows = cursor.fetchall()
    # "transpose" rows, so columns[0] is all the values from the first column,
    # etc.
    columns = zip(*rows)
    plots = []
    labels = [str(label) for label in columns[0]]
    # kernel labels need version-aware sorting rather than plain string order
    needs_resort = (cursor.description[0][0] == 'kernel')
    # Collect all the data for the plot. Column 0 holds the x-axis labels;
    # each later column is one series, optionally followed by an
    # 'errors-<series>' column holding its error bars.
    col = 1
    while col < len(cursor.description):
        y = columns[col]
        label = cursor.description[col][0]
        col += 1
        if (col < len(cursor.description) and
                'errors-' + label == cursor.description[col][0]):
            errors = columns[col]
            col += 1
        else:
            errors = None
        if needs_resort:
            y = _resort(labels, y)
            if errors:
                errors = _resort(labels, errors)
        # Drop missing (None) points; x keeps the indices of surviving points
        x = [index for index, value in enumerate(y) if value is not None]
        if not x:
            raise NoDataError('No data for series ' + label)
        y = [y[i] for i in x]
        if errors:
            errors = [errors[i] for i in x]
        plots.append({
            'label': label,
            'x': x,
            'y': y,
            'errors': errors
        })
    if needs_resort:
        labels = _resort(labels, labels)
    # Normalize the data if necessary
    normalize_to = plot_info.normalize_to
    if normalize_to == 'first' or normalize_to.startswith('x__'):
        if normalize_to != 'first':
            baseline = normalize_to[3:]
            try:
                baseline_index = labels.index(baseline)
            except ValueError:
                raise ValidationError({
                    'Normalize': 'Invalid baseline %s' % baseline
                })
        for plot in plots:
            if normalize_to == 'first':
                plot_index = 0
            else:
                try:
                    plot_index = plot['x'].index(baseline_index)
                # if the value is not found, then we cannot normalize
                except ValueError:
                    raise ValidationError({
                        'Normalize': ('%s does not have a value for %s'
                                      % (plot['label'], normalize_to[3:]))
                    })
            base_values = [plot['y'][plot_index]] * len(plot['y'])
            # Bug fix: base_errors used to be assigned only inside the 'if'
            # below and then read unconditionally as 'None or base_errors',
            # which raised UnboundLocalError when the first series had no
            # error column and silently reused a stale value from an earlier
            # series otherwise.  Compute it fresh for every plot.
            if plot['errors']:
                base_errors = ([plot['errors'][plot_index]] *
                               len(plot['errors']))
            else:
                base_errors = None
            plot['y'], plot['errors'] = _normalize(plot['y'], plot['errors'],
                                                   base_values, base_errors)
    elif normalize_to.startswith('series__'):
        base_series = normalize_to[8:]
        _normalize_to_series(plots, base_series)
    # Call the appropriate function to draw the line or bar plot
    if plot_info.is_line:
        figure, area_data = _create_line(plots, labels, plot_info)
    else:
        figure, area_data = _create_bar(plots, labels, plot_info)
    # TODO(showard): extract these magic numbers to named constants
    if extra_text:
        text_y = .95 - .0075 * len(plots)
        figure.text(.1, text_y, extra_text, size='xx-small')
    return (figure, area_data)
def create_metrics_plot(query_dict, plot_type, inverted_series, normalize_to,
                        drilldown_callback, extra_text=None):
    """Render a metrics plot and return it as HTML (inline PNG + image map)."""
    info = MetricsPlot(query_dict, plot_type, inverted_series, normalize_to,
                       drilldown_callback)
    figure, areas = _create_metrics_plot_helper(info, extra_text)
    return _create_image_html(figure, areas, info)
def _get_hostnames_in_bucket(hist_data, bucket):
"""
Get all the hostnames that constitute a particular bucket in the histogram.
hist_data: list containing tuples of (hostname, pass_rate)
bucket: tuple containing the (low, high) values of the target bucket
"""
return [hostname for hostname, pass_rate in hist_data
if bucket[0] <= pass_rate < bucket[1]]
def _create_qual_histogram_helper(plot_info, extra_text=None):
    """
    Create a machine qualification histogram of the given data.
    plot_info: a QualificationHistogram
    extra_text: text to show at the upper-left of the graph

    Returns (figure, area_data), where area_data describes the clickable
    drilldown regions (one per histogram bar).

    TODO(showard): move much or all of this into methods on
    QualificationHistogram
    """
    cursor = readonly_connection.connection().cursor()
    cursor.execute(plot_info.query)
    if not cursor.rowcount:
        raise NoDataError('query did not return any data')
    # Lists to store the plot data.
    # hist_data store tuples of (hostname, pass_rate) for machines that have
    # pass rates between 0 and 100%, exclusive.
    # no_tests is a list of machines that have run none of the selected tests
    # no_pass is a list of machines with 0% pass rate
    # perfect is a list of machines with a 100% pass rate
    hist_data = []
    no_tests = []
    no_pass = []
    perfect = []
    # Construct the lists of data to plot
    for hostname, total, good in cursor.fetchall():
        if total == 0:
            no_tests.append(hostname)
            continue
        if good == 0:
            no_pass.append(hostname)
        elif good == total:
            perfect.append(hostname)
        else:
            percentage = 100.0 * good / total
            hist_data.append((hostname, percentage))
    interval = plot_info.interval
    # NOTE(review): relies on Python 2's list-returning range(); under
    # Python 3 the .append() below would fail.
    bins = range(0, 100, interval)
    if bins[-1] != 100:
        bins.append(bins[-1] + interval)
    figure, height = _create_figure(_SINGLE_PLOT_HEIGHT)
    subplot = figure.add_subplot(1, 1, 1)
    # Plot the data and get all the bars plotted
    _, _, bars = subplot.hist([data[1] for data in hist_data],
                              bins=bins, align='left')
    # three "special" bars: 0% pass, 100% pass, and no tests run (N/A)
    bars += subplot.bar([-interval], len(no_pass),
                        width=interval, align='center')
    bars += subplot.bar([bins[-1]], len(perfect),
                        width=interval, align='center')
    bars += subplot.bar([-3 * interval], len(no_tests),
                        width=interval, align='center')
    buckets = [(bin, min(bin + interval, 100)) for bin in bins[:-1]]
    # set the x-axis range to cover all the normal bins plus the three "special"
    # ones - N/A (3 intervals left), 0% (1 interval left) ,and 100% (far right)
    subplot.set_xlim(-4 * interval, bins[-1] + interval)
    subplot.set_xticks([-3 * interval, -interval] + bins + [100 + interval])
    subplot.set_xticklabels(['N/A', '0%'] +
                            ['%d%% - <%d%%' % bucket for bucket in buckets] +
                            ['100%'], rotation=90, size='small')
    # Find the coordinates on the image for each bar
    x = []
    y = []
    for bar in bars:
        x.append(bar.get_x())
        y.append(bar.get_height())
    # plot an invisible series to obtain the data-to-pixel transform
    f = subplot.plot(x, y, linestyle='None')[0]
    upper_left_coords = f.get_transform().transform(zip(x, y))
    bottom_right_coords = f.get_transform().transform(
        [(x_val + interval, 0) for x_val in x])
    # Set the title attributes
    titles = ['%d%% - <%d%%: %d machines' % (bucket[0], bucket[1], y_val)
              for bucket, y_val in zip(buckets, y)]
    titles.append('0%%: %d machines' % len(no_pass))
    titles.append('100%%: %d machines' % len(perfect))
    titles.append('N/A: %d machines' % len(no_tests))
    # Get the hostnames for each bucket in the histogram
    names_list = [_get_hostnames_in_bucket(hist_data, bucket)
                  for bucket in buckets]
    names_list += [no_pass, perfect]
    if plot_info.filter_string:
        plot_info.filter_string += ' AND '
    # Construct the list of drilldown parameters to be passed when the user
    # clicks on the bar.
    params = []
    for names in names_list:
        if names:
            hostnames = ','.join(_quote(hostname) for hostname in names)
            hostname_filter = 'hostname IN (%s)' % hostnames
            full_filter = plot_info.filter_string + hostname_filter
            params.append({'type': 'normal',
                           'filterString': full_filter})
        else:
            params.append({'type': 'empty'})
    # the N/A bar drills down to a plain host list instead of a filter
    params.append({'type': 'not_applicable',
                   'hosts': '<br />'.join(no_tests)})
    # y is flipped: image-map coordinates grow downward
    area_data = [dict(left=ulx, top=height - uly,
                      right=brx, bottom=height - bry,
                      title=title, callback=plot_info.drilldown_callback,
                      callback_arguments=param_dict)
                 for (ulx, uly), (brx, bry), title, param_dict
                 in zip(upper_left_coords, bottom_right_coords, titles, params)]
    # TODO(showard): extract these magic numbers to named constants
    if extra_text:
        figure.text(.1, .95, extra_text, size='xx-small')
    return (figure, area_data)
def create_qual_histogram(query, filter_string, interval, drilldown_callback,
                          extra_text=None):
    """Render a qualification histogram and return it as HTML (PNG + map)."""
    info = QualificationHistogram(query, filter_string, interval,
                                  drilldown_callback)
    figure, areas = _create_qual_histogram_helper(info, extra_text)
    return _create_image_html(figure, areas, info)
def create_embedded_plot(model, update_time):
    """
    Given an EmbeddedGraphingQuery object, generate the PNG image for it.
    model: EmbeddedGraphingQuery object
    update_time: 'Last updated' time stamped onto the image
    """
    params = pickle.loads(model.params)
    extra_text = 'Last updated: %s' % update_time
    graph_type = model.graph_type
    if graph_type == 'metrics':
        plot_info = MetricsPlot(query_dict=params['queries'],
                                plot_type=params['plot'],
                                inverted_series=params['invert'],
                                normalize_to=None,
                                drilldown_callback='')
        figure, _unused_areas = _create_metrics_plot_helper(plot_info,
                                                            extra_text)
    elif graph_type == 'qual':
        plot_info = QualificationHistogram(
            query=params['query'], filter_string=params['filter_string'],
            interval=params['interval'], drilldown_callback='')
        figure, _unused_areas = _create_qual_histogram_helper(plot_info,
                                                              extra_text)
    else:
        raise ValueError('Invalid graph_type %s' % model.graph_type)
    # only the PNG bytes are needed; the image map is irrelevant here
    image, _unused_bbox = _create_png(figure)
    return image
# Minutes a request may hold the "refresh in progress" marker before another
# request is allowed to regenerate a cached graph (see handle_plot_request).
_cache_timeout = settings.settings.get_value('AUTOTEST_WEB',
                                             'graph_cache_creation_timeout_minutes')
def handle_plot_request(id, max_age):
    """
    Given the embedding id of a graph, generate a PNG of the embedded graph
    associated with that id.
    id: id of the embedded graph
    max_age: maximum age, in minutes, that a cached version should be held

    Returns the cached PNG bytes; if another request is concurrently
    refreshing the image, the current (possibly stale) cache is returned.
    """
    model = models.EmbeddedGraphingQuery.objects.get(id=id)
    # Check if the cached image needs to be updated
    now = datetime.datetime.now()
    update_time = model.last_updated + datetime.timedelta(minutes=int(max_age))
    if now > update_time:
        cursor = django.db.connection.cursor()
        # We want this query to update the refresh_time only once, even if
        # multiple threads are running it at the same time. That is, only the
        # first thread will win the race, and it will be the one to update the
        # cached image; all other threads will show that they updated 0 rows
        query = """
            UPDATE embedded_graphing_queries
            SET refresh_time = NOW()
            WHERE id = %s AND (
                refresh_time IS NULL OR
                refresh_time + INTERVAL %s MINUTE < NOW()
            )
            """
        # _cache_timeout bounds how long a crashed refresher can block others
        cursor.execute(query, (id, _cache_timeout))
        # Only refresh the cached image if we were successful in updating the
        # refresh time
        if cursor.rowcount:
            model.cached_png = create_embedded_plot(model, now.ctime())
            model.last_updated = now
            # clear the in-progress marker so the next expiry can refresh
            model.refresh_time = None
            model.save()
    return model.cached_png
| gpl-2.0 |
UMN-Hydro/GSFLOW_pre-processor | python_scripts/GSFLOW_print_controlfile_Shullcas.py | 1 | 22307 | """
Created on Sun Sep 10 22:06:46 2017
Converting from GSFLOW_print_controlfile4_gcng_melt30yr.m
Creates inputs files to run GSFLOW, based on the "Sagehen" example but
modified for Chimborazo's Gavilan Machay watershed. Crystal's AGU2016
poster.
GSFLOW Input files:
- control file (generate with GSFLOW_print_controlfile1.m - NOT YET WRITTEN)
- parameter files (generate with GSFLOW_print_PRMSparamfile1.m and GSFLOW_print_GSFLOWparamfile1.m - NOT YET WRITTEN)
- variable / data files (generate with GSFLOW_print_ObsMet_files1.m - NOT YET WRITTEN)
(Control file includes names of parameter and data files. Thus,
parameter and variable file names must match those specified there!!)
**** Meltwater, 30-yrs spin-up ****
- precip with melt (precip*.day)
- no veg shift (ChimTest_GSFLOW.param)
- no plus 1C (tmin*.day, tmax*.day)
- run 30yrs (start_date, end_date here and in MODFLOW)
@author: gcng
"""
#==============================================================================
# ## Control file
#
# # see PRMS manual:
# # - list of control variables in Appendix 1 Table 1-2 (p.33-36),
# # - description of control file on p.126
#
# # general syntax (various parameters listed in succession):
# # line 1: '####'
# # line 2: control parameter name
# # line 3: number of parameter values specified
# # line 4: data type -> 1=int, 2=single prec, 3=double prec, 4=char str
# # line 5-end: parameter values, 1 per line
# #
# # *** CUSTOMIZE TO YOUR COMPUTER! *****************************************
# # NOTE: '/' is directory separator for Linux, '\' for Windows!!
#==============================================================================
import numpy as np # matlab core
import scipy as sp # matlab toolboxes
import matplotlib.pyplot as plt # matlab-like plots
import os # os functions
import settings
import platform
# control file that will be written with this script
# (will be in control_dir with model mode suffix)
con_filname0 = settings.PROJ_CODE
# - choose one:
# model_mode = 'WRITE_CLIMATE'; # creates pre-processed climate_hru files
# model_mode = 'PRMS'; # run only PRMS
# model_mode = 'MODFLOW'; # run only MODFLOW-2005
model_mode = 'GSFLOW' # run coupled PRMS-MODFLOW
# data file that the control file will point to (generate with PRMS_print_climate_hru_files2.m)
datafil = settings.PRMSinput_dir + 'empty.day'
# parameter file that the control file will point to (generate with PRMS_print_paramfile3.m)
parfil_pre = settings.PROJ_CODE # will have '_', model_mode following
# MODFLOW namefile that the control file will point to (generate with write_nam_MOD.m)
namfil = settings.MODFLOWinput_dir + 'test2lay_py.nam'
# output directory that the control file will point to for creating output files (include slash at end!)
outdir = settings.PRMSoutput_dir
# model start and end dates
# ymdhms_v = [ 2015 6 16 0 0 0; ...
# 2016 6 24 0 0 0];
#ymdhms_v = np.array([[ 2015, 6, 16, 0, 0, 0],
# [ 2020, 6, 15, 0, 0, 0]])
#ymdhms_v = np.array([[ 1990, 4, 23, 0, 0, 0],
# [ 2017, 9, 27, 0, 0, 0]])
# ymdhms_v = [ 2015 6 16 0 0 0; ...
# 2025 6 15 0 0 0];
#ymdhms_v = np.array([[ 2015, 6, 16, 0, 0, 0],
# [ 2025, 6, 15, 0, 0, 0]])
####
ymdhms_v = np.array([[ 2013, 8, 26, 0, 0, 0],
[ 2016, 9, 29, 0, 0, 0]])
#First MODFLOW initial stress period (can be earlier than model start date;
# useful when using load_init_file and modflow period represents longer
# period that started earlier).
ymdhms_m = ymdhms_v[0]
# initial condition files
# (see /home/gcng/workspace/Models/GSFLOW/GSFLOW_1.2.0/data/sagehen_restart
# as example for how to stitch together many restarts using a shell script)
if model_mode == 'GSFLOW':
fl_load_init = 0 # 1 to load previously saved initial conditions
# load initial conditions from this file
# load_init_file = PRMSoutput_dir + 'init_cond_infile' # load initial conditions from this file
load_init_file = settings.PRMSoutput_dir + 'init_cond_outfile'
fl_save_init = 1 # 1 to save outputs as initial conditions
save_init_file = settings.PRMSoutput_dir + 'init_cond_outfile' # save new results as initial conditions in this file
# 1: use all pre-processed met data
fl_all_climate_hru = 0 # (could set to = False)
if fl_all_climate_hru == 0:
# pick one:
# precip_module = 'precip_1sta'
precip_module = 'climate_hru'
# pick one:
# temp_module = 'temp_1sta';
temp_module = 'climate_hru';
et_module = 'potet_pt' # set pt_alpha(nhru, nmonth), currently all = 1.26
solrad_module = 'ddsolrad'
# If climate_hru, use the following file names (else ignored)
# (note that WRITE_CLIMATE will produce files with the below names)
# precip_datafil = strcat(PRMSinput_dir, 'precip_rep30yr.day'); # w/o meltwater
precip_datafil = settings.PRMSinput_dir + 'precip.day'
# tmax_datafil = strcat(PRMSinput_dir, 'tmax_rep30yr_tadj_plus1C.day'); # to be safe: set as F
# tmin_datafil = strcat(PRMSinput_dir, 'tmin_rep30yr_tadj_plus1C.day'); # to be safe: set as F
tmax_datafil = settings.PRMSinput_dir +'tmax.day' # to be safe: set as F
tmin_datafil = settings.PRMSinput_dir + 'tmin.day' # to be safe: set as F
solrad_datafil = settings.PRMSinput_dir + 'swrad.day'
pet_datafil = settings.PRMSinput_dir + 'potet.day'
humidity_datafil = settings.PRMSinput_dir + 'humidity.day' # for potet_pm
transp_datafil = settings.PRMSinput_dir + 'transp.day' # may not be needed in GSFLOW? Is needed!
# *************************************************************************
# Project-specific entries ->
title_str = settings.PROJ_NAME
# n_par_max should be dynamically generated
con_par_name = [] # name of control file parameter
con_par_num = [] # number of values for a control parameter
con_par_type = [] # 1=int, 2=single prec, 3=double prec, 4=char str
#con_par_values = np.empty((n_par_max,1), dtype=np.object) # control parameter values
con_par_values = [] # control parameter values
# First 2 blocks should be specified, rest are optional (though default
# values exist for all variables, see last column of App 1 Table 1-2 p.33).
# 1 - Variables pertaining to simulation execution and required input and output files
# (some variable values left out if default is the only choice we want)
con_par_name.append('model_mode') # typically 'PRMS', also 'FROST' or 'WRITE_CLIMATE'
con_par_type.append(4)
con_par_values.append(model_mode) #
con_par_name.append('modflow_name')
con_par_type.append(4)
con_par_values.append(namfil)
# no more inputs needed for MODFLOW-only runs
if model_mode != 'MODFLOW':
# for GSFLOW
if model_mode == 'GSFLOW':
con_par_name.append('csv_output_file')
con_par_type.append(4)
con_par_values.append(outdir + 'gsflow.csv')
con_par_name.append('modflow_time_zero')
con_par_type.append(1)
con_par_values.append(ymdhms_m) # year, month, day, hour, minute, second
con_par_name.append('gsflow_output_file')
con_par_type.append(4)
con_par_values.append(outdir + 'gsflow.out')
con_par_name.append('gsf_rpt') # flag to create csv output file
con_par_type.append(1)
con_par_values.append(1)
con_par_name.append('rpt_days') # Frequency with which summary tables are written;default 7
con_par_type.append(1)
con_par_values.append(7)
con_par_name.append('start_time')
con_par_type.append(1)
con_par_values.append(ymdhms_v[0,:])
con_par_name.append('end_time')
con_par_type.append(1)
con_par_values.append(ymdhms_v[1,:]) # year, month, day, hour, minute, second
con_par_name.append('data_file')
con_par_type.append(4)
con_par_values.append(datafil)
parfil = settings.PRMSinput_dir + parfil_pre + '_' + model_mode + '.param'
con_par_name.append('param_file')
con_par_type.append(4)
con_par_values.append(parfil)
con_par_name.append('model_output_file')
con_par_type.append(4 )
con_par_values.append(outdir + 'prms.out')
# new for GSFLOW
con_par_name.append('parameter_check_flag')
con_par_type.append(1)
con_par_values.append(1)
con_par_name.append('cbh_check_flag')
con_par_type.append(1)
con_par_values.append(1)
# 2 - Variables pertaining to module selection and simulation options
# - module selection:
# See PRMS manual: Table 2 (pp. 12-13), summary pp. 14-16, details in
# Appendix 1 (pp. 29-122)
# meteorological data
con_par_name.append('precip_module') # precip distribution method (should match temp)
con_par_type.append(4)
if fl_all_climate_hru == 1:
con_par_values.append('climate_hru') # climate_hru, ide_dist, precip_1sta, precip_dist2, precip_laps, or xyz_dist
else:
con_par_values.append(precip_module) # climate_hru, ide_dist, precip_1sta, precip_dist2, precip_laps, or xyz_dist
if con_par_values[-1] == 'climate_hru': # index -1 for last element
con_par_name.append('precip_day') # file with precip data for each HRU
con_par_type.append(4)
con_par_values.append(precip_datafil) # file name
# Below: harmless mistake in original matlab version
con_par_name.append('humidity_day')
con_par_type.append(4)
con_par_values.append(humidity_datafil) # file name
con_par_name.append('temp_module') # temperature distribution method (should match precip)
con_par_type.append(4)
if fl_all_climate_hru == 1:
con_par_values.append('climate_hru') # climate_hru, temp_1sta, temp_dist2, temp_laps, ide_dist, xyz_dist
else:
con_par_values.append(temp_module) # climate_hru, ide_dist, precip_1sta, precip_dist2, precip_laps, or xyz_dist
if con_par_values[-1] == 'climate_hru':
con_par_name.append('tmax_day') # file with precip data for each HRU
con_par_type.append(4)
con_par_values.append(tmax_datafil) # file name
con_par_name.append('tmin_day') # file with precip data for each HRU
con_par_type.append(4)
con_par_values.append(tmin_datafil) # file name
con_par_name.append('solrad_module') # solar rad distribution method
con_par_type.append(4)
if fl_all_climate_hru == 1:
con_par_values.append('climate_hru') # climate_hru, temp_1sta, temp_dist2, temp_laps, ide_dist, xyz_dist
else:
con_par_values.append(solrad_module) # climate_hru, ide_dist, precip_1sta, precip_dist2, precip_laps, or xyz_dist
if con_par_values[-1] == 'climate_hru':
con_par_name.append('swrad_day') # file with precip data for each HRU
con_par_type.append(4)
con_par_values.append(solrad_datafil) # file name
con_par_name.append('et_module') # method for calculating ET
con_par_type.append(4)
if fl_all_climate_hru == 1:
con_par_values.append('climate_hru') # climate_hru, temp_1sta, temp_dist2, temp_laps, ide_dist, xyz_dist
else:
con_par_values.append(et_module) # climate_hru, ide_dist, precip_1sta, precip_dist2, precip_laps, or xyz_dist
if con_par_values[-1] == 'climate_hru':
con_par_name.append('potet_day,') # file with precip data for each HRU
con_par_type.append(4)
con_par_values.append(pet_datafil) # file name
con_par_name.append('transp_module') # transpiration simulation method
con_par_type.append(4)
con_par_values.append('transp_tindex') # climate_hru, transp_frost, or transp_tindex
if con_par_values[-1] == 'climate_hru':
con_par_name.append('transp_day,') # file with precip data for each HRU
con_par_type.append(4)
con_par_values.append(transp_datafil) # file name
# Climate-by-HRU Files
con_par_name.append('cbh_binary_flag') # to use binary format cbh files
con_par_type.append(1)
con_par_values.append(0) # 0 for no, use default
#Read a CBH file with humidity data
con_par_name.append('humidity_cbh_flag') # humidity cbh files (for Penman-Monteith ET (potet_pm))
con_par_type.append(1)
con_par_values.append(0) # 0 for no, use default
#Variable orad
con_par_name.append('orad_flag') # humidity cbh files (not needed)
con_par_type.append(1)
con_par_values.append(0) # 0 for no, use default
con_par_name.append('srunoff_module') # surface runoff/infil calc method
con_par_type.append(4)
# con_par_values.append('srunoff_smidx_casc') # runoff_carea or srunoff_smidx
con_par_values.append('srunoff_smidx') # runoff_carea or srunoff_smidx (updated name for GSFLOW)
# strmflow: directly routes runoff to basin outlet
# muskingum: moves through stream segments, change in stream segment storages is by Muskingum eq
# strmflow_in_out: moves through stream segments, input to stream segment = output to stream segment
# strmflow_lake: for lakes...
ind = np.squeeze(np.where(np.array(con_par_name) == 'model_mode'))
if con_par_values[ind] == 'PRMS':
con_par_name.append('strmflow_module') # streamflow routing method
con_par_type.append(4)
con_par_values.append('strmflow_in_out') # strmflow, muskingum, strmflow_in_out, or strmflow_lake
# cascade module
ncascade = 0
if ncascade > 0: # default: ncascade = 0
con_par_name.append('cascade_flag') # runoff routing between HRU's
con_par_type.append(1)
con_par_values.append(1)
ncascadegw = 0
if ncascadegw > 0: # default: ncascadegw = 0
con_par_name.append('cascadegw_flag') # gw routing between HRU's
con_par_type.append(1)
con_par_values.append(1)
con_par_name.append('dprst_flag') # flag for depression storage simulations
con_par_type.append(1)
con_par_values.append(0)
# 3 - Output file: Statistic Variables (statvar) Files
# See list in PRMS manual Table 1-5 pp.61-74 for variables you can print
con_par_name.append('statsON_OFF') # flag to create Statistics output variables
con_par_type.append(1)
con_par_values.append(1)
# con_par_values.append(0)
con_par_name.append('stat_var_file') # output Statistics file location, name
con_par_type.append(4)
con_par_values.append(outdir + '{}.statvar'.format(settings.PROJ_CODE))
con_par_name.append('statVar_names')
con_par_type.append(4)
con_par_values.append(
np.array(['basin_actet',
'basin_cfs',
'basin_gwflow_cfs',
'basin_horad',
'basin_imperv_evap',
'basin_imperv_stor',
'basin_infil',
'basin_intcp_evap',
'basin_intcp_stor',
'basin_perv_et',
'basin_pk_precip',
'basin_potet',
'basin_potsw',
'basin_ppt',
'basin_pweqv',
'basin_rain',
'basin_snow',
'basin_snowcov',
'basin_snowevap',
'basin_snowmelt',
'basin_soil_moist',
'basin_soil_rechr',
'basin_soil_to_gw',
'basin_sroff_cfs',
'basin_ssflow_cfs',
'basin_ssin',
'basin_ssstor',
'basin_tmax',
'basin_tmin',
'basin_slstor',
'basin_pref_stor']))
# index of statVar_names to be printed to Statistics Output file
con_par_name.append('statVar_element') # ID numbers for variables in stat_Var_names
ind = np.squeeze(np.where(np.array(con_par_name) == 'statVar_names'))
con_par_num_i = con_par_values[ind].size
con_par_type.append(4)
ind = np.ones((con_par_num_i, 1), int).astype(str) # index of variables (can be 1 to max index of StatVar)
# add lines here to specify different variable indices other than 1
con_par_values.append(ind)
# 4 - "Animation" output files (spatial data)
# See list in: (1) PRMS manual Table 1-5 pp.61-74 and (2) GSFLOW
# Input Instructions manual Table A1-2 for variables you can print
con_par_name.append('aniOutON_OFF') # flag to create Statistics output variables
con_par_type.append(1)
con_par_values.append(1)
# con_par_values.append(0)
con_par_name.append('ani_output_file') # output Statistics file location, name
con_par_type.append(4)
con_par_values.append(outdir + '{}.ani'.format(settings.PROJ_CODE))
con_par_name.append('aniOutVar_names')
con_par_type.append(4)
con_par_values.append(np.array(['hru_ppt', # [in] Precipitation distributed to each HRU Rain
'hru_actet', # [in] Actual ET for each HRU
'actet_tot_gwsz', # [nhru] Total actual ET from each MODFLOW cell and PRMS soil zone [in]
'sat_recharge', # [nhru] HRU total recharge to the saturated zone
'streamflow_sfr'])) # [nsegment] Streamflow as computed by SFR for each segment
# 4 - For GUI (otherwise ignored during command line execution)
# GSFLOW: ignore these
# con_par_name.append('ndispGraphs') # number runtime graphs with GUI
# con_par_type.append(1)
# con_par_values.append(2)
#
# con_par_name.append('dispVar_names') # variables for runtime plot
# con_par_type.append(4)
# con_par_values.append(
# np.array(['basin_cfs',
# 'runoff']))
#
# # index of dispVar_names to be displayed in runtime plots
# con_par_name.append('dispVar_element') # variable indices for runtime plot
# con_par_type.append(4)
# ind = np.squeeze(np.where(np.array(con_par_name) == 'dispVar_names'))
# con_par_num_i = con_par_values[ind].size
# con_par_type.append(4)
# ind = np.ones((con_par_num_i, 1), int).astype(str) # index of variables (can be 1 to max index of StatVar)
# # add lines here to specify different variable indices other than 1
# ind[1] = '2'
# con_par_values.append(ind)
#
# con_par_name.append('dispGraphsBuffSize') # num timesteps (days) before updating runtime plot
# con_par_type.append(1)
# con_par_values.append(1)
#
# con_par_name.append('initial_deltat') # initial time step length (hrs)
# con_par_type.append(2)
# con_par_values.append(24.0) # 24 hrs matches model's daily time step
#
# # previously for PRMS, omit
# con_par_name.append('executable_desc')
# con_par_type.append(4)
# con_par_values.append('PRMS IV')
#
# con_par_name.append('executable_model')
# con_par_type.append(4)
# con_par_values.append(PRMS_exe)
# 5 - Initial condition file
# (default is init_vars_from_file = 0, but still need to specify for GUI)
con_par_name.append('init_vars_from_file') # use IC from initial cond file
con_par_type.append(1)
con_par_values.append(fl_load_init) # 0 for no, use default
if fl_load_init == 1:
con_par_name.append('var_init_file') # use IC from initial cond file
con_par_type.append(4)
con_par_values.append(load_init_file) # 0 for no, use default
# (default is save_vars_to_file = 0, but still need to specify for GUI)
con_par_name.append('save_vars_to_file') # save IC to output file
con_par_type.append(1)
con_par_values.append(fl_save_init)
if fl_save_init == 1:
con_par_name.append('var_save_file') # use IC from initial cond file
con_par_type.append(4)
con_par_values.append(save_init_file) # 0 for no, use default
# 6 - Suppress printing of some execution warnings
con_par_name.append('print_debug')
con_par_type.append(1)
con_par_values.append(-1)
# % % -----------------------------------------------------------------------
# Generally, do not change below here
con_filname = settings.control_dir + con_filname0 + '_' + model_mode + '.control'
# - Write to control file
if model_mode != 'MODFLOW':
ind = np.squeeze(np.where(np.array(con_par_name) == 'statVar_names'))
if ind.size > 0:
con_par_name.append('nstatVars') # num output vars in statVar_names (for Statistics output file)
con_par_type.append(1)
con_par_values.append(con_par_values[ind].size)
ind = np.squeeze(np.where(np.array(con_par_name) == 'aniOutVar_names'))
if ind.size > 0:
con_par_name.append('naniOutVars') # num output vars in aniOutVar_names (for animation output file)
con_par_type.append(1)
con_par_values.append(con_par_values[ind].size)
nvars = len(con_par_name)
con_par_num = np.ones((nvars,1), int)
ii = 0
for x in con_par_values:
if isinstance(x, (list, np.ndarray)): # check if list or array
if isinstance(x, np.ndarray):
con_par_num[ii] = x.size
else:
con_par_num[ii] = len(x)
ii = ii + 1
if os.path.isdir(outdir) == False:
os.makedirs(outdir);
# - Write to control file
fobj = open(con_filname, 'w+') # w+ for write and read
fobj.write(title_str + '\n')
line1 = '####\n'
for ii in range(0, nvars):
# Line 1
fobj.write(line1)
# Line 2
fobj.write(con_par_name[ii] + '\n');
# Line 3:
fobj.write(str(np.squeeze(con_par_num[ii])) + '\n');
# Line 4:
fobj.write(str(np.squeeze(con_par_type[ii])) + '\n');
# Line 5 to end:
if con_par_num[ii] == 1:
fobj.write(str(np.squeeze(con_par_values[ii])) + '\n')
else:
for x in con_par_values[ii]:
fobj.write(str(np.squeeze(x)) + '\n');
fobj.close()
# % % ------------------------------------------------------------------------
# Prepare for model execution
if model_mode != 'MODFLOW':
if os.path.isdir(outdir) == False:
os.makedirs(outdir);
print 'Make sure the below data files are ready: \n {}\n'.format(datafil)
print ' {}\n'.format(precip_datafil)
print ' {}\n'.format(tmax_datafil)
print ' {}\n'.format(tmin_datafil)
print ' {}\n'.format(solrad_datafil)
print 'Make sure the below parameter file is ready: \n {}\n'.format(parfil)
if platform.system() == 'Linux':
cmd_str = settings.GSFLOW_exe + ' ' + con_filname + ' &> out.txt'
else:
cmd_str = settings.GSFLOW_exe + ' ' + con_filname
#cmd_str_cmt = '#' + GSFLOW_exe_cmt + ' ' + con_filname + ' &> out.txt'
print 'To run command-line execution, enter at prompt: \n {}\n'.format(cmd_str)
runscriptfil = settings.control_dir + con_filname0 + '_' + model_mode + '.sh'
fobj = open(runscriptfil, 'w+')
#fobj.write(cmd_str_cmt);
fobj.write('\n\n');
fobj.write(cmd_str);
fobj.close()
os.chmod(runscriptfil, 0777)
| gpl-3.0 |
inonchiu/ComEst | comest/py2dmatch.py | 1 | 18934 | #############
#
# Match two sets of data in the 2d plane.
# I.e., find nearest neighbor of one that's in the other.
# This routine is modified from some scripts I found online, forgot whom I looked for...
# Please let me know if you are the author.
#
# I copy the astropy.stats.sigma_clipping in this module for sigma clipping.
# see webpage: https://astropy.readthedocs.org/en/v1.0.5/_modules/astropy/stats/sigma_clipping.html#sigma_clip
#
#############
import numpy as np
from math import *
try:
from scipy.spatial import cKDTree as KDT
except ImportError:
from scipy.spatial import KDTree as KDT
def carte2dmatch(x1, y1, x2, y2, tol=None, nnearest=1):
    """
    Match points of the first catalog to their nearest neighbors in the second.

    Parameters
    ----------
    x1, y1 : array-like
        Cartesian coordinates of the first catalog (shapes must agree).
    x2, y2 : array-like
        Cartesian coordinates of the second catalog (shapes must agree).
    tol : float or None, optional
        Maximum separation (in the unit of the coordinates) for a pair to
        count as a match.  If None, the nearest neighbor of every point in
        the first catalog is returned.
    nnearest : int, optional
        Which neighbor to report: 1 for the closest, 2 for the second
        closest, and so on.  ``carte2dmatch(x, y, x, y, nnearest=2)`` gives
        the nearest *non-self* neighbor of a catalog.

    Returns
    -------
    idx1 : int array
        Indices into the first catalog of the matches.
    idx2 : int array
        Indices into the second catalog of the matches.
    ds : float array
        Separation (in the unit of the coordinates) of each matched pair.
    """
    x1 = np.asarray(x1)
    y1 = np.asarray(y1)
    x2 = np.asarray(x2)
    y2 = np.asarray(y2)

    if x1.shape != y1.shape:
        raise ValueError('x1 and y1 do not match!')
    if x2.shape != y2.shape:
        raise ValueError('x2 and y2 do not match!')

    # (N, 2) coordinate matrices for the k-d tree.
    pts1 = np.column_stack((x1, y1))
    pts2 = np.column_stack((x2, y2))

    # Build the tree on the second catalog and query it with the first.
    tree = KDT(pts2)
    if nnearest == 1:
        idxs2 = tree.query(pts1)[1]
    elif nnearest > 1:
        # query() returns the nnearest closest points; keep the last column.
        idxs2 = tree.query(pts1, nnearest)[1][:, -1]
    else:
        raise ValueError('invalid nnearest ' + str(nnearest))

    # Planar separation of each pair.
    ds = np.hypot(x1 - x2[idxs2], y1 - y2[idxs2])
    idxs1 = np.arange(x1.size)

    # Optionally reject pairs that are farther apart than tol.
    if tol is not None:
        keep = ds < tol
        idxs1 = idxs1[keep]
        idxs2 = idxs2[keep]
        ds = ds[keep]

    return idxs1, idxs2, ds
# ---
# 3d match
# ---
def carte2d_and_z_match(x1, y1, z1, x2, y2, z2, ztol, stol):
    """
    Match catalog 1 to catalog 2 in the (x, y) plane and in z.

    Each point of the first catalog is paired with the second-catalog
    neighbor that lies within a circle of radius `stol` in the plane and,
    among those candidates, is closest in z; the pair is kept only if the
    z separation is below `ztol`.

    Parameters
    ----------
    x1, y1, z1 : array-like
        Coordinates of the first catalog (shapes must agree).
    x2, y2, z2 : array-like
        Coordinates of the second catalog (shapes must agree).
    ztol : float or array-like
        Maximum |z1 - z2| for a match.  If array-like, its length must
        equal the size of the first catalog (one tolerance per point).
    stol : float
        Search radius in the (x, y) plane.

    Returns
    -------
    idx1 : int array
        Indices into the first catalog of the matches.
    idx2 : int array
        Indices into the second catalog of the matches.
    ds : float array
        Planar (x, y) separation of each matched pair.
    dz : float array
        z separation of each matched pair.
    """
    x1 = np.asarray(x1)
    y1 = np.asarray(y1)
    z1 = np.asarray(z1)
    x2 = np.asarray(x2)
    y2 = np.asarray(y2)
    z2 = np.asarray(z2)

    if x1.shape != y1.shape or x1.shape != z1.shape:
        raise ValueError('x1 and y1/z1 do not match!')
    if x2.shape != y2.shape or x2.shape != z2.shape:
        raise ValueError('x2 and y2/z2 do not match!')

    coords1 = np.column_stack((x1, y1))
    coords2 = np.column_stack((x2, y2))

    # k-d tree over the second catalog for the planar neighbor search.
    kdt = KDT(coords2)

    # All catalog-2 neighbors within a ball of radius stol around each
    # catalog-1 point (a list of index lists).
    idxs2_within_balls = kdt.query_ball_point(coords1, stol)
    # A comprehension replaces the Python-2-only np.array(map(len, ...)):
    # on Python 3, map() is lazy and np.array() would not expand it.
    n_within_ball = np.array([len(ball) for ball in idxs2_within_balls],
                             dtype=int)
    nonzero_within_ball = np.where(n_within_ball > 0)[0]  # points with candidates

    # For each point that has candidates, pick the candidate nearest in z
    # via a one-dimensional k-d tree over the candidates' z values.
    dz_within_ball = []  # z distance to the chosen candidate
    idxs2 = []           # index of the chosen candidate in catalog 2
    for i in nonzero_within_ball:
        ball = idxs2_within_balls[i]
        dz_tmp, pos_in_ball = KDT(np.transpose([z2[ball]])).query(
            np.transpose([z1[i]]))
        dz_within_ball.append(dz_tmp)
        idxs2.append(ball[pos_in_ball])
    # NOTE: the previous version did `del dz_tmp, pos_in_ball` here, which
    # raised NameError whenever no point had a planar neighbor; an empty
    # result is now returned instead.

    # Keep only the objects that had at least one neighbor in the ball.
    idxs1 = np.arange(x1.size)[nonzero_within_ball]
    # Builtin int/float: np.int and np.float were removed in NumPy >= 1.24.
    idxs2 = np.array(idxs2, dtype=int)
    dz_within_ball = np.array(dz_within_ball, dtype=float)

    # Reject pairs whose z separation exceeds the tolerance.
    ztol = np.array(ztol, ndmin=1)
    if len(ztol) == 1:
        msk = dz_within_ball < ztol
    elif len(ztol) == len(x1):
        msk = dz_within_ball < ztol[nonzero_within_ball]
    else:
        raise ValueError("The length of ztol has to be 1 (float) or as the same as input x1/y1. len(ztol):", len(ztol))

    idxs1 = idxs1[msk]
    idxs2 = idxs2[msk]
    ds = np.hypot(x1[idxs1] - x2[idxs2], y1[idxs1] - y2[idxs2])
    dz = dz_within_ball[msk]

    return idxs1, idxs2, ds, dz
############################################################
#
# sigma clipping from astropy.stats
# Under the LICENSE:
# Licensed under a 3-clause BSD style license
#
############################################################
def sigma_clip(data, sig=3.0, iters=1, cenfunc=np.ma.median, varfunc=np.var,
               axis=None, copy=True):
    """Iteratively mask points more than ``sig`` deviations from the center.

    Ported from ``astropy.stats.sigma_clip`` (3-clause BSD license).

    Parameters
    ----------
    data : array-like
        The data to be sigma-clipped (any shape).
    sig : float
        Number of standard deviations (*not* variances) used as the
        clipping limit.
    iters : int or None
        Number of clipping iterations, or `None` to iterate until
        convergence (i.e. until an iteration rejects nothing).
    cenfunc : callable
        Computes the center of the distribution from a masked array.
        Defaults to the masked median.
    varfunc : callable
        Computes a squared width (variance-like) estimate from a masked
        array; points with ``deviation**2 > sig**2 * varfunc(data)`` are
        masked.  Defaults to the variance.
    axis : int or None
        If not `None`, clip along the given axis; ``cenfunc`` and
        ``varfunc`` must then accept an ``axis`` keyword and reduce that
        dimension (like the numpy functions).  If `None`, clip over all
        values.
    copy : bool
        If `True` (default) work on a copy of ``data``; if `False` the
        returned masked array shares ``data``'s buffer.

    Returns
    -------
    filtered_data : `numpy.ma.MaskedArray`
        Same shape as ``data``, with the rejected points masked.

    Examples
    --------
    >>> out = sigma_clip([1.0, 1.1, 0.9, 100.0], sig=2.0, iters=1)
    >>> bool(out.mask[-1])
    True
    """
    if axis is not None:
        # Wrap the reducers so that the reduced axis is restored, keeping
        # the center/width broadcastable against the original data.
        base_cenfunc = cenfunc
        base_varfunc = varfunc
        cenfunc = lambda d: np.expand_dims(base_cenfunc(d, axis=axis),
                                           axis=axis)
        varfunc = lambda d: np.expand_dims(base_varfunc(d, axis=axis),
                                           axis=axis)

    clipped = np.ma.array(data, copy=copy)

    if iters is None:
        # Iterate until an iteration leaves the unmasked count unchanged.
        last_count = clipped.count() + 1
        while clipped.count() != last_count:
            last_count = clipped.count()
            if last_count == 0:
                # Everything is masked already; leave the mask untouched.
                continue
            dev = clipped - cenfunc(clipped)
            clipped.mask |= dev * dev > varfunc(clipped) * sig ** 2
    else:
        for _ in range(iters):
            dev = clipped - cenfunc(clipped)
            clipped.mask |= dev * dev > varfunc(clipped) * sig ** 2

    return clipped
############################################################
#
# Adaptively sigma_clip as a function of magnitude
#
############################################################
def Adpative_sigma_clip(mag1, mag2, sig=3.0, iters=1, cenfunc=np.ma.median, varfunc=np.var):
    """
    Sigma-clip ``mag1 - mag2`` as a function of ``mag2``, bin by bin.

    In the original use case, mag1 = mag_auto from SExtractor and
    mag2 = mag_true from the input mock; len(mag1) must equal len(mag2).

    Parameters:
        -`mag1`: 1d array. The first magnitude array.
        -`mag2`: 1d array. The second magnitude array.
        -`sig`: float. The multiplicative factor of the sigma clipping.
        -`iters`: int. The iteration count; iters = None means clipping
                  until convergence.
        -`cenfunc`: function object. Computes the center of the distribution.
        -`varfunc`: function object. Computes the width of the distribution
                    (note: this is the variance, not the std).

    Returns:
        -`returned_mask`: 1d bool array, aligned with mag1/mag2.
                          True where mag1 - mag2 was rejected by the clip.
    """
    # obtain the length
    nobjs = len(mag1)
    # sanity check
    if nobjs != len(mag2):
        raise RuntimeError("len(mag2) != len(mag1) = ", nobjs)
    # Derive dmag = mag1 - mag2, then clip dmag within bins of mag2.
    # Bin mag2 in ~0.1-mag steps (the old docstring said 0.5 mag, but the
    # code has always used 0.1).
    mag_edges = np.linspace(mag2.min(), mag2.max(),
                            int((mag2.max() - mag2.min()) / 0.1) + 1)
    # derive dmag
    dmag = mag1 - mag2
    # Bin membership of each object:
    # mag_edges[i-1] < x <= mag_edges[i] (right boundary closed).
    indice_hist = np.digitize(mag2, bins=mag_edges, right=True)
    # ---
    # sigma clip within each magnitude bin
    # ---
    # Start from an all-False mask (builtin bool: np.bool was removed in
    # NumPy >= 1.24).
    returned_mask = np.zeros(nobjs, dtype=bool)
    # loop over the populated bins, excluding indice_hist == 0
    for i in set(indice_hist) - {0}:
        # select the objects in this bin
        i_am_in_this_bin = np.where(indice_hist == i)[0]
        if len(i_am_in_this_bin) == 0:
            # cannot happen (indices come from the data); kept for safety
            continue
        # sigma clipping on dmag within this bin
        filtered_data = sigma_clip(
            data=dmag[i_am_in_this_bin],
            sig=sig,
            iters=iters,
            cenfunc=cenfunc,
            varfunc=varfunc,
            axis=None,
            copy=True)
        # transfer the bin's mask into the full-length mask
        returned_mask[i_am_in_this_bin] = filtered_data.mask.copy()
    return returned_mask
if __name__ == "__main__":
    # Smoke test: catalog 1 is a noisy copy of catalog 2 (positional scatter
    # of 1/0.26 coordinate units, z scatter of 0.1), so most points should
    # find a match.
    x2 = np.random.uniform(0.0, 1000.0, 50000)
    y2 = np.random.uniform(0.0, 1000.0, 50000)
    z2 = np.random.uniform(15.0, 30.0, 50000)
    x1 = np.random.normal(loc = x2, scale = 1.0 / 0.26)
    y1 = np.random.normal(loc = y2, scale = 1.0 / 0.26)
    z1 = np.random.normal(loc = z2, scale = 0.1)
    z1err= np.random.uniform(low = 0.085, high = 0.115, size = len(z1))
    import matplotlib.pyplot as pyplt
    '''
    # this is equivalent to, but faster than just doing np.array([x1, y1, z1])
    coords1 = np.empty((x1.size, 2))
    coords1[:, 0] = x1
    coords1[:, 1] = y1
    # this is equivalent to, but faster than just doing np.array([x1, y1, z1])
    coords2 = np.empty((x2.size, 2))
    coords2[:, 0] = x2
    coords2[:, 1] = y2
    # set kdt for coord2
    kdt = KDT(coords2)
    # ---
    # start from here
    # ---
    idxs2_within_balls = kdt.query_ball_point(coords1, 3.0 / 0.26) # find the neighbors within a ball
    n_within_ball = np.array(map(len, idxs2_within_balls), dtype = np.int) # counts within each ball
    zero_within_ball = np.where( n_within_ball == 0)[0] # find which one does not have neighbors
    nonzero_within_ball = np.where( n_within_ball > 0)[0] # find which one has neighbors
    dz_within_ball = [] # the distance
    idxs2 = []
    for i in nonzero_within_ball:
        dz_temp, matched_id_temp = KDT( np.transpose([ z2[ idxs2_within_balls[i] ] ]) ).query( np.transpose([ z1[i] ]) )
        matched_id_temp = idxs2_within_balls[i][ matched_id_temp ]
        dz_within_ball.append(dz_temp)
        idxs2.append(matched_id_temp)
    # index for coord1
    idxs1 = np.arange(x1.size)[ nonzero_within_ball ]
    idxs2 = np.array(idxs2, dtype = np.int)
    dz_within_ball = np.array(dz_within_ball, dtype = np.float)
    '''
    # Match with a planar tolerance of 1/0.26 units and a per-object z
    # tolerance of 3 * z1err.
    A = carte2d_and_z_match(x1 = x1, y1 = y1, z1 = z1, x2 = x2, y2 = y2, z2 = z2, stol = 1.0 / 0.26, ztol = 3.0 * z1err )
| mit |
nyirock/mg_blast_wrapper | mg_blast_wrapper_v1.10.py | 1 | 19368 | #!/usr/bin/python
import getopt
import sys
from Bio import SeqIO
import time
import os
import shutil
import pandas
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
__author__ = "Andriy Sheremet"
#Helper functions definitions
def genome_shredder(input_dct, shear_val):
    """
    Cut every record of input_dct into consecutive fragments of shear_val bases.

    input_dct : dict mapping names to SeqIO records.
    shear_val : fragment length (converted with int()); the last fragment of
                each record may be shorter.

    Returns a dict mapping "<key>_<offset>" to a SeqRecord whose id is
    "<record name>_<offset>" and whose sequence is the fragment starting at
    that offset.
    """
    pieces = {}
    step = int(shear_val)
    for key, record in input_dct.items():
        sequence = str(record.seq)
        for offset in range(0, len(sequence), step):
            fragment = sequence[offset:offset + step]
            pieces[key + "_" + str(offset)] = SeqRecord(
                Seq(fragment), record.name + "_" + str(offset), '', '')
    return pieces
def parse_contigs_ind(f_name):
    """
    Return a SeqIO index (lazy, dict-like) of the fasta records in f_name.

    Remember to close the returned index object after use.
    """
    # SeqIO.index() opens the file itself; the previous version also opened
    # (and immediately closed) an unused handle in the deprecated "rU" mode.
    return SeqIO.index(f_name, "fasta")
#returning specific sequences and overal list
def retrive_sequence(contig_lst, rec_dic):
    """
    Return the sequences (as plain strings) named in contig_lst.

    contig_lst : iterable of record names to look up.
    rec_dic    : dictionary/index mapping names to SeqIO objects.
    """
    # str() avoids returning Seq objects (and the associated
    # BiopythonDeprecationWarning on implicit conversion).
    return [str(rec_dic[name].seq) for name in contig_lst]
def filter_seq_dict(key_lst, rec_dic):
    """
    Return the subset of rec_dic whose keys appear in key_lst.
    """
    subset = {}
    for key in key_lst:
        subset[key] = rec_dic[key]
    return subset
def unique_scaffold_topEval(dataframe):
    """Deduplicate BLAST hits by query id, keeping the lowest e-value row.

    Positional column 1 of each itertuples() row is the query id ('quid')
    and column 11 the e-value.  Returns a new DataFrame with the same
    columns as the input.
    """
    col_names = list(dataframe.columns.values)
    best = {}
    for row in dataframe.itertuples():
        quid = row[1]
        # Keep the first occurrence; replace only on a strictly lower e-value.
        if quid not in best or row[11] < best[quid][11]:
            best[quid] = row
    data = [[getattr(row, col) for col in col_names] for row in best.values()]
    return pandas.DataFrame(data, columns=col_names)
def unique_scaffold_topBits(dataframe):
    """Deduplicate BLAST hits by query id, keeping the highest bit score.

    Positional column 1 of each itertuples() row is the query id ('quid')
    and column 12 the bit score.  Returns a new DataFrame with the same
    columns as the input.
    """
    col_names = list(dataframe.columns.values)
    best = {}
    for row in dataframe.itertuples():
        quid = row[1]
        # Keep the first occurrence; replace only on a strictly higher score.
        if quid not in best or row[12] > best[quid][12]:
            best[quid] = row
    data = [[getattr(row, col) for col in col_names] for row in best.values()]
    return pandas.DataFrame(data, columns=col_names)
def close_ind_lst(ind_lst):
    """
    Close every index/handle object supplied in ind_lst.
    """
    for handle in ind_lst:
        handle.close()
def usage():
    """Print brief usage information for the script."""
    # Fix: use the parenthesized print form, which produces identical
    # output under Python 2 (single argument) and is valid Python 3.
    print("\nThis is the usage function\n")
#    print 'Usage: '+sys.argv[0]+' -i <input_file> [-o <output>] [-l <minimum length>]'
#    print 'Example: '+sys.argv[0]+' -i input.fasta -o output.fasta -l 100'
def main(argv):
    """Drive the full metagenome read-recruitment workflow.

    Parses command-line options, validates the input FASTA files, builds a
    BLAST database from the (optionally sheared) reference sequences, BLASTs
    each metagenome against it, filters hits by e-value/identity/alignment
    length, and writes the recruited sequences as CSV and/or FASTA under
    <name>/results.
    """
    #default parameters
    mg_lst = []
    ref_lst = []
    e_val = 1e-5
    alen = 50.0
    alen_percent = True
    alen_bp = False
    iden = 95.0
    name= "output"
    fmt_lst = ["fasta"]
    supported_formats =["fasta", "csv"]
    iterations = 1
    alen_increment = 5.0
    iden_increment = 0.0
    blast_db_Dir = ""
    results_Dir = ""
    input_files_Dir = ""
    ref_out_0 = ""
    blasted_lst = []
    continue_from_previous = False #poorly supported, just keeping the directories
    skip_blasting = False
    debugging = False
    sheared = False
    shear_val = None

    # Command-line parsing; any malformed option aborts with usage help.
    try:
        opts, args = getopt.getopt(argv, "r:m:n:e:a:i:s:f:h", ["reference=", "metagenome=", "name=", "e_value=", "alignment_length=", "identity=","shear=","format=", "iterations=", "alen_increment=", "iden_increment=","continue_from_previous","skip_blasting","debugging", "help"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
#        elif opt in ("--recover_after_failure"):
#            recover_after_failure = True
#            print "Recover after failure:", recover_after_failure
        elif opt in ("--continue_from_previous"):
            continue_from_previous = True
            if debugging:
                print "Continue after failure:", continue_from_previous
        elif opt in ("--debugging"):
            debugging = True
            if debugging:
                print "Debugging messages:", debugging
        elif opt in ("-r", "--reference"):
            if arg:
                ref_lst=arg.split(',')
                #infiles = arg
            if debugging:
                print "Reference file(s)", ref_lst
        elif opt in ("-m", "--metagenome"):
            if arg:
                mg_lst=arg.split(',')
                #infiles = arg
            if debugging:
                print "Metagenome file(s)", mg_lst
        elif opt in ("-f", "--format"):
            if arg:
                fmt_lst=arg.split(',')
                #infiles = arg
            if debugging:
                print "Output format(s)", fmt_lst
        elif opt in ("-n", "--name"):
            if arg.strip():
                name = arg
            if debugging:
                print "Project name", name
        elif opt in ("-e", "--e_value"):
            try:
                e_val = float(arg)
            except:
                print "\nERROR: Please enter numerical value as -e parameter (default: 1e-5)"
                usage()
                sys.exit(1)
            if debugging:
                print "E value", e_val
        elif opt in ("-a", "--alignment_length"):
            # A trailing "%" selects percent-of-length mode; otherwise
            # the threshold is interpreted in base pairs.
            if arg.strip()[-1]=="%":
                alen_bp = False
                alen_percent = True
            else:
                alen_bp = True
                alen_percent = False
            try:
                alen = float(arg.split("%")[0])
            except:
                print "\nERROR: Please enter a numerical value as -a parameter (default: 50.0)"
                usage()
                sys.exit(1)
            if debugging:
                print "Alignment length", alen
        elif opt in ("-i", "--identity"):
            try:
                iden = float(arg)
            except:
                print "\nERROR: Please enter a numerical value as -i parameter (default: 95.0)"
                usage()
                sys.exit(1)
            if debugging:
                print "Alignment length", iden
        elif opt in ("-s", "--shear"):
            sheared = True
            try:
                shear_val = int(arg)
            except:
                print "\nERROR: Please enter an integer value as -s parameter"
                usage()
                sys.exit(1)
            if debugging:
                print "Alignment length", iden
        elif opt in ("--iterations"):
            try:
                iterations = int(arg)
            except:
                print "\nWARNING: Please enter integer value as --iterations parameter (using default: 1)"
            if debugging:
                print "Iterations: ", iterations
        elif opt in ("--alen_increment"):
            try:
                alen_increment = float(arg)
            except:
                print "\nWARNING: Please enter numerical value as --alen_increment parameter (using default: )", alen_increment
            if debugging:
                print "Alignment length increment: ", alen_increment
        elif opt in ("--iden_increment"):
            try:
                iden_increment = float(arg)
            except:
                print "\nWARNING: Please enter numerical value as --iden_increment parameter (using default: )", iden_increment
            if debugging:
                print "Alignment length increment: ", iden_increment
        elif opt in ("--skip_blasting"):
            skip_blasting = True
            if debugging:
                print "Blasting step omitted; Using previous blast output."

    # Validate that every supplied input file can be opened.
    for ref_file in [x for x in ref_lst if x]:
        try:
            #
            with open(ref_file, "rU") as hand_ref:
                pass
        except:
            print "\nERROR: Reference File(s) ["+ref_file+"] doesn't exist"
            usage()
            sys.exit(1)
    for mg_file in [x for x in mg_lst if x]:
        try:
            #
            with open(mg_file, "rU") as hand_mg:
                pass
        except:
            print "\nERROR: Metagenome File(s) ["+mg_file+"] doesn't exist"
            usage()
            sys.exit(1)
    for fmt in [x for x in fmt_lst if x]:
        if fmt not in supported_formats:
            print "\nWARNING: Output format [",fmt,"] is not supported"
            print "\tUse -h(--help) option for the list of supported formats"
            fmt_lst=["fasta"]
            print "\tUsing default output format: ", fmt_lst[0]

    # The project directory is wiped and recreated unless resuming.
    project_dir = name
    if not continue_from_previous:
        if os.path.exists(project_dir):
            shutil.rmtree(project_dir)
        try:
            os.mkdir(project_dir)
        except OSError:
            print "ERROR: Cannot create project directory: " + name
            raise

    # Echo the effective run parameters.
    print "\n\t Initial Parameters:"
    print "\nProject Name: ", name,'\n'
    print "Project Directory: ", os.path.abspath(name),'\n'
    print "Reference File(s): ", ref_lst,'\n'
    if sheared:
        print "Shear Reference File(s):", str(shear_val)+"bp",'\n'
    print "Metagenome File(s): ", mg_lst,'\n'
    print "E Value: ", e_val, "\n"
    if alen_percent:
        print "Alignment Length: "+str(alen)+'%\n'
    if alen_bp:
        print "Alignment Length: "+str(alen)+'bp\n'
    print "Sequence Identity: "+str(iden)+'%\n'
    print "Output Format(s):", fmt_lst,'\n'
    if iterations > 1:
        print "Iterations: ", iterations, '\n'
        print "Alignment Length Increment: ", alen_increment, '\n'
        print "Sequence identity Increment: ", iden_increment, '\n'

    #Initializing directories
    blast_db_Dir = name+"/blast_db"
    if not continue_from_previous:
        if os.path.exists(blast_db_Dir):
            shutil.rmtree(blast_db_Dir)
        try:
            os.mkdir(blast_db_Dir)
        except OSError:
            print "ERROR: Cannot create project directory: " + blast_db_Dir
            raise
    results_Dir = name+"/results"
    if not continue_from_previous:
        if os.path.exists(results_Dir):
            shutil.rmtree(results_Dir)
        try:
            os.mkdir(results_Dir)
        except OSError:
            print "ERROR: Cannot create project directory: " + results_Dir
            raise
    input_files_Dir = name+"/input_files"
    if not continue_from_previous:
        if os.path.exists(input_files_Dir):
            shutil.rmtree(input_files_Dir)
        try:
            os.mkdir(input_files_Dir)
        except OSError:
            print "ERROR: Cannot create project directory: " + input_files_Dir
            raise

    # Writing raw reference files into a specific input filename
    input_ref_records = {}
    for reference in ref_lst:
        ref_records_ind = parse_contigs_ind(reference)
        #ref_records = dict(ref_records_ind)
        input_ref_records.update(ref_records_ind)
        ref_records_ind.close()
        #input_ref_records.update(ref_records)
    ref_out_0 = input_files_Dir+"/reference0.fna"
    # NOTE(review): bitwise "&" on booleans works here, but "and" is the
    # intended operator.
    if (sheared & bool(shear_val)):
        with open(ref_out_0, "w") as handle:
            SeqIO.write(genome_shredder(input_ref_records, shear_val).values(), handle, "fasta")
            #NO NEED TO CLOSE with statement will automatically close the file
    else:
        with open(ref_out_0, "w") as handle:
            SeqIO.write(input_ref_records.values(), handle, "fasta")

    # Making BLAST databases
    #output fname from before used as input for blast database creation
    # NOTE(review): os.system with unquoted, user-supplied paths is fragile
    # (spaces / shell metacharacters); consider subprocess with a list.
    input_ref_0 = ref_out_0
    title_db = name+"_db"#add iteration functionality
    outfile_db = blast_db_Dir+"/iteration"+str(iterations)+"/"+name+"_db"#change into for loop
    os.system("makeblastdb -in "+input_ref_0+" -dbtype nucl -title "+title_db+" -out "+outfile_db+" -parse_seqids")

    # BLASTing query contigs
    if not skip_blasting:
        print "\nBLASTing query file(s):"
        for i in range(len(mg_lst)):
            database = outfile_db # adjust for iterations
            blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
            start = time.time()
            os_string = 'blastn -db '+database+' -query \"'+mg_lst[i]+'\" -out '+blasted_lst[i]+" -evalue "+str(e_val)+" -outfmt 6 -num_threads 8"
            #print os_string
            os.system(os_string)
            print "\t"+mg_lst[i]+"; Time elapsed: "+str(time.time()-start)+" seconds."
    else:
        # Reuse tabular outputs produced by a previous run.
        for i in range(len(mg_lst)):
            blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")

    # Parsing BLAST outputs
    blast_cols = ['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
    recruited_mg=[]
    for i in range(len(mg_lst)):
        df = pandas.read_csv(blasted_lst[i] ,sep="\t", header=None)
        df.columns=blast_cols
        recruited_mg.append(df)
#    print len(recruited_mg[0])
#    print len(recruited_mg[1])

    #creating all_records entry
    #! Remember to close index objects after they are no longer needed
    #! Use helper function close_ind_lst()
    all_records = []
    all_input_recs = parse_contigs_ind(ref_out_0)
#    _ = 0
#    for key, value in all_input_recs.items():
#        _ +=1
#        if _ < 20:
#            print key, len(value)
    print "\nIndexing metagenome file(s):"
    for i in range(len(mg_lst)):
        start = time.time()
        all_records.append(parse_contigs_ind(mg_lst[i]))
        print "\t"+mg_lst[i]+" Indexed in : "+str(time.time()-start)+" seconds."

    # Transforming data
    for i in range(len(mg_lst)):
        #cutoff_contigs[dataframe]=evalue_filter(cutoff_contigs[dataframe])
        # Keep only the best (highest bit-score) hit per query sequence.
        recruited_mg[i]=unique_scaffold_topBits(recruited_mg[i])
        contig_list = recruited_mg[i]['quid'].tolist()
        recruited_mg[i]['Seq_nt']=retrive_sequence(contig_list, all_records[i])
        recruited_mg[i]['Seq_size']=recruited_mg[i]['Seq_nt'].apply(lambda x: len(x))
        recruited_mg[i]['Ref_size']=recruited_mg[i]['suid'].apply(lambda x: len(all_input_recs[str(x)]))
        #recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/min(recruited_mg[i]['Seq_size'].apply(lambda y: y),recruited_mg[i]['Ref_size'].apply(lambda z: z))
        #df.loc[:, ['B0', 'B1', 'B2']].min(axis=1)
        # Coverage: alignment length as a percentage of the shorter of
        # query and reference lengths.
        recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/recruited_mg[i].loc[:,["Seq_size", "Ref_size"]].min(axis=1)
        recruited_mg[i]['Metric']=recruited_mg[i]['Coverage']*recruited_mg[i]['iden']/100.0
        recruited_mg[i] = recruited_mg[i][['quid', 'suid', 'iden', 'alen','Coverage','Metric', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits','Ref_size','Seq_size','Seq_nt']]

    # Here would go statistics functions and producing plots
    #
    #
    #
    #
    #

    # Quality filtering before outputting
    if alen_percent:
        for i in range(len(recruited_mg)):
            recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['Coverage']>=alen)&(recruited_mg[i]['eval']<=e_val)]
    if alen_bp:
        for i in range(len(recruited_mg)):
            recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['alen']>=alen)&(recruited_mg[i]['eval']<=e_val)]
#    print len(recruited_mg[0])
#    print len(recruited_mg[1])

    # Batch export to outfmt (csv and/or multiple FASTA)
    alen_str = ""
    iden_str = "_iden_"+str(iden)+"%"
    if alen_percent:
        alen_str = "_alen_"+str(alen)+"%"
    if alen_bp:
        alen_str = "_alen_"+str(alen)+"bp"
    if iterations > 1:
        prefix=name+"/results/"+name.split("/")[0]+"_iter_e_"+str(e_val)+iden_str+alen_str
    else:
        prefix=name+"/results/"+name.split("/")[0]+"_e_"+str(e_val)+iden_str+alen_str
    if sheared:
        prefix = prefix+'_sheared_'+str(shear_val)+"bp"
    prefix = prefix + "_recruited_mg_"
    print "\nWriting files:"
    for i in range(len(mg_lst)):
        records= []
#        try:
#            os.remove(outfile1)
#        except OSError:
#            pass
        if "csv" in fmt_lst:
            outfile1 = prefix+str(i)+".csv"
            recruited_mg[i].to_csv(outfile1, sep='\t')
            print str(len(recruited_mg[i]))+" sequences written to "+outfile1
        if "fasta" in fmt_lst:
            ids = recruited_mg[i]['quid'].tolist()
            #if len(ids)==len(sequences):
            for j in range(len(ids)):
                records.append(all_records[i][ids[j]])
            outfile2 = prefix+str(i)+".fasta"
            with open(outfile2, "w") as output_handle:
                SeqIO.write(records, output_handle, "fasta")
            print str(len(ids))+" sequences written to "+outfile2

    # Release all SeqIO index objects.
    close_ind_lst(all_records)
    close_ind_lst([all_input_recs])
    #all_records[i].close()# keep open if multiple iterations
    #recruited_mg_1 = pandas.read_csv(out_name1 ,sep="\t", header=None)
    #recruited_mg_1.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
    #recruited_mg_2 = pandas.read_csv(out_name2 ,sep="\t", header=None)
    #recruited_mg_2.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
    #recruited_mg = [recruited_mg_1, recruited_mg_2]
#    blast_db_Dir = ""
#    results_Dir = ""
#    input_files_Dir = ""
#    parsed = SeqIO.parse(handle, "fasta")
#
#    records = list()
#
#
#    total = 0
#    processed = 0
#    for record in parsed:
#        total += 1
#        #print(record.id), len(record.seq)
#        if len(record.seq) >= length:
#            processed += 1
#            records.append(record)
#    handle.close()
#
#    print "%d sequences found"%(total)
#
#    try:
#        output_handle = open(outfile, "w")
#        SeqIO.write(records, output_handle, "fasta")
#        output_handle.close()
#        print "%d sequences written"%(processed)
#    except:
#        print "ERROR: Illegal output filename"
#        sys.exit(1)
# Script entry point: forward all CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| mit |
mne-tools/mne-tools.github.io | 0.21/_downloads/d5061624de1ba2be808df117f8a2ada0/plot_decoding_xdawn_eeg.py | 4 | 4591 | """
============================
XDAWN Decoding From EEG data
============================
ERP decoding with Xdawn ([1]_, [2]_). For each event type, a set of
spatial Xdawn filters are trained and applied on the signal. Channels are
concatenated and rescaled to create features vectors that will be fed into
a logistic regression.
"""
# Authors: Alexandre Barachant <alexandre.barachant@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from mne import io, pick_types, read_events, Epochs, EvokedArray
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.decoding import Vectorizer
print(__doc__)

data_path = sample.data_path()

###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2,
            'Visual/Left': 3, 'Visual/Right': 4}
n_filter = 3

# Setup for reading the raw data: band-pass 1-20 Hz, EEG channels only.
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
events = read_events(event_fname)

picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                   exclude='bads')

epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
                picks=picks, baseline=None, preload=True,
                verbose=False)

# Create classification pipeline: Xdawn spatial filtering, channel
# concatenation/rescaling, then L1 logistic regression.
clf = make_pipeline(Xdawn(n_components=n_filter),
                    Vectorizer(),
                    MinMaxScaler(),
                    LogisticRegression(penalty='l1', solver='liblinear',
                                       multi_class='auto'))

# Get the labels (event codes are the last column of epochs.events)
labels = epochs.events[:, -1]

# Cross validator
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)

# Do cross-validation: out-of-fold predictions are accumulated into preds.
preds = np.empty(len(labels))
for train, test in cv.split(epochs, labels):
    clf.fit(epochs[train], labels[train])
    preds[test] = clf.predict(epochs[test])

# Classification report
target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r']
report = classification_report(labels, preds, target_names=target_names)
print(report)

# Normalized confusion matrix (each row divided by its true-label count)
cm = confusion_matrix(labels, preds)
cm_normalized = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis]

# Plot confusion matrix
fig, ax = plt.subplots(1)
im = ax.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
ax.set(title='Normalized Confusion matrix')
fig.colorbar(im)
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
fig.tight_layout()
ax.set(ylabel='True label', xlabel='Predicted label')

###############################################################################
# The ``patterns_`` attribute of a fitted Xdawn instance (here from the last
# cross-validation fold) can be used for visualization.

fig, axes = plt.subplots(nrows=len(event_id), ncols=n_filter,
                         figsize=(n_filter, len(event_id) * 2))
fitted_xdawn = clf.steps[0][1]
tmp_info = epochs.info.copy()
# sfreq of 1 Hz — presumably so the topomap "times" axis indexes the
# Xdawn components (0, 1, 2) directly; confirm against plot_topomap docs.
tmp_info['sfreq'] = 1.
for ii, cur_class in enumerate(sorted(event_id)):
    cur_patterns = fitted_xdawn.patterns_[cur_class]
    pattern_evoked = EvokedArray(cur_patterns[:n_filter].T, tmp_info, tmin=0)
    pattern_evoked.plot_topomap(
        times=np.arange(n_filter),
        time_format='Component %d' if ii == 0 else '', colorbar=False,
        show_names=False, axes=axes[ii], show=False)
    axes[ii, 0].set(ylabel=cur_class)
fig.tight_layout(h_pad=1.0, w_pad=1.0, pad=0.1)
###############################################################################
# References
# ----------
# .. [1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
# algorithm to enhance evoked potentials: application to brain-computer
# interface. Biomedical Engineering, IEEE Transactions on, 56(8),
# 2035-2043.
# .. [2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
# August). Theoretical analysis of xDAWN algorithm: application to an
# efficient sensor selection in a P300 BCI. In Signal Processing
# Conference, 2011 19th European (pp. 1382-1386). IEEE.
| bsd-3-clause |
alexlee-gk/visual_dynamics | scripts/plot_concise_results.py | 1 | 6691 | import argparse
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def main():
    """Render bar charts of average execution cost from result CSVs.

    Reads per-condition "<...>_test.csv" files from --results_dir (each
    file's last row holds mean and std of the return), negates the returns
    into costs, and plots them either for the FQI feature-dynamics sweep
    (--experiment_name fqi) or for the full method comparison (all).
    With --save the figure is written to a hard-coded Dropbox path,
    otherwise it is shown interactively.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--experiment_name', choices=['fqi', 'all'], default='fqi')
    parser.add_argument('--results_dir', type=str, default='results')
    parser.add_argument('--infix', type=str, help='e.g. unseen')
    parser.add_argument('--usetex', '--use_tex', action='store_true')
    parser.add_argument('--save', action='store_true')
    args = parser.parse_args()

    if args.usetex:
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')

    if args.experiment_name != 'fqi':
        figsize = (9, 6)
    else:
        figsize = None
    fig, ax = plt.subplots(figsize=figsize, tight_layout=True)

    bar_width = 0.35
    opacity = 0.8
    error_config = {'ecolor': '0.3'}
    title_fontsize = 18
    fontsize = 14

    if args.experiment_name == 'fqi':
        feature_dynamics_names = ['fc_pixel', 'local_pixel', 'local_level1', 'local_level2', 'local_level3', 'local_level4', 'local_level5']
        feature_dynamics_labels = ['pixel,\nfully\nconnected', 'pixel,\nlocally\nconnected', 'VGG\nconv1_2', 'VGG\nconv2_2', 'VGG\nconv3_3', 'VGG\nconv4_3', 'VGG\nconv5_3']
        if args.usetex:
            # Escape underscores so LaTeX rendering doesn't treat them as subscripts.
            feature_dynamics_labels = [label.replace('_', '\hspace{-0.1em}\_\hspace{0.1em}') for label in feature_dynamics_labels]
        algorithm_name = 'fqi'

        mean_returns = []
        std_returns = []
        for feature_dynamics_name in feature_dynamics_names:
            conditions = [algorithm_name, feature_dynamics_name]
            if args.infix:
                conditions.append(args.infix)
            result_fname = os.path.join(args.results_dir, '_'.join(conditions + ['test.csv']))
            # Stop at the first missing file (results are read in order).
            if not os.path.exists(result_fname):
                break
            with open(result_fname, 'r') as result_file:
                reader = csv.reader(result_file, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
                # Last row holds (mean_return, std_return); cost = -return.
                mean_return, std_return = list(reader)[-1]
                mean_returns.append(-float(mean_return))
                std_returns.append(float(std_return))

        index = np.arange(len(mean_returns)) * 2 * bar_width
        color_palette = sns.color_palette('Set2', 10)
        color = [color_palette[i] for i in [3, 5, 4, 6, 7, 9, 8]]
        plt.bar(index, mean_returns, bar_width,
                alpha=opacity,
                color=color,
                yerr=std_returns,
                error_kw=error_config)
        plt.xlabel('Feature Dynamics', fontsize=title_fontsize)
        plt.ylabel('Average Cost', fontsize=title_fontsize)
        if args.infix == 'unseen':
            plt.title('Costs of Executions when Following Novel Cars', fontsize=title_fontsize)
        else:
            plt.title('Costs of Executions when Following Cars Seen During Training', fontsize=title_fontsize)
        plt.xticks(index, feature_dynamics_labels, fontsize=fontsize)
        plt.yticks(np.arange(10), fontsize=fontsize)
        plt.legend()
        ax.set_ylim((0, 10))
    else:
        method_names = ['orb_nofilter', 'ccot', 'trpo_pixel',
                        'unweighted_local_level4',
                        'trpo_iter_2_local_level4', 'trpo_iter_50_local_level4', 'fqi_local_level4']
        method_labels = ['ORB\nfeature\npoints\nIBVS',
                         'C-COT\nvisual\ntracker\nIBVS',
                         'CNN\n+TRPO\n($\geq$ 20000)',
                         'unweighted\nfeature\ndynamics\n+CEM\n(1500)',
                         'feature\ndynamics\n+TRPO\n($\geq$ 80)',
                         'feature\ndynamics\n+TRPO\n($\geq$ 2000)',
                         r'$\textbf{ours,}$' '\n'
                         r'$\textbf{feature}$' '\n'
                         r'$\textbf{dynamics}$' '\n'
                         r'$\textbf{+FQI}$' '\n'
                         r'$\textbf{(20)}$']
        if args.usetex:
            method_labels = [label.replace('_', '\hspace{-0.1em}\_\hspace{0.1em}') for label in method_labels]
        mean_returns = []
        std_returns = []
        for method_name in method_names:
            conditions = [method_name]
            if args.infix:
                conditions.append(args.infix)
            result_fname = os.path.join(args.results_dir, '_'.join(conditions + ['test.csv']))
            if not os.path.exists(result_fname):
                break
            with open(result_fname, 'r') as result_file:
                reader = csv.reader(result_file, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
                mean_return, std_return = list(reader)[-1]
                mean_returns.append(-float(mean_return))
                std_returns.append(float(std_return))

        index = np.arange(len(mean_returns)) * 2 * bar_width
        color_palette = sns.color_palette('Set2', 10)
        # First three bars (prior methods) share one color, the rest another.
        color = [color_palette[2]] * 3 + [color_palette[1]] * 5
        plt.bar(index, mean_returns, bar_width,
                alpha=opacity,
                color=color,
                yerr=std_returns,
                error_kw=error_config)
        # Dashed separator between prior methods and feature-dynamics methods.
        plt.axvline(x=(index[2] + index[3]) / 2., color='k', linestyle='--')
        plt.text((index[1] + index[2]) / 2., 4.5, 'prior methods that\ndo not use learned\nfeature dynamics',
                 horizontalalignment='center', verticalalignment='center', fontsize=title_fontsize)
        text = 'methods that use VGG conv4_3\nfeatures and their learned\n locally connected feature dynamics'
        if args.usetex:
            text = text.replace('_', '\hspace{-0.1em}\_\hspace{0.1em}')
        plt.text((index[4] + index[5]) / 2., 4.5, text, horizontalalignment='center', verticalalignment='center', fontsize=title_fontsize)
        plt.xlabel('Feature Representation and Optimization Method', fontsize=title_fontsize)
        plt.ylabel('Average Cost', fontsize=title_fontsize)
        # if args.infix == 'unseen':
        #     plt.title('Costs of Executions when Following Novel Cars', fontsize=title_fontsize)
        # else:
        #     plt.title('Costs of Executions when Following Cars Seen During Training', fontsize=title_fontsize)
        plt.xticks(index, method_labels, fontsize=fontsize)
        plt.yticks(fontsize=fontsize)
        plt.legend()
        ax.set_ylim((0, 5.5))

    if args.save:
        fname = args.experiment_name
        if args.infix:
            fname += '_' + args.infix
        fname += '_results'
        # NOTE(review): hard-coded absolute output path; only valid on the
        # author's machine.
        plt.savefig('/home/alex/Dropbox/visual_servoing/20160322/%s.pdf' % fname, bbox_inches='tight')
    else:
        plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| mit |
manics/openmicroscopy | components/tools/OmeroPy/src/omero/install/jvmcfg.py | 1 | 16238 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Automatic configuration of memory settings for Java servers.
"""
from types import StringType
from shlex import split
import logging
LOGGER = logging.getLogger("omero.install.jvmcfg")
def strip_dict(map, prefix=("omero", "jvmcfg"), suffix=(), limit=1):
    """
    For the given dictionary, return a copy of the
    dictionary where all entries not matching the
    prefix, suffix, and limit have been removed and
    where all remaining keys have had the prefix and
    suffix stripped. The limit describes the number
    of elements that are allowed in the new key after
    stripping prefix and suffix.

    `prefix` and `suffix` may be given either as dotted strings
    or as tuples of components.
    """
    # `str` is what types.StringType aliased; the builtin is portable
    # and does not require the deprecated alias.
    if isinstance(prefix, str):
        prefix = tuple(prefix.split("."))
    if isinstance(suffix, str):
        suffix = tuple(suffix.split("."))

    if not map:
        return dict()

    rv = dict()
    psz = len(prefix)
    ssz = len(suffix)
    for k, v in map.items():
        key = tuple(k.split("."))
        ksz = len(key)
        if ksz <= (psz + ssz):
            continue  # nothing would remain after stripping
        if key[0:psz] != prefix or key[ksz - ssz:] != suffix:
            continue  # prefix/suffix mismatch
        newkey = key[psz:ksz - ssz]
        # Only keys whose remaining component count equals `limit` survive.
        if len(newkey) == limit:
            rv[".".join(newkey)] = v
    return rv
class StrategyRegistry(dict):
    """
    Mapping of strategy names (e.g. "manual", "percent") to Strategy
    subclasses.  A plain dict subclass kept as a distinct type so the
    registry can be identified and extended.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: the original called super(dict, self).__init__(...),
        # which skips dict's own initializer entirely, so initial data
        # passed to the constructor was rejected or silently ignored.
        super(StrategyRegistry, self).__init__(*args, **kwargs)


# Global registry instance, populated once the strategy classes are defined.
STRATEGY_REGISTRY = StrategyRegistry()
class Settings(object):
    """
    Layered view of the JVM config options found in etc/grid/config.xml.

    Lookups consult, in priority order: programmatic overrides (set via
    overwrite), per-server values, global values, and finally built-in
    defaults.
    """

    def __init__(self, server_values=None, global_values=None):
        self.__server = dict() if server_values is None else server_values
        self.__global = dict() if global_values is None else global_values
        # Built-in defaults, consulted only when no other layer has the key.
        self.__static = {
            "strategy": PercentStrategy,
            "append": "",
            "perm_gen": "128m",
            "heap_dump": "off",
            "heap_size": "512m",
            "system_memory": None,
            "max_system_memory": "48000",
            "min_system_memory": "3414",
        }
        # Values set programmatically via overwrite().
        self.__manual = dict()

    def __getattr__(self, key):
        # Unknown attributes are treated as settings lookups; missing
        # keys therefore yield None instead of AttributeError.
        return self.lookup(key)

    def lookup(self, key, default=None):
        for layer in (self.__manual, self.__server,
                      self.__global, self.__static):
            if key in layer:
                return layer[key]
        return default

    def overwrite(self, key, value, always=False):
        # User-provided (server/global) values win unless always=True.
        if always or not self.was_set(key):
            self.__manual[key] = value

    def was_set(self, key):
        return key in self.__server or key in self.__global

    def get_strategy(self):
        return STRATEGY_REGISTRY.get(self.strategy, self.strategy)

    def __str__(self):
        combined = dict()
        combined.update(self.__server)
        combined.update(self.__global)
        return 'Settings(%s)' % (combined if combined else "")
class Strategy(object):
    """
    Strategy for calculating memory settings. Primary
    class of the memory module.
    """

    def __init__(self, name, settings=None):
        """
        'name' argument should likely be one of:
        ('blitz', 'indexer', 'pixeldata', 'repository')
        """
        if settings is None:
            settings = Settings()
        self.name = name
        self.settings = settings
        # Abstract base: direct instantiation is forbidden.
        if type(self) == Strategy:
            raise Exception("Must subclass!")

    # Memory helpers

    def system_memory_mb(self):
        """
        Returns a tuple, in MB, of available, active, and total memory.

        "total" memory is found by calling to first a Python library
        (if installed) and otherwise a Java class. If
        "system_memory" is set, it will short-circuit both methods.

        "active" memory is set to "total" but limited by "min_system_memory"
        and "max_system_memory".

        "available" may not be accurate, and in some cases will be
        set to total.
        """
        available, total = None, None

        if self.settings.system_memory is not None:
            total = int(self.settings.system_memory)
            available = total
        else:
            pymem = self._system_memory_mb_psutil()
            if pymem is not None:
                available, total = pymem
            else:
                available, total = self._system_memory_mb_java()

        max_system_memory = int(self.settings.max_system_memory)
        min_system_memory = int(self.settings.min_system_memory)
        # Clamp total into [min_system_memory, max_system_memory].
        active = max(min(total, max_system_memory), min_system_memory)
        return available, active, total

    def _system_memory_mb_psutil(self):
        """Return (free, total) in MB via psutil, or None if not installed."""
        try:
            import psutil
            pymem = psutil.virtual_memory()
            return (pymem.free/1000000, pymem.total/1000000)
        except ImportError:
            LOGGER.debug("No psutil installed")
            return None

    def _system_memory_mb_java(self):
        """Return (free, total) in MB by shelling out to a Java helper.

        Parses "Key:Value" lines printed by ome.services.util.JvmSettingsCheck;
        falls back to 2000/4000 MB if the output cannot be parsed.
        """
        import omero.cli
        import omero.java

        # Copied from db.py. Needs better dir detection
        cwd = omero.cli.CLI().dir
        jars = str(cwd / "lib" / "server") + "/*"
        cmd = ["ome.services.util.JvmSettingsCheck", "--psutil"]
        p = omero.java.popen(["-cp", str(jars)] + cmd)
        o, e = p.communicate()
        if p.poll() != 0:
            LOGGER.warn("Failed to invoke java:\nout:%s\nerr:%s",
                        o, e)
        rv = dict()
        for line in o.split("\n"):
            line = line.strip()
            if not line:
                continue
            parts = line.split(":")
            if len(parts) == 1:
                parts.append("")
            rv[parts[0]] = parts[1]

        try:
            free = long(rv["Free"]) / 1000000
        except:
            LOGGER.warn("Failed to parse Free from %s", rv)
            free = 2000

        try:
            total = long(rv["Total"]) / 1000000
        except:
            LOGGER.warn("Failed to parse Total from %s", rv)
            total = 4000

        return (free, total)

    # API Getters

    def get_heap_size(self, sz=None):
        """Return the -Xmx argument for the configured/supplied heap size.

        A user-set "heap_size" always wins over the sz argument; values
        already starting with "-X" pass through untouched, and a bare
        number gets an "m" (megabytes) unit appended.
        """
        if sz is None or self.settings.was_set("heap_size"):
            sz = self.settings.heap_size
        if str(sz).startswith("-X"):
            return sz
        else:
            rv = "-Xmx%s" % sz
            if rv[-1].lower() not in ("b", "k", "m", "g"):
                rv = "%sm" % rv
            return rv

    def get_heap_dump(self):
        """Return the heap-dump JVM flag, or "" when heap_dump is "off".

        NOTE(review): returns None (not "") for any unrecognized
        heap_dump value; callers filter falsy values, but
        get_memory_settings calls .startswith on the result — confirm.
        """
        hd = self.settings.heap_dump
        if hd == "off":
            return ""
        elif hd in ("on", "cwd", "tmp"):
            return "-XX:+HeapDumpOnOutOfMemoryError"

    def get_perm_gen(self):
        """Return the -XX:MaxPermSize argument (pass-through for -XX values)."""
        pg = self.settings.perm_gen
        if str(pg).startswith("-XX"):
            return pg
        else:
            return "-XX:MaxPermSize=%s" % pg

    def get_append(self):
        """Return extra JVM arguments: heap-dump path (for "tmp") plus
        whatever the user configured in the "append" setting."""
        values = []
        if self.settings.heap_dump == "tmp":
            import tempfile
            tmp = tempfile.gettempdir()
            values.append("-XX:HeapDumpPath=%s" % tmp)
        return values + split(self.settings.append)

    def get_memory_settings(self):
        """Assemble the full list of JVM memory arguments for this strategy."""
        values = [
            self.get_heap_size(),
            self.get_heap_dump(),
            self.get_perm_gen(),
        ]
        # When MaxPermSize is present, also tell the JVM to ignore options
        # it no longer recognizes — presumably so JVMs that dropped PermGen
        # do not fail on startup; confirm against the targeted Java versions.
        if any([x.startswith("-XX:MaxPermSize") for x in values]):
            values.append("-XX:+IgnoreUnrecognizedVMOptions")
        values += self.get_append()
        # Drop empty entries (e.g. heap_dump == "off").
        return [x for x in values if x]
class ManualStrategy(Strategy):
    """
    Simplest strategy which assumes all values have
    been set and simply uses them or their defaults.

    Registered in STRATEGY_REGISTRY under the name "manual".
    """
class PercentStrategy(Strategy):
    """
    Strategy based on a percent of available memory.
    """

    # Default memory percentage per server role; "other" is the fallback.
    PERCENT_DEFAULTS = (
        ("blitz", 15),
        ("pixeldata", 15),
        ("indexer", 10),
        ("repository", 10),
        ("other", 1),
    )

    def __init__(self, name, settings=None):
        super(PercentStrategy, self).__init__(name, settings)
        self.defaults = dict(self.PERCENT_DEFAULTS)
        # When True, percentages apply to the clamped "active" memory
        # rather than the raw total.
        self.use_active = True

    def get_heap_size(self):
        """
        Uses the results of the default settings of
        calculate_heap_size() as an argument to
        get_heap_size(), in other words some percent
        of the active memory.
        """
        sz = self.calculate_heap_size()
        return super(PercentStrategy, self).get_heap_size(sz)

    def get_percent(self):
        """Return the configured memory percentage for this server name,
        falling back to the role default and finally to "other"."""
        other = self.defaults.get("other", "1")
        default = self.defaults.get(self.name, other)
        percent = int(self.settings.lookup("percent", default))
        return percent

    def get_perm_gen(self):
        """Scale the PermGen default with machine size before delegating.

        <= 4000 MB keeps 128m (256m from 2000 MB), <= 8000 MB uses 512m,
        larger machines use 1g; an explicit user setting always wins
        (overwrite does not replace user-set values).
        """
        available, active, total = self.system_memory_mb()
        # NOTE(review): "cond and a or b" misbehaves if `active` is 0/falsy;
        # a conditional expression would be safer.
        choice = self.use_active and active or total
        if choice <= 4000:
            if choice >= 2000:
                self.settings.overwrite("perm_gen", "256m")
        elif choice <= 8000:
            self.settings.overwrite("perm_gen", "512m")
        else:
            self.settings.overwrite("perm_gen", "1g")

        return super(PercentStrategy, self).get_perm_gen()

    def calculate_heap_size(self, method=None):
        """
        Re-calculates the appropriate heap size based on the
        value of get_percent(). The "active" memory returned
        by method() will be used by default, but can be modified
        to use "total" via the "use_active" flag.
        """
        if method is None:
            method = self.system_memory_mb
        available, active, total = method()
        choice = self.use_active and active or total
        percent = self.get_percent()
        # Python 2 integer division: the result is floored to whole MB.
        calculated = choice * int(percent) / 100
        return calculated

    def usage_table(self, min=10, max=20):
        """Yield (total_mb, heap_mb) pairs for powers-of-two memory sizes,
        useful for documenting how the percentage scales."""
        total_mb = [2**x for x in range(min, max)]
        for total in total_mb:
            method = lambda: (total, total, total)
            yield total, self.calculate_heap_size(method)
# Register the built-in strategies by name so they can be looked up
# from configuration values.
STRATEGY_REGISTRY["manual"] = ManualStrategy
STRATEGY_REGISTRY["percent"] = PercentStrategy
def read_settings(template_xml):
    """
    Read the memory settings from the template file.

    Returns a dict mapping each server id to the list of "-Xmx"/"-XX"
    option strings found under it.
    """
    rv = dict()
    for template in template_xml.findall("server-template"):
        for server in template.findall("server"):
            for option in server.findall("option"):
                o = option.text
                # Fix: the original combined the two checks with the
                # bitwise `|` operator; use startswith's tuple form for
                # a single, short-circuiting test.
                if o.startswith(("-Xmx", "-XX")):
                    rv.setdefault(server.get('id'), []).append(o)
    return rv
def adjust_settings(config, template_xml,
                    blitz=None, indexer=None,
                    pixeldata=None, repository=None):
    """
    Takes an omero.config.ConfigXml object and adjusts
    the memory settings. Primary entry point to the
    memory module.
    """
    from xml.etree.ElementTree import Element
    from collections import defaultdict

    # Locate the "MEMORY:" placeholder options and the "REPLACEMENT:"
    # placeholder properties in the template XML.
    replacements = dict()
    options = dict()
    for template in template_xml.findall("server-template"):
        for server in template.findall("server"):
            for option in server.findall("option"):
                o = option.text
                if o.startswith("MEMORY:"):
                    options[o[7:]] = (server, option)
            for props in server.findall("properties"):
                for prop in props.findall("property"):
                    name = prop.attrib.get("name", "")
                    if name.startswith("REPLACEMENT:"):
                        replacements[name[12:]] = (server, prop)
    rv = defaultdict(list)
    m = config.as_map()
    loop = (("blitz", blitz), ("indexer", indexer),
            ("pixeldata", pixeldata), ("repository", repository))
    # Fail fast if any expected placeholder is missing before mutating
    # anything.
    for name, StrategyType in loop:
        if name not in options:
            raise Exception(
                "Cannot find %s option. Make sure templates.xml was "
                "not copied from an older server" % name)
    for name, StrategyType in loop:
        specific = strip_dict(m, suffix=name)
        defaults = strip_dict(m)
        settings = Settings(specific, defaults)
        rv[name].append(settings)
        if StrategyType is None:
            # No explicit strategy passed in; use the configured one.
            StrategyType = settings.get_strategy()
        if not callable(StrategyType):
            raise Exception("Bad strategy: %s" % StrategyType)
        strategy = StrategyType(name, settings)
        settings = strategy.get_memory_settings()
        server, option = options[name]
        idx = 0
        # The first value replaces the placeholder option's text; the
        # rest are inserted as new <option> elements.
        for v in settings:
            rv[name].append(v)
            if idx == 0:
                option.text = v
            else:
                elem = Element("option")
                elem.text = v
                server.insert(idx, elem)
            idx += 1
        # Now we check for any other properties and
        # put them where the replacement should go.
        # NOTE(review): the nesting below was reconstructed from a
        # whitespace-mangled source; `idx` is never incremented in the
        # inner loop, so with at most one match per key the else branch
        # appears dead -- confirm against upstream before relying on it.
        for k, v in m.items():
            r = []
            suffix = ".%s" % name
            size = len(suffix)
            if k.endswith(suffix):
                k = k[:-size]
                r.append((k, v))
            server, replacement = replacements[name]
            idx = 0
            for k, v in r:
                if idx == 0:
                    replacement.attrib["name"] = k
                    replacement.attrib["value"] = v
                else:
                    elem = Element("property", name=k, value=v)
                    server.append(elem)
    return rv
def usage_charts(path,
                 min=0, max=20,
                 Strategy=PercentStrategy, name="blitz"):
    """Render a 2x2 grid of heap-size-vs-memory charts to ``path``.

    Each subplot uses a different Settings configuration (labelled
    A-D) with the given Strategy.
    """
    # See http://matplotlib.org/examples/pylab_examples/anscombe.html
    from pylab import array
    from pylab import axis
    from pylab import gca
    from pylab import subplot
    from pylab import plot
    from pylab import setp
    from pylab import savefig
    from pylab import text

    # Total-memory values sampled on an exponential grid, scaled to GB.
    points = 200
    x = array([2 ** (x / points) / 1000
               for x in range(min*points, max*points)])
    y_configs = (
        (Settings({}), 'A'),
        (Settings({"percent": "20"}), 'B'),
        (Settings({}), 'C'),
        (Settings({"max_system_memory": "10000"}), 'D'),
    )

    def f(cfg):
        # Heap size the strategy computes for each total-memory value,
        # pretending available == active == total.
        s = Strategy(name, settings=cfg[0])
        y = []
        for total in x:
            method = lambda: (total, total, total)
            y.append(s.calculate_heap_size(method))
        return y

    y1 = f(y_configs[0])
    y2 = f(y_configs[1])
    y3 = f(y_configs[2])
    y4 = f(y_configs[3])
    axis_values = [0, 20, 0, 6]

    def ticks_f():
        # Shared tick positions for all four subplots.
        setp(gca(), xticks=(8, 16), yticks=(2, 4))

    def text_f(which):
        # Label the subplot with its configuration letter.
        cfg = y_configs[which]
        # s = cfg[0]
        txt = "%s" % (cfg[1],)
        text(2, 2, txt, fontsize=20)

    subplot(221)
    plot(x, y1)
    axis(axis_values)
    text_f(0)
    ticks_f()
    subplot(222)
    plot(x, y2)
    axis(axis_values)
    text_f(1)
    ticks_f()
    subplot(223)
    plot(x, y3)
    axis(axis_values)
    text_f(2)
    ticks_f()
    subplot(224)
    plot(x, y4)
    axis(axis_values)
    text_f(3)
    ticks_f()
    savefig(path)
| gpl-2.0 |
juanka1331/VAN-applied-to-Nifti-images | lib/data_loader/PET_stack_NORAD.py | 1 | 1951 | import scipy.io as sio
import settings
import numpy as np
import nibabel as nib
from matplotlib import pyplot as plt
def get_parameters():
    """Load only the stack metadata, avoiding the full image stack.

    :return: dict with keys 'voxel_index', 'imgsize' and 'total_size'
    """
    mat = sio.loadmat(settings.PET_stack_path)
    image_shape = [79, 95, 68]
    return {
        'voxel_index': mat['maskind'][0],
        'imgsize': image_shape,
        'total_size': np.array(image_shape).prod(),
    }
def get_full_stack():
    """Load the full PET stack from the NORAD .mat file.

    :return: dict with 'labels', 'stack', 'voxel_index', 'imgsize',
        'n_patients' and 'total_size' entries.
    """
    f = sio.loadmat(settings.PET_stack_path)
    # f -> dict_keys(['bmask', 'normtype', 'tu', 'thr', 'labels_conv', 'labels', '__globals__',
    # 'nthr', 'maskind', 'atlas', 'stack_all_norm', 'CLASV', 'stack_PET', '__header__', '__version__',
    # 'clastring', 'patient'])
    images_size = [79, 95, 68]
    voxels_index = f['maskind'][0]
    total_voxels = np.array(images_size).prod()
    images = f['stack_PET']  # [138 x 510340]
    patient_labels = f['labels']  # [1 x 138]
    # NOTE(review): if labels really is 1x138 as the comment above says,
    # len(patient_labels) is 1, not 138 -- confirm 'n_patients' intent.
    return {'labels': patient_labels,
            'stack': images,
            'voxel_index': voxels_index,
            'imgsize': images_size,
            'n_patients': len(patient_labels),
            'total_size': total_voxels}
def load_patients_labels():
    """Return only the per-patient labels from the full PET stack."""
    return get_full_stack()['labels']
def test():
    """Smoke test: rebuild one patient volume, show a slice, save NIfTI.

    Fix: the module defines ``get_full_stack()``, not ``get_stack()``;
    the previous call raised NameError at runtime.
    """
    data = get_full_stack()
    sample = data['stack'][50, :]
    # Scatter the masked voxels back into a full-size flat volume.
    template = np.zeros(data['imgsize'], dtype=float)
    template = template.flatten()
    template[data['voxel_index']] = sample
    # Fortran (column-major) order to match MATLAB's layout.
    out = np.reshape(template, [79, 95, 68], "F")
    plt.imshow(np.rot90(out[:, 30, :]), cmap='jet')
    plt.show(block=True)
    img = nib.Nifti1Image(out, np.eye(4))
    img.to_filename('test4d.nii.gz')
#test()
#stack = get_stack() | gpl-2.0 |
adrinjalali/Network-Classifier | parse_results.py | 1 | 19468 | import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pickle
import sys
import os
import re
import glob
from joblib import Parallel, delayed, logger
from itertools import chain
from scipy.stats import gaussian_kde
from plot_ratboost import generate_graph_plots
from plot_ratboost import get_feature_annotation
from misc import *
# Maps "<dataset>-<target>" problem keys to human-readable titles used
# in plot headings.
dataset_resolve = {'vantveer-prognosis': "Van 't Veer - Prognosis",
                   'TCGA-LAML-vital_status': 'TCGA-LAML-Methylation - Vital Status',
                   'TCGA-LAML-risk_group': 'TCGA-LAML-Methylation - Risk Group',
                   'TCGA-BRCA-ER': 'TCGA-BRCA - Estrogen Receptor',
                   'TCGA-BRCA-N': 'TCGA-BRCA - Lymph Node Status',
                   'TCGA-BRCA-T': 'TCGA-BRCA - Tumor Size',
                   'TCGA-BRCA-stage': 'TCGA-BRCA - Cancer Stage',
                   'TCGA-LAML-GeneExpression-risk_group':
                   'TCGA-LAML - Gene Expression - Risk Group',
                   'TCGA-LAML-GeneExpression-vital_status':
                   'TCGA-LAML - Gene Expression - Vital Status',
                   }
# Fixed ordering of classifier names so colors stay consistent across
# plots (see draw_plot).
methods_order = ['SVM, linear kernel',
                 'SVM, RBF kernel',
                 'SVM, linear kernel, transformed',
                 'Adaboost',
                 'Gradient Boosting Classifier',
                 'RatBoost']
def append_score(scores, score):
    """Recursively merge a single result into the ``scores`` accumulator.

    ``scores`` mirrors the (possibly nested) dict structure of
    ``score``; leaves are lists of scalar values. NaN leaf values are
    silently dropped.
    """
    if not isinstance(score, dict):
        value = np.array(score).flatten()[0]
        if not np.isnan(value):
            scores.append(value)
    else:
        for key, value in score.items():
            # Idiom fix: `key not in scores` instead of `not key in`.
            if key not in scores:
                scores[key] = dict() if isinstance(value, dict) else []
            append_score(scores[key], value)
def get_scores(root_dir):
    """Walk ``root_dir``/<data>/<target>/results and load pickled scores.

    Returns a dict keyed by "<data>-<target>" problem name, each value
    being a nested scores dict built via append_score().
    """
    datas = os.listdir(root_dir)
    datas = [name for name in datas if os.path.isdir(
        root_dir + '/' + name)]
    all_scores = dict()
    for data in datas:
        if data == 'synthesized':
            continue
        print(data)
        targets = os.listdir(root_dir + '/' + data)
        targets = [name for name in targets if os.path.isdir(
            '%s/%s/%s' % (root_dir, data, name))]
        for target in targets:
            print(target)
            if (not os.path.isdir('%s/%s/%s/results/' % (root_dir,
                                                         data,
                                                         target))):
                continue;
            files = os.listdir('%s/%s/%s/results/' % (root_dir,
                                                      data,
                                                      target))
            print(len(files))
            problem = '%s-%s' %(data, target)
            all_scores[problem] = dict()
            for f in files:
                #print('%s/%s/results/%s' % (
                #    data, target, f))
                try:
                    scores = pickle.load(open('%s/%s/%s/results/%s' % (
                        root_dir, data, target, f), 'rb'))
                except EOFError as e:
                    print(e)
                    print('%s/%s/results/%s' % (
                        data, target, f))
                # Result file names look like "<method>-<cv>-<major>...".
                # NOTE(review): after an EOFError, `scores` from a previous
                # iteration (or nothing at all) is appended below -- a
                # `continue` in the except block looks intended; confirm.
                method, cv_index, major = re.split('[-\.]', f)[:3]
                cv_index = int(cv_index)
                append_score(all_scores[problem], scores)
    return(all_scores)
def add_text(prefix, parameter):
    """Append "name: value" text for a (name, value) parameter tuple.

    Non-tuple parameters and parameters in the ignore list leave
    ``prefix`` unchanged. A comma separator is inserted unless the
    prefix is empty or whitespace-only.

    Fix: removed the unreachable ``return ('NA')`` trailing the
    original if/else (both branches already return).
    """
    ignore_parameters = ('learner_type', 'regularizer_index')
    if not isinstance(parameter, tuple) or parameter[0] in ignore_parameters:
        return prefix
    entry = "%s: %s" % (str(parameter[0]), str(parameter[1]))
    if prefix.strip() == '':
        return prefix + entry
    return "%s, %s" % (prefix, entry)
def ignore_key(key, filters):
    """Return True when ``key`` is excluded by ``filters``.

    ``filters`` is None (nothing filtered), a single (name, value)
    tuple, or a list of such tuples; a key is ignored when a filter
    matches its name but demands a different value.

    Fix: compare against None with ``is`` rather than ``==``.
    """
    if filters is None:
        return False
    if isinstance(filters, tuple):
        return filters[0] == key[0] and filters[1] != key[1]
    for f in filters:
        if f[0] == key[0] and f[1] != key[1]:
            return True
    return False
def flatten_scores_dict(scores, filters=None):
    """Flatten a nested scores dict into parallel (values, labels) lists.

    Keys are visited in sorted order; entries rejected by
    ``ignore_key`` are skipped, and nested dicts are flattened
    recursively with their key text folded into each label via
    ``add_text``.
    """
    values = list()
    labels = list()
    for key in sorted(scores):
        if ignore_key(key, filters):
            continue
        entry = scores[key]
        if isinstance(entry, dict):
            sub_values, sub_labels = flatten_scores_dict(entry, filters)
            labels.extend(add_text(lbl, key) for lbl in sub_labels)
            values.extend(sub_values)
        else:
            values.append(entry)
            labels.append(add_text('', key))
    return (values, labels)
def draw_plot(all_scores, problem, filters = None, plt_ax = None, verbose=True):
    """Box-plot the scores of every method for one problem.

    Returns (box artists, method names) for legend reuse by callers.
    """
    # One color per method, assigned in methods_order order.
    colors = ['b', 'g', 'y', 'k', 'c', 'r', 'm', '0.5', '0.9']
    index = 0
    plot_colors = []
    plot_texts = []
    tmp = list()
    if (verbose):
        print_scores(all_scores[problem])
    for method in methods_order:
        if not method in all_scores[problem]:
            continue
        _scores, _texts = flatten_scores_dict(all_scores[problem][method], filters)
        for s in _scores:
            tmp.append(s)
        for t in _texts:
            plot_texts.append(t)
            plot_colors.append(colors[index])
        #print(plot_colors)
        index += 1
    if (plt_ax is None):
        fig, ax = plt.subplots()
    else:
        #ax = plt.subplot(211)
        ax = plt_ax
    pl = ax.boxplot(tmp, True)
    # Collect one box artist per method (first box of each color run)
    # to build the legend.
    last_color = None
    idx = 0
    objs = []
    nms = []
    for i in range(len(plot_colors)):
        pl['boxes'][i].set_c(plot_colors[i])
        pl['boxes'][i].set_linewidth(2)
        if last_color != plot_colors[i]:
            objs.append(pl['boxes'][i])
            nms.append(methods_order[idx])
            idx += 1
            last_color = plot_colors[i]
    #lgnd = plt.legend(objs, nms, fancybox=True)
    #lgnd = plt.legend(objs, nms, bbox_to_anchor=(1.05,1),loc=2,borderaxespad=0.)
    lgnd = plt.legend(objs, nms, bbox_to_anchor=[-1.6, -2.1, 2, 2],
                      loc='lower center',
                      ncol=2,
                      mode="expand",
                      borderaxespad=0.,
                      fancybox=True
                      )
    #lgnd.draggable(True)
    for l in lgnd.get_lines():
        l.set_linewidth(3)
    ax.set_title(dataset_resolve[problem])
    texts = ax.set_xticklabels(plot_texts)
    for text in texts:
        text.set_rotation(270)
    if (plt_ax is None):
        plt.show()
    return (objs, nms)
def draw_plots(all_scores):
    """Draw the per-problem box plot for every problem, in sorted order."""
    for problem in sorted(all_scores):
        print(problem)
        draw_plot(all_scores, problem)
def load_models_info(root_dir, regularizer_index, learner_count,
                     data = None,
                     target = None):
    """Summarize saved RatBoost models as feature co-occurrence graphs.

    For each data/target pair, loads the pickled model structures,
    counts how often features appear (node confidence) and co-occur
    (edge weights), estimates a density-based threshold, and writes
    density and graph summary figures under tmp/.
    """
    # Normalize data/target arguments: None -> scan directories,
    # scalar -> single-element list.
    if (data is None):
        datas = os.listdir(root_dir)
        datas = [name for name in datas if os.path.isdir(
            root_dir + '/' + name)]
    elif isinstance(data, list):
        datas = data
    else:
        datas = list([data])
    for data in datas:
        print(data)
        if (target is None):
            targets = os.listdir(root_dir + '/' + data)
        elif isinstance(target, list):
            targets = target
        else:
            targets = list([target])
        for target in targets:
            print(target)
            files = glob.glob('%s/%s/%s/models/*-rat-%d-*' % (root_dir,
                                                              data,
                                                              target,
                                                              regularizer_index))
            structs = list()
            for f in files:
                struct = pickle.load(open(f, 'rb'))
                structs.append(struct)
            # Feature names used by the first `learner_count` learners
            # of each model.
            node_groups = [[list(m.keys()) for
                            m in s[:learner_count]]
                           for s in structs]
            nodes = sorted(set(chain.from_iterable(
                chain.from_iterable(node_groups))))
            vertex_map_name2index = dict()
            vertex_map_index2name = dict()
            for i in range(len(nodes)):
                vertex_map_name2index[nodes[i]] = i
                vertex_map_index2name[i] = nodes[i]
            # node_confidence: appearance counts; adj: co-occurrence counts.
            node_confidence = dict()
            adj = np.zeros(shape=(len(nodes), len(nodes)))
            for s in structs:
                for m in s[:learner_count]:
                    for i in m:
                        if (i in node_confidence):
                            node_confidence[i] += 1
                        else:
                            node_confidence[i] = 1
                        for j in m:
                            if i != j:
                                v1 = nodes.index(i)
                                v2 = nodes.index(j)
                                adj[v1,v2] = adj[v1,v2] + 1
                                #adj[v2,v1] = adj[v2,v1] + 1
            density = gaussian_kde(adj[adj != 0])
            xs = np.linspace(0, max(adj[adj != 0]), 200)
            #density.covariance_factor = lambda : .25
            #density._compute_covariance()
            #find threshold for top X% of area under the density curve
            # Bisection on the KDE tail mass until ~10% lies above `mid`.
            low = -1e3
            high = 1e3
            target_top = 0.1
            while(True):
                mid = (low + high) / 2
                mid_v = density.integrate_box_1d(mid, 1e4)
                print(mid, mid_v)
                if (abs(mid_v - target_top) < 1e-3):
                    break
                if (mid_v > target_top):
                    low = mid
                elif (mid_v < target_top):
                    high = mid
            plt.figure()
            fig = plt.gcf()
            fig.set_size_inches(7,7)
            plt.plot(xs, density(xs))
            plt.xlim([0,100])
            plt.axvline(mid, c='g')
            #plt.show()
            plt.savefig('tmp/density-%s-%s-%d.eps' %
                        (data, target, regularizer_index // 2), dpi=100)
            plt.close()
            threshold = mid
            # Build the co-occurrence graph, keeping only edges above
            # the threshold; edge length shrinks with co-occurrence.
            tmp_g = gt.Graph(directed = False)
            has_edge = dict()
            tmp_g.add_vertex(len(nodes))
            node_confidence_vprop = tmp_g.new_vertex_property('double')
            for key, value in node_confidence.items():
                node_confidence_vprop[tmp_g.vertex(nodes.index(key))] = value
            edge_weights = tmp_g.new_edge_property('double')
            for i in range(len(nodes)):
                for j in range(len(nodes)):
                    if i > j and adj[i,j] > threshold:
                        has_edge[i] = True
                        has_edge[j] = True
                        e = tmp_g.add_edge(i, j)
                        edge_weights[e] = 1 + 1/adj[i,j]
            feature_annotation = get_feature_annotation(root_dir, data, target)
            vlabel = tmp_g.new_vertex_property('string')
            vfontcolor = tmp_g.new_vertex_property('string')
            feature_list = list()
            # Label connected or high-confidence vertices; font color
            # switches for readability on dark (high-confidence) nodes.
            for v in tmp_g.vertices():
                if (v.in_degree() > 0 or v.out_degree() > 0 or
                        node_confidence[vertex_map_index2name[int(v)]] > threshold):
                    vlabel[v] = feature_annotation[vertex_map_index2name[int(v)]]
                    feature_list.append(vertex_map_index2name[int(v)])
                    if (node_confidence[vertex_map_index2name[int(v)]] >
                            np.mean((max(node_confidence.values()),
                                     min(node_confidence.values())))):
                        vfontcolor[v] = 'white'
                    else:
                        vfontcolor[v] = 'black'
            gt.draw.graphviz_draw(tmp_g, layout='neato',
                                  size=(25,25),
                                  vcolor=node_confidence_vprop,
                                  vcmap=plt.get_cmap('Blues'),
                                  vprops = {'label': vlabel,
                                            'shape': 'ellipse',
                                            'vpenwidth': 1,
                                            'fontcolor': vfontcolor},
                                  eprops = {'len': edge_weights},
                                  #gprops = {'labelloc':'t',
                                  #          'label':'High Confidence Features: %s %s (%g)' % \
                                  #          (data, target, threshold)},
                                  output = 'tmp/summary-%s-%s-%02d.eps' %
                                           (data, target, regularizer_index // 2))
            #if (data == 'TCGA-BRCA'):
            #    continue
            generate_graph_plots(root_dir, data, target, learner_count,
                                 regularizer_index,
                                 feature_list,
                                 node_confidence,
                                 cv_index = 35,
                                 threshold = threshold,
                                 plot_titles = False)
if __name__ == '__main__':
    # Minimal hand-rolled CLI parsing: only --root-dir is recognized;
    # everything else is just echoed.
    root_dir = ''
    for i in range(len(sys.argv)):
        print(sys.argv[i])
        if (sys.argv[i] == '--root-dir'):
            root_dir = sys.argv[i + 1]
    if (root_dir == ''):
        root_dir = "../../Data"
    all_scores = get_scores(root_dir)
    print_scores(all_scores)
    methods = ['Gradient Boosting Classifier', 'SVM', 'Raccoon', 'Raccoon_static', 'Adaboost', 'RatBoost']
    print_summary(all_scores, methods)
    methods = ['Gradient Boosting Classifier', 'SVM', 'Raccoon', 'Raccoon_static', 'Adaboost']
    print_summary(all_scores, methods)
    methods = ['Raccoon', 'Raccoon_static']
    print_summary(all_scores, methods)
    #draw_plots(all_scores)
    #draw_plot(all_scores, 'vantveer-prognosis', ('regularizer_index', 4))
    '''
    at the moment there are 9 dataset/problems, plot them in
    3x3 subplots
    '''
    # NOTE(review): the triple-quoted block below is dead code retained
    # as a bare string literal; its content is preserved verbatim.
    """
    regularizer_indices = [2, 4, 6, 8, 10, 12, 14, 16, 18]
    for ri in regularizer_indices:
    print(ri)
    f, axarr = plt.subplots(3, 3, sharey = False)
    draw_plot(all_scores, 'TCGA-BRCA-T',
    ('regularizer_index', ri), axarr[0, 0], False)
    draw_plot(all_scores, 'TCGA-BRCA-N',
    ('regularizer_index', ri), axarr[0, 1], False)
    draw_plot(all_scores, 'TCGA-BRCA-ER',
    ('regularizer_index', ri), axarr[0, 2], False)
    draw_plot(all_scores, 'TCGA-BRCA-stage',
    ('regularizer_index', ri), axarr[1, 0], False)
    draw_plot(all_scores, 'TCGA-LAML-GeneExpression-risk_group',
    ('regularizer_index', ri), axarr[1, 1], False)
    draw_plot(all_scores, 'TCGA-LAML-GeneExpression-vital_status',
    ('regularizer_index', ri), axarr[1, 2], False)
    draw_plot(all_scores, 'TCGA-LAML-risk_group',
    ('regularizer_index', ri), axarr[2, 1], False)
    draw_plot(all_scores, 'TCGA-LAML-vital_status',
    ('regularizer_index', ri), axarr[2, 2], False)
    draw_plot(all_scores, 'vantveer-prognosis',
    ('regularizer_index', ri), axarr[2, 0], False)
    fig = plt.gcf()
    fig.set_size_inches(18.5,10.5)
    plt.tight_layout(pad=2)
    plt.savefig('tmp/performance-notaligned-%02d.eps' % (ri // 2), dpi=100)
    plt.close()
    #plt.show()
    f, axarr = plt.subplots(3, 3, sharey = True)
    draw_plot(all_scores, 'TCGA-BRCA-T',
    ('regularizer_index', ri), axarr[0, 0], False)
    draw_plot(all_scores, 'TCGA-BRCA-N',
    ('regularizer_index', ri), axarr[0, 1], False)
    draw_plot(all_scores, 'TCGA-BRCA-ER',
    ('regularizer_index', ri), axarr[0, 2], False)
    draw_plot(all_scores, 'TCGA-BRCA-stage',
    ('regularizer_index', ri), axarr[1, 0], False)
    draw_plot(all_scores, 'TCGA-LAML-GeneExpression-risk_group',
    ('regularizer_index', ri), axarr[1, 1], False)
    draw_plot(all_scores, 'TCGA-LAML-GeneExpression-vital_status',
    ('regularizer_index', ri), axarr[1, 2], False)
    draw_plot(all_scores, 'TCGA-LAML-risk_group',
    ('regularizer_index', ri), axarr[2, 1], False)
    draw_plot(all_scores, 'TCGA-LAML-vital_status',
    ('regularizer_index', ri), axarr[2, 2], False)
    draw_plot(all_scores, 'vantveer-prognosis',
    ('regularizer_index', ri), axarr[2, 0], False)
    fig = plt.gcf()
    plt.ylim([0.1,1])
    fig.set_size_inches(18.5,10.5)
    plt.tight_layout(pad=2)
    plt.savefig('tmp/performance-aligned-%02d.eps' % (ri // 2), dpi=100)
    plt.close()
    learner_count = 4
    load_models_info(root_dir, ri, learner_count)
    f, axarr = plt.subplots(1, 1, sharey = True)
    draw_plot(all_scores, 'TCGA-BRCA-N',
    ('regularizer_index', ri), axarr, False)
    fig = plt.gcf()
    plt.ylim([0.1,1])
    fig.set_size_inches(8,5)
    plt.tight_layout(pad=2)
    plt.savefig('tmp/one-performance-aligned-%02d.eps' % (ri // 2), dpi=100)
    plt.close()
    regularizer_indices = [2, 4, 6, 8, 10, 12, 14, 16, 18]
    for ri in regularizer_indices:
    print(ri)
    learner_count = 4
    load_models_info(root_dir, ri, learner_count, data='TCGA-LAML-GeneExpression',
    target='risk_group')
    #load_models_info(root_dir, ri, learner_count, data='vantveer',
    #                 target='prognosis')
    def get_points(v):
    if (isinstance(v, dict)):
    result = list()
    for x in v.values():
    p = get_points(x)
    [result.append(r) for r in p]
    return result
    if (isinstance(v, list)):
    return [(np.mean(v), np.std(v), len(v))]
    import matplotlib.patches as mpatches
    from datetime import datetime
    colors = ['b', 'g', 'y', 'k', 'c', 'r', 'm', '0.5', '0.9']
    for key, value in all_scores.items():
    points = dict()
    c = 0
    pcolors = list()
    x = list()
    y = list()
    patches = list()
    lens = list()
    desc_lens = dict()
    jkeys = sorted(value.keys())
    for jkey in jkeys:
    jvalue = value[jkey]
    ps = get_points(jvalue)
    points[jkey] = ps
    [x.append(i[0]) for i in ps]
    [y.append(i[1]) for i in ps]
    [lens.append(i[2]) for i in ps]
    desc_lens[jkey] = [i[2] for i in ps]
    [pcolors.append(colors[c]) for i in ps]
    patches.append(mpatches.Patch(color=colors[c], label=jkey))
    c = c + 1
    if (len(lens) == 0):
    continue
    print('\n', key)
    print(desc_lens)
    scales = [(s - min(lens) + 1) * 100 / (max(lens) - min(lens) + 1)
    for s in lens]
    plt.figure()
    mplot = plt.scatter(x = x, y = y, c = pcolors, s = scales)
    plt.title('%s, max: %d, min: %d' % (key, max(lens), min(lens)))
    plt.legend(handles = patches, loc=1)
    fig = plt.gcf()
    fig.set_size_inches(18.5,10.5)
    fig.savefig('plots/%s - performance_scatter - %s.eps' %
    (datetime.now().strftime("%Y-%m-%d %H%M"),
    key))
    #plt.show()
    """
| gpl-3.0 |
mjasher/gac | GAC/flopy/utils/datafile.py | 1 | 16152 | """
Module to read MODFLOW output files. The module contains shared
abstract classes that should not be directly accessed.
"""
from __future__ import print_function
import numpy as np
import flopy.utils
class Header():
    """
    The header class is an abstract base class to create headers for MODFLOW files
    """
    def __init__(self, filetype=None, precision='single'):
        # Record float width: 4 bytes for single, 8 for double precision.
        floattype = 'f4'
        if precision == 'double':
            floattype = 'f8'
        self.header_types = ['head', 'ucn']
        if filetype is None:
            self.header_type = None
        else:
            self.header_type = filetype.lower()
        if self.header_type in self.header_types:
            # Structured dtype matching the binary record layout of the
            # requested file type.
            if self.header_type == 'head':
                self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'),
                                       ('pertim', floattype), ('totim', floattype),
                                       ('text', 'a16'),
                                       ('ncol', 'i4'), ('nrow', 'i4'), ('ilay', 'i4')])
            elif self.header_type == 'ucn':
                self.dtype = np.dtype([('ntrans', 'i4'), ('kstp', 'i4'), ('kper', 'i4'),
                                       ('totim', floattype), ('text', 'a16'),
                                       ('ncol', 'i4'), ('nrow', 'i4'), ('ilay', 'i4')])
            self.header = np.ones(1, self.dtype)
        else:
            # Unknown (or None) file type: no dtype/header, just report
            # the supported types. Note this also triggers for the
            # default Header() construction with filetype=None.
            self.dtype = None
            self.header = None
            print('Specified {0} type is not available. Available types are:'.format(self.header_type))
            for idx, t in enumerate(self.header_types):
                print(' {0} {1}'.format(idx+1, t))
        return

    def get_dtype(self):
        """
        Return the dtype
        """
        return self.dtype

    def get_names(self):
        """
        Return the dtype names
        """
        return self.dtype.names

    def get_values(self):
        """
        Return the header values
        """
        if self.header is None:
            return None
        else:
            return self.header[0]
class LayerFile(object):
    """
    The LayerFile class is the abstract base class from which specific derived
    classes are formed. LayerFile This class should not be instantiated directly.
    """
    def __init__(self, filename, precision, verbose, kwargs):
        # NOTE(review): ``kwargs`` is a plain dict parameter here (not a
        # **kwargs catch-all); derived classes pass their kwargs dict in.
        self.filename = filename
        self.precision = precision
        self.verbose = verbose
        self.file = open(self.filename, 'rb')
        self.nrow = 0
        self.ncol = 0
        self.nlay = 0
        self.times = []
        self.kstpkper = []
        self.recordarray = []
        self.iposarray = []

        if precision == 'single':
            self.realtype = np.float32
        elif precision == 'double':
            self.realtype = np.float64
        else:
            raise Exception('Unknown precision specified: ' + precision)

        # Optional spatial context: a model (implies dis and sr), a
        # discretization object (implies sr), or a SpatialReference.
        self.model = None
        self.dis = None
        self.sr = None
        if 'model' in kwargs.keys():
            self.model = kwargs.pop('model')
            self.sr = self.model.dis.sr
            self.dis = self.model.dis
        if 'dis' in kwargs.keys():
            self.dis = kwargs.pop('dis')
            self.sr = self.dis.sr
        if 'sr' in kwargs.keys():
            self.sr = kwargs.pop('sr')
        if len(kwargs.keys()) > 0:
            args = ','.join(kwargs.keys())
            raise Exception('LayerFile error: unrecognized kwargs: '+args)

        #read through the file and build the pointer index
        self._build_index()

        # now that we read the data and know nrow and ncol,
        # we can make a generic sr if needed
        if self.sr is None:
            self.sr = flopy.utils.SpatialReference(np.ones(self.ncol), np.ones(self.nrow), 0)
        return

    def to_shapefile(self, filename, kstpkper=None, totim=None, mflay=None, attrib_name='lf_data'):
        """
        Export model output data to a shapefile at a specific location
        in LayerFile instance.

        Parameters
        ----------
        filename : str
            Shapefile name to write
        kstpkper : tuple of ints
            A tuple containing the time step and stress period (kstp, kper).
            These are zero-based kstp and kper values.
        totim : float
            The simulation time.
        mflay : integer
            MODFLOW zero-based layer number to return.  If None, then layer 1
            will be written
        attrib_name : str
            Base name of attribute columns. (default is 'lf_data')

        Returns
        ----------
        None

        See Also
        --------

        Notes
        -----

        Examples
        --------
        >>> import flopy
        >>> hdobj = flopy.utils.HeadFile('test.hds')
        >>> times = hdobj.get_times()
        >>> hdobj.to_shapefile('test_heads_sp6.shp', totim=times[-1])
        """
        plotarray = np.atleast_3d(self.get_data(kstpkper=kstpkper,
                                                totim=totim, mflay=mflay)
                                  .transpose()).transpose()
        if mflay != None:
            # Single requested layer: one attribute column named after it.
            attrib_dict = {attrib_name+'{0:03d}'.format(mflay):plotarray[0, :, :]}
        else:
            # All layers: one attribute column per layer index.
            attrib_dict = {}
            for k in range(plotarray.shape[0]):
                name = attrib_name+'{0:03d}'.format(k)
                attrib_dict[name] = plotarray[k]
        from flopy.utils.flopy_io import write_grid_shapefile
        write_grid_shapefile(filename, self.sr, attrib_dict)

    def plot(self, axes=None, kstpkper=None, totim=None, mflay=None,
             filename_base=None, **kwargs):
        '''
        Plot 3-D model output data in a specific location
        in LayerFile instance

        Parameters
        ----------
        axes : list of matplotlib.pyplot.axis
            List of matplotlib.pyplot.axis that will be used to plot
            data for each layer. If axes=None axes will be generated.
            (default is None)
        kstpkper : tuple of ints
            A tuple containing the time step and stress period (kstp, kper).
            These are zero-based kstp and kper values.
        totim : float
            The simulation time.
        mflay : int
            MODFLOW zero-based layer number to return.  If None, then all
            all layers will be included. (default is None)
        filename_base : str
            Base file name that will be used to automatically generate file
            names for output image files. Plots will be exported as image
            files if file_name_base is not None. (default is None)
        **kwargs : dict
            pcolor : bool
                Boolean used to determine if matplotlib.pyplot.pcolormesh
                plot will be plotted. (default is True)
            colorbar : bool
                Boolean used to determine if a color bar will be added to
                the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
                (default is False)
            contour : bool
                Boolean used to determine if matplotlib.pyplot.contour
                plot will be plotted. (default is False)
            clabel : bool
                Boolean used to determine if matplotlib.pyplot.clabel
                will be plotted. Only used if contour=True. (default is False)
            grid : bool
                Boolean used to determine if the model grid will be plotted
                on the figure. (default is False)
            masked_values : list
                List of unique values to be excluded from the plot.
            file_extension : str
                Valid matplotlib.pyplot file extension for savefig(). Only used
                if filename_base is not None. (default is 'png')

        Returns
        ----------
        None

        See Also
        --------

        Notes
        -----

        Examples
        --------
        >>> import flopy
        >>> hdobj = flopy.utils.HeadFile('test.hds')
        >>> times = hdobj.get_times()
        >>> hdobj.plot(totim=times[-1])
        '''
        if 'file_extension' in kwargs:
            fext = kwargs.pop('file_extension')
            fext = fext.replace('.', '')
        else:
            fext = 'png'

        filenames = None
        if filename_base is not None:
            # Clamp the requested layer range to what the file contains.
            if mflay is not None:
                i0 = int(mflay)
                if i0+1 >= self.nlay:
                    i0 = self.nlay - 1
                i1 = i0 + 1
            else:
                i0 = 0
                i1 = self.nlay
            filenames = []
            [filenames.append('{}_Layer{}.{}'.format(filename_base, k+1, fext)) for k in range(i0, i1)]

        # make sure we have a (lay,row,col) shape plotarray
        plotarray = np.atleast_3d(self.get_data(kstpkper=kstpkper,
                                                totim=totim, mflay=mflay)
                                  .transpose()).transpose()
        import flopy.plot.plotutil as pu
        return pu._plot_array_helper(plotarray, model=self.model, sr=self.sr, axes=axes,
                                     filenames=filenames,
                                     mflay=mflay, **kwargs)

    def _build_index(self):
        """
        Build the recordarray and iposarray, which maps the header information
        to the position in the formatted file.
        """
        raise Exception('Abstract method _build_index called in LayerFile. This method needs to be overridden.')

    def list_records(self):
        """
        Print a list of all of the records in the file
        obj.list_records()
        """
        for header in self.recordarray:
            print(header)
        return

    def _get_data_array(self, totim=0):
        """
        Get the three dimensional data array for the
        specified kstp and kper value or totim value.
        """
        if totim > 0.:
            keyindices = np.where((self.recordarray['totim'] == totim))[0]
        else:
            raise Exception('Data not found...')

        #initialize head with nan and then fill it
        data = np.empty((self.nlay, self.nrow, self.ncol),
                        dtype=self.realtype)
        data[:, :, :] = np.nan
        # Seek to each matching record and read its layer into place
        # (ilay is one-based in the file).
        for idx in keyindices:
            ipos = self.iposarray[idx]
            ilay = self.recordarray['ilay'][idx]
            if self.verbose:
                print('Byte position in file: {0}'.format(ipos))
            self.file.seek(ipos, 0)
            data[ilay - 1, :, :] = self._read_data()
        return data

    def get_times(self):
        """
        Get a list of unique times in the file

        Returns
        ----------
        out : list of floats
            List contains unique simulation times (totim) in binary file.
        """
        return self.times

    def get_kstpkper(self):
        """
        Get a list of unique stress periods and time steps in the file

        Returns
        ----------
        out : list of (kstp, kper) tuples
            List of unique kstp, kper combinations in binary file.  kstp and
            kper values are presently zero-based.
        """
        # Stored values are one-based; convert to zero-based for callers.
        kstpkper = []
        for kstp, kper in self.kstpkper:
            kstpkper.append((kstp - 1, kper - 1))
        return kstpkper

    def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None):
        """
        Get data from the file for the specified conditions.

        Parameters
        ----------
        idx : int
            The zero-based record number.  The first record is record 0.
        kstpkper : tuple of ints
            A tuple containing the time step and stress period (kstp, kper).
            These are zero-based kstp and kper values.
        totim : float
            The simulation time.
        mflay : integer
            MODFLOW zero-based layer number to return.  If None, then all
            all layers will be included. (Default is None.)

        Returns
        ----------
        data : numpy array
            Array has size (nlay, nrow, ncol) if mflay is None or it has size
            (nrow, ncol) if mlay is specified.

        See Also
        --------

        Notes
        -----
        if both kstpkper and totim are None, will return the last entry

        Examples
        --------
        """
        # One-based kstp and kper for pulling out of recarray
        if kstpkper is not None:
            kstp1 = kstpkper[0] + 1
            kper1 = kstpkper[1] + 1
            totim1 = self.recordarray[np.where(
                (self.recordarray['kstp'] == kstp1) &
                (self.recordarray['kper'] == kper1))]["totim"][0]
        elif totim is not None:
            totim1 = totim
        elif idx is not None:
            totim1 = self.recordarray['totim'][idx]
        else:
            totim1 =self.times[-1]
        data = self._get_data_array(totim1)
        if mflay is None:
            return data
        else:
            return data[mflay, :, :]

    def get_alldata(self, mflay=None, nodata=-9999):
        """
        Get all of the data from the file.

        Parameters
        ----------
        mflay : integer
            MODFLOW zero-based layer number to return.  If None, then all
            all layers will be included. (Default is None.)
        nodata : float
            The nodata value in the data array.  All array values that have the
            nodata value will be assigned np.nan.

        Returns
        ----------
        data : numpy array
            Array has size (ntimes, nlay, nrow, ncol) if mflay is None or it
            has size (ntimes, nrow, ncol) if mlay is specified.

        See Also
        --------

        Notes
        -----

        Examples
        --------
        """
        rv = []
        for totim in self.times:
            h = self.get_data(totim=totim, mflay=mflay)
            rv.append(h)
        rv = np.array(rv)
        rv[rv == nodata] = np.nan
        return rv

    def _read_data(self):
        """
        Read data from file
        """
        raise Exception('Abstract method _read_data called in LayerFile. This method needs to be overridden.')

    def _build_kijlist(self, idx):
        # Normalize a (k, i, j) tuple or list of tuples to a list.
        if isinstance(idx, list):
            kijlist = idx
        elif isinstance(idx, tuple):
            kijlist = [idx]
        # Check to make sure that k, i, j are within range, otherwise
        # the seek approach won't work.  Can't use k = -1, for example.
        for k, i, j in kijlist:
            fail = False
            errmsg = 'Invalid cell index. Cell ' + str((k, i, j)) + ' not within model grid: ' + \
                     str((self.nlay, self.nrow, self.ncol))
            if k < 0 or k > self.nlay - 1:
                fail = True
            if i < 0 or i > self.nrow - 1:
                fail = True
            if j < 0 or j > self.ncol - 1:
                fail = True
            if fail:
                raise Exception(errmsg)
        return kijlist

    def _get_nstation(self, idx, kijlist):
        # Number of observation stations implied by the index argument.
        if isinstance(idx, list):
            return len(kijlist)
        elif isinstance(idx, tuple):
            return 1

    def _init_result(self, nstation):
        # Initialize result array and put times in first column
        result = np.empty((len(self.times), nstation + 1),
                          dtype=self.realtype)
        result[:, :] = np.nan
        result[:, 0] = np.array(self.times)
        return result

    def close(self):
        """
        Close the file handle.
        """
        self.file.close()
        return
| gpl-2.0 |
rrohan/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor

# Create a random dataset: a single feature X mapped onto noisy
# (sin, cos) circle coordinates, with every 5th target perturbed.
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))

# Fit regression model at three depths to contrast under/overfitting
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)

# Predict on a dense grid of the input feature
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)

# Plot the results: predicted (x, y) pairs against the noisy circle
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
ankurankan/scikit-learn | sklearn/metrics/pairwise.py | 1 | 41106 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
    """Validate X and Y for pairwise-distance/kernel computations.

    If Y is None it is aliased to X (no copy). Both inputs are converted
    to 2-D float arrays (CSR-accepting) and must agree on the number of
    features.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        Validated X.
    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        Validated Y, or a pointer to safe_X when Y was None.
    """
    X, Y, dtype = _return_float_dtype(X, Y)

    if Y is X or Y is None:
        # Validate once and alias, so `X is Y` shortcuts stay usable.
        X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype)

    n_features_X, n_features_Y = X.shape[1], Y.shape[1]
    if n_features_X != n_features_Y:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             n_features_X, n_features_Y))

    return X, Y
def check_paired_arrays(X, Y):
    """Validate X and Y for paired-distance computations.

    Runs the standard pairwise validation and additionally requires the
    two arrays to have exactly the same shape, since paired metrics
    compare row i of X with row i of Y.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X, safe_Y : validated arrays of identical shape (safe_Y aliases
        safe_X when Y was None).
    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape != Y.shape:
        raise ValueError("X and Y should be of same shape. They were "
                         "respectively %r and %r long." % (X.shape, Y.shape))
    return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.
    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::
        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
    This formulation has two advantages over other ways of computing distances.
    First, it is computationally efficient when dealing with sparse data.
    Second, if x varies but y remains unchanged, then the right-most dot
    product `dot(y, y)` can be pre-computed.
    However, this is not the most precise way of doing this computation, and
    the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
    Y_norm_squared : array-like, shape (n_samples_2, ), optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)
    squared : boolean, optional
        Return squared Euclidean distances.
    Returns
    -------
    distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[ 0., 1.],
           [ 1., 0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1. ],
           [ 1.41421356]])
    See also
    --------
    paired_distances : distances betweens pairs of elements of X and Y.
    """
    # should not need X_norm_squared because if you could precompute that as
    # well as Y, then you should just pre-compute the output and not even
    # call this function.
    X, Y = check_pairwise_arrays(X, Y)
    if Y_norm_squared is not None:
        YY = check_array(Y_norm_squared)
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")
    else:
        # Row-wise squared norms of Y as a (1, n_samples_2) row vector.
        YY = row_norms(Y, squared=True)[np.newaxis, :]
    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        XX = YY.T
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]
    # ||x||^2 - 2 <x, y> + ||y||^2, accumulated in place to avoid copies.
    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # Rounding errors can produce tiny negatives; clamp before the sqrt.
    np.maximum(distances, 0, out=distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        distances.flat[::distances.shape[0] + 1] = 0.0
    # np.sqrt with out= keeps the operation in place when not squared.
    return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
                                  batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.
    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance). The minimal distances are
    also returned.
    This is mostly equivalent to calling:
        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
    but uses much less memory, and is faster for large arrays.
    Parameters
    ----------
    X, Y : {array-like, sparse matrix}
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)
    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y. The default value is quite conservative, but
        can be changed for fine-tuning. The larger the number, the larger the
        memory usage.
    metric : string or callable
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.
        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.
        Distance matrices are not supported.
        Valid values for metric are:
        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']
        See the documentation for scipy.spatial.distance for details on these
        metrics.
    metric_kwargs : dict, optional
        Keyword arguments to pass to specified metric function.
    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].
    distances : numpy.ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.
    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    dist_func = None
    if metric in PAIRWISE_DISTANCE_FUNCTIONS:
        dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif not callable(metric) and not isinstance(metric, str):
        raise ValueError("'metric' must be a string or a callable")

    X, Y = check_pairwise_arrays(X, Y)

    if metric_kwargs is None:
        metric_kwargs = {}

    # axis=0 asks for the argmin over rows of X instead of Y; just swap.
    if axis == 0:
        X, Y = Y, X

    # Allocate output arrays
    indices = np.empty(X.shape[0], dtype=np.intp)
    values = np.empty(X.shape[0])
    # np.infty was removed in NumPy 2.0; np.inf is the canonical spelling.
    values.fill(np.inf)

    for chunk_x in gen_batches(X.shape[0], batch_size):
        X_chunk = X[chunk_x, :]
        for chunk_y in gen_batches(Y.shape[0], batch_size):
            Y_chunk = Y[chunk_y, :]
            if dist_func is not None:
                if metric == 'euclidean':  # special case, for speed
                    # Squared distances via the dot-product expansion;
                    # negatives from rounding are clamped, sqrt deferred
                    # to the very end.
                    d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
                                              dense_output=True)
                    d_chunk *= -2
                    d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
                    d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
                    np.maximum(d_chunk, 0, d_chunk)
                else:
                    d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
            else:
                d_chunk = pairwise_distances(X_chunk, Y_chunk,
                                             metric=metric, **metric_kwargs)

            # Update indices and minimum values using chunk
            min_indices = d_chunk.argmin(axis=1)
            min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
                                 min_indices]

            # Keep the running minimum across Y-chunks for this X-chunk.
            flags = values[chunk_x] > min_values
            indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
            values[chunk_x][flags] = min_values[flags]

    if metric == "euclidean" and not metric_kwargs.get("squared", False):
        # Distances were accumulated squared for speed; undo here, in place.
        np.sqrt(values, values)
    return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.
    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance).
    This is mostly equivalent to calling:
        pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
    but uses much less memory, and is faster for large arrays.
    This function works with dense 2D arrays only.
    Parameters
    ==========
    X, Y : array-like
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)
    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y. The default value is quite conservative, but
        can be changed for fine-tuning. The larger the number, the larger the
        memory usage.
    metric : string or callable
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.
        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.
        Distance matrices are not supported.
        Valid values for metric are:
        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']
        See the documentation for scipy.spatial.distance for details on these
        metrics.
    metric_kwargs : dict, optional
        keyword arguments to pass to specified metric function.
    Returns
    =======
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].
    See also
    ========
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    # The previous default `metric_kwargs={}` was a shared mutable default
    # argument; None is the safe sentinel and is what
    # pairwise_distances_argmin_min expects anyway.
    if metric_kwargs is None:
        metric_kwargs = {}
    return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
                                         metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=5e8):
    """ Compute the L1 distances between the vectors in X and Y.
    With sum_over_features equal to False it returns the componentwise
    distances.
    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).
    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).
    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.
    size_threshold : int, default=5e8
        Unused parameter.
    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.
    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
    array([[ 0.]])
    >>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[ 0., 2.],
           [ 4., 4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = 2 * np.ones((2, 2))
    >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
    array([[ 1., 1.],
           [ 1., 1.]]...)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if issparse(X) or issparse(Y):
        # Sparse path: the componentwise variant is not implemented, and
        # the compiled helper needs CSR data/indices/indptr arrays.
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        # D is filled in place by the Cython routine.
        D = np.zeros((X.shape[0], Y.shape[0]))
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          X.shape[1], D)
        return D
    if sum_over_features:
        # Dense pairwise L1 distances delegated to SciPy's C implementation.
        return distance.cdist(X, Y, 'cityblock')
    # Componentwise distances via broadcasting:
    # (n_samples_X, 1, n_features) - (1, n_samples_Y, n_features).
    D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    D = np.abs(D, D)  # in-place absolute value
    return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute the cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # Turn the similarity matrix into distances in place: D = 1 - S.
    D = cosine_similarity(X, Y)
    D *= -1
    D += 1
    return D
# Paired distances
def paired_euclidean_distances(X, Y):
    """Compute the row-wise (paired) euclidean distances between X and Y.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
        distances[i] is the euclidean distance between X[i] and Y[i].
    """
    X, Y = check_paired_arrays(X, Y)
    diff = X - Y
    return np.sqrt((diff * diff).sum(axis=-1))
def paired_manhattan_distances(X, Y):
    """Compute the row-wise (paired) L1 distances between X and Y.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
        distances[i] is the L1 distance between X[i] and Y[i].
    """
    X, Y = check_paired_arrays(X, Y)
    return np.sum(np.abs(X - Y), axis=-1)
def paired_cosine_distances(X, Y):
    """Compute the row-wise (paired) cosine distances between X and Y.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray, shape (n_samples, )

    Notes
    ------
    The cosine distance equals half the squared euclidean distance of the
    unit-normalized rows, which is what is computed here.
    """
    X, Y = check_paired_arrays(X, Y)
    diff = normalize(X, copy=True) - normalize(Y, copy=True)
    return .5 * np.sum(diff ** 2, axis=-1)
# Registry used by paired_distances(): metric name -> paired implementation.
# 'l2' is an alias of 'euclidean'; 'l1' and 'cityblock' alias 'manhattan'.
PAIRED_DISTANCES = {
    'cosine': paired_cosine_distances,
    'euclidean': paired_euclidean_distances,
    'l2': paired_euclidean_distances,
    'l1': paired_manhattan_distances,
    'manhattan': paired_manhattan_distances,
    'cityblock': paired_manhattan_distances,
}
def paired_distances(X, Y, metric="euclidean", **kwds):
    """Compute the distances between (X[0], Y[0]), (X[1], Y[1]), etc.

    Parameters
    ----------
    X, Y : ndarray (n_samples, n_features)
    metric : string or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        specified in PAIRED_DISTANCES, including "euclidean",
        "manhattan", or "cosine".
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.

    Returns
    -------
    distances : ndarray (n_samples, )

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([ 0.,  1.])

    See also
    --------
    pairwise_distances : pairwise distances.
    """
    if metric in PAIRED_DISTANCES:
        return PAIRED_DISTANCES[metric](X, Y)
    if callable(metric):
        # Check the matrix first (it is usually done by the metric).
        X, Y = check_paired_arrays(X, Y)
        return np.array([metric(x, y) for x, y in zip(X, Y)])
    raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
    """
    Compute the linear kernel between X and Y.

    The linear kernel is simply the dot product <X, Y.T>.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    # safe_sparse_dot handles both dense and sparse inputs and always
    # returns a dense Gram matrix here.
    return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)
    degree : int, default 3
    gamma : float, optional
        Defaults to 1.0 / n_features.
    coef0 : int, default 1

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # Build (gamma * <X, Y> + coef0) ** degree in place on the Gram matrix.
    gram = linear_kernel(X, Y)
    gram *= gamma
    gram += coef0
    gram **= degree
    return gram
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """
    Compute the sigmoid kernel between X and Y::
        K(X, Y) = tanh(gamma <X, Y> + coef0)
    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)
    gamma : float, optional
        Defaults to 1.0 / n_features.
    coef0 : int, optional
        Defaults to 1.
    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    # tanh(gamma * <X, Y> + coef0), accumulated in place on the Gram matrix.
    K = linear_kernel(X, Y)
    K *= gamma
    K += coef0
    np.tanh(K, K)  # compute tanh in-place
    return K
def rbf_kernel(X, Y=None, gamma=None):
    """Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, optional
        Defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # exp(-gamma * squared distances), computed in place.
    K = euclidean_distances(X, Y, squared=True)
    K *= -gamma
    return np.exp(K, K)
def cosine_similarity(X, Y=None):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    kernel matrix : array
        An array with shape (n_samples_X, n_samples_Y).
    """
    X, Y = check_pairwise_arrays(X, Y)

    X_norm = normalize(X, copy=True)
    # Reuse the normalized X when Y aliases X (the common X-vs-X case).
    Y_norm = X_norm if X is Y else normalize(Y, copy=True)
    return linear_kernel(X_norm, Y_norm)
def additive_chi2_kernel(X, Y=None):
    """Computes the additive chi-squared kernel between observations in X and Y
    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.
    The chi-squared kernel is given by::
        k(x, y) = -Sum [(x - y)^2 / (x + y)]
    It can be interpreted as a weighted difference per entry.
    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.
    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    # The compiled helper only handles dense arrays.
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")
    # Output buffer is filled in place by the Cython kernel loop.
    result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, result)
    return result
def chi2_kernel(X, Y=None, gamma=1.):
    """Compute the exponential chi-squared kernel between X and Y.

    The kernel is evaluated between every pair of rows of X and Y
    (non-negative inputs, most commonly histograms)::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # additive_chi2_kernel already returns -Sum[(x - y)^2 / (x + y)], so
    # scaling by gamma and exponentiating in place yields the kernel.
    K = additive_chi2_kernel(X, Y)
    K *= gamma
    np.exp(K, K)
    return K
# Helper functions - distance
# Registry used by pairwise_distances(): metric name -> implementation.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,  # alias of 'euclidean'
    'l1': manhattan_distances,  # alias of 'manhattan'
    'manhattan': manhattan_distances, }
def distance_metrics():
    """Valid metrics for pairwise_distances.
    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.
    The valid distance metrics, and the function they map to, are:
    ============ ====================================
    metric Function
    ============ ====================================
    'cityblock' metrics.pairwise.manhattan_distances
    'cosine' metrics.pairwise.cosine_distances
    'euclidean' metrics.pairwise.euclidean_distances
    'l1' metrics.pairwise.manhattan_distances
    'l2' metrics.pairwise.euclidean_distances
    'manhattan' metrics.pairwise.manhattan_distances
    ============ ====================================
    """
    # Returns the module-level registry itself (not a copy).
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Evaluate func(X, Y_slice) over n_jobs even row-slices of Y in
    parallel and stack the results horizontally."""
    if n_jobs < 0:
        # joblib convention: -1 means all CPUs, -2 all but one, etc.
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)

    if Y is None:
        Y = X

    tasks = (delayed(func)(X, Y[s], **kwds)
             for s in gen_even_slices(Y.shape[0], n_jobs))
    return np.hstack(Parallel(n_jobs=n_jobs, verbose=0)(tasks))
# Metric names accepted by pairwise_distances(): the scikit-learn
# implementations in PAIRWISE_DISTANCE_FUNCTIONS plus the string metrics
# understood by scipy.spatial.distance.
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
                  'braycurtis', 'canberra', 'chebyshev', 'correlation',
                  'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
                  'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
                  'russellrao', 'seuclidean', 'sokalmichener',
                  'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
    """ Compute the distance matrix from a vector array X and optional Y.
    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.
    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.
    Valid values for metric are:
    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix inputs.
    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
    See the documentation for scipy.spatial.distance for details on these
    metrics. These metrics do not support sparse matrix inputs.
    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.
    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
            [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape [n_samples_a, n_features].
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.
    """
    if (metric not in _VALID_METRICS and
            not callable(metric) and metric != "precomputed"):
        raise ValueError("Unknown metric %s. "
                         "Valid metrics are %s, or 'precomputed', or a "
                         "callable" % (metric, _VALID_METRICS))
    if metric == "precomputed":
        # X already is the distance matrix; return it untouched.
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        # One of the scikit-learn implementations above.
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
        if n_jobs == 1:
            return func(X, Y, **kwds)
        else:
            # Split the computation over even slices of Y.
            return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
    elif callable(metric):
        # Check matrices first (this is usually done by the metric).
        X, Y = check_pairwise_arrays(X, Y)
        n_x, n_y = X.shape[0], Y.shape[0]
        # Calculate distance for each element in X and Y.
        # FIXME: can use n_jobs here too
        # FIXME: np.zeros can be replaced by np.empty
        D = np.zeros((n_x, n_y), dtype='float')
        for i in range(n_x):
            start = 0
            if X is Y:
                start = i
            for j in range(start, n_y):
                # distance assumed to be symmetric.
                D[i][j] = metric(X[i], Y[j], **kwds)
                if X is Y:
                    D[j][i] = D[i][j]
        return D
    else:
        # Fall back to scipy.spatial.distance for the remaining string
        # metrics. Note: the distance module doesn't support sparse matrices!
        if type(X) is csr_matrix:
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")
        if Y is None:
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        else:
            if type(Y) is csr_matrix:
                raise TypeError("scipy distance metrics do not"
                                " support sparse matrices.")
            return distance.cdist(X, Y, metric=metric, **kwds)
# Helper functions - kernels
# Registry used by pairwise_kernels(): kernel name -> implementation.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,
    'rbf': rbf_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """Return the mapping of valid pairwise-kernel names to functions.

    This simply exposes ``PAIRWISE_KERNEL_FUNCTIONS``; it exists so the
    string-to-function mapping can be documented verbosely in one place.

    The valid kernel strings, and the function they map to, are:
      =============== ========================================
      metric          Function
      =============== ========================================
      'additive_chi2' sklearn.pairwise.additive_chi2_kernel
      'chi2'          sklearn.pairwise.chi2_kernel
      'linear'        sklearn.pairwise.linear_kernel
      'poly'          sklearn.pairwise.polynomial_kernel
      'polynomial'    sklearn.pairwise.polynomial_kernel
      'rbf'           sklearn.pairwise.rbf_kernel
      'sigmoid'       sklearn.pairwise.sigmoid_kernel
      'cosine'        sklearn.pairwise.cosine_similarity
      =============== ========================================
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# Keyword arguments accepted by each named kernel; consulted by
# pairwise_kernels(..., filter_params=True) to drop irrelevant kwargs.
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": (),
    "cosine": (),
    "exp_chi2": frozenset(["gamma"]),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
                     n_jobs=1, **kwds):
    """Compute the kernel between arrays X and optional array Y.

    Takes either a vector array or a kernel matrix and returns a kernel
    matrix: for a vector array the kernels are computed; a precomputed
    kernel matrix is returned unchanged.  This gives a safe way to accept
    a kernel matrix as input while staying compatible with algorithms
    that take a vector array.  If Y is given (default None), the result
    is the pairwise kernel between the arrays from X and Y.

    Valid string values for metric are::
        ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
            [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array, only if X has shape [n_samples_a, n_features].
    metric : string, or callable
        Metric used between instances of a feature array; a string must be
        a key of pairwise.PAIRWISE_KERNEL_FUNCTIONS.  "precomputed" means
        X already is a kernel matrix.  A callable is invoked on each pair
        of rows and should return their kernel value.
    filter_params: boolean
        Whether to filter invalid parameters or not.
    n_jobs : int
        Number of parallel jobs; the pairwise matrix is split into n_jobs
        even slices.  -1 uses all CPUs, 1 disables parallelism (useful for
        debugging), and n_jobs < -1 uses (n_cpus + 1 + n_jobs) CPUs.
    `**kwds` : optional keyword parameters
        Passed directly to the kernel function.

    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        Kernel matrix with K_{i, j} the kernel between the ith row of X
        and the jth row of X (Y is None) or of Y.

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    if metric == "precomputed":
        # Kernel matrix supplied directly: nothing to compute.
        return X
    if callable(metric):
        # Built-in kernels validate their inputs themselves; do it here.
        X, Y = check_pairwise_arrays(X, Y)
        n_samples_x, n_samples_y = X.shape[0], Y.shape[0]
        K = np.zeros((n_samples_x, n_samples_y), dtype='float')
        symmetric = X is Y
        for i in range(n_samples_x):
            # For the symmetric case only the upper triangle is computed.
            first_j = i if symmetric else 0
            for j in range(first_j, n_samples_y):
                K[i, j] = metric(X[i], Y[j], **kwds)
                if symmetric:
                    K[j, i] = K[i, j]
        return K
    if metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            allowed = KERNEL_PARAMS[metric]
            kwds = dict((name, kwds[name]) for name in kwds
                        if name in allowed)
        kernel_func = PAIRWISE_KERNEL_FUNCTIONS[metric]
        if n_jobs == 1:
            return kernel_func(X, Y, **kwds)
        return _parallel_pairwise(X, Y, kernel_func, n_jobs, **kwds)
    raise ValueError("Unknown kernel %r" % metric)
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.
    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.
    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.
    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached
    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the rotation matrix
    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.
    References
    ----------
    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    Notes
    -----
    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated. Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated. These two
    calculations are performed until convergence. The discrete partition
    matrix is returned as the clustering solution. Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.
    """
    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError
    random_state = check_random_state(random_state)
    vectors = as_float_array(vectors, copy=copy)
    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape
    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element. This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
    # Normalize the rows of the eigenvectors. Samples should lie on the unit
    # hypersphere centered at the origin. This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
    svd_restarts = 0
    has_converged = False
    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:
        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T
        last_objective_value = 0.0
        n_iter = 0
        while not has_converged:
            n_iter += 1
            t_discrete = np.dot(vectors, rotation)
            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))
            t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # Count the restart on *failure*.  Incrementing after a
                # successful SVD (as before) never advanced the counter when
                # the SVD actually failed, so a LinAlgError on the first
                # inner iteration could spin the outer restart loop forever.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)
    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans'):
    """Cluster samples via a spectral embedding of the normalized laplacian.

    Spectral clustering shines when individual clusters are highly
    non-convex, or more generally when a center/spread summary does not
    describe a cluster well (e.g. nested circles on the 2D plane).  When
    ``affinity`` is the adjacency matrix of a graph, this computes
    normalized graph cuts.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    -----------
    affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
        Symmetric matrix describing sample-to-sample relationships, e.g. a
        graph adjacency matrix, a heat kernel of the pairwise distance
        matrix, or a symmetric k-nearest-neighbours connectivity matrix.
        **Must be symmetric**.
    n_clusters : integer, optional
        Number of clusters to extract.
    n_components : integer, optional, default is n_clusters
        Number of eigen vectors to use for the spectral embedding
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities
    random_state : int seed, RandomState instance, or None (default)
        Seeds the lobpcg eigen decomposition (when eigen_solver == 'amg')
        and the K-Means initialization.
    n_init : int, optional, default: 10
        Number of times k-means runs with different centroid seeds; the
        best output in terms of inertia is kept.
    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.
    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        How labels are assigned in the embedding space.  k-means is the
        popular choice but is sensitive to initialization; discretization
        is less sensitive to random initialization (see the 'Multiclass
        spectral clustering' reference).

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    ------
    The graph should contain only one connected component; otherwise the
    results make little sense.  This algorithm solves the normalized cut
    for k=2: it is a normalized spectral clustering.
    """
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    random_state = check_random_state(random_state)
    if n_components is None:
        n_components = n_clusters
    embedding = spectral_embedding(affinity, n_components=n_components,
                                   eigen_solver=eigen_solver,
                                   random_state=random_state,
                                   eigen_tol=eigen_tol, drop_first=False)
    if assign_labels == 'discretize':
        return discretize(embedding, random_state=random_state)
    _, labels, _ = k_means(embedding, n_clusters, random_state=random_state,
                           n_init=n_init)
    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection to the normalized laplacian.
    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.
    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either
    kernel function such the Gaussian (aka RBF) kernel of the euclidean
    distanced ``d(X, X)``::
            np.exp(-gamma * d(X,X) ** 2)
    or a k-nearest neighbors connectivity matrix.
    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.
    Read more in the :ref:`User Guide <spectral_clustering>`.
    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.
    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed',
        'rbf' or one of the kernels supported by
        `sklearn.metrics.pairwise_kernels`.
        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not checked
        by the clustering algorithm.
    gamma : float
        Scaling factor of RBF, polynomial, exponential chi^2 and
        sigmoid affinity kernel. Ignored for
        ``affinity='nearest_neighbors'``.
    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.
    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.
    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities
    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.
    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another approach
        which is less sensitive to random initialization.
    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.
    Attributes
    ----------
    affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only if after calling
        ``fit``.
    labels_ :
        Labels of each point
    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::
        np.exp(- X ** 2 / (2. * delta ** 2))
    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.
    If the pyamg package is installed, it is used: this greatly
    speeds up computation.
    References
    ----------
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """

    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
                 eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
                 kernel_params=None):
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params

    def fit(self, X, y=None):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        Returns
        -------
        self : the fitted estimator (with ``affinity_matrix_`` and
            ``labels_`` set).
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            # Fixed message: the two adjacent literals previously rendered
            # as "``fit``now constructs" (missing space).
            warnings.warn("The spectral clustering API has changed. ``fit``"
                          " now constructs an affinity matrix from data. To"
                          " use a custom affinity matrix, "
                          "set ``affinity=precomputed``.")
        if self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True)
            # Symmetrize the kNN graph (kNN connectivity is directional).
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                # Named kernels get their parameters from the estimator;
                # pairwise_kernels filters out those each kernel ignores.
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                     filter_params=True,
                                                     **params)
        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self

    @property
    def _pairwise(self):
        # Tells sklearn's cross-validation machinery that X is a square
        # sample-by-sample matrix when the affinity is precomputed.
        return self.affinity == "precomputed"
| mit |
mugizico/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
# Path of the per-test scratch directory; set by setup_tmpdata().
tmpdir = None
def setup_tmpdata():
    # Create a temporary dir with the 'mldata' cache layout that
    # fetch_mldata expects under data_home.
    global tmpdir
    tmpdir = tempfile.mkdtemp()
    os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
    # Remove the scratch directory created by setup_tmpdata, if any.
    if tmpdir is not None:
        shutil.rmtree(tmpdir)
def test_mldata_filename():
    """mldata dataset names should be canonicalized into filename slugs."""
    name_pairs = (('datasets-UCI iris', 'datasets-uci-iris'),
                  ('news20.binary', 'news20binary'),
                  ('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
                  ('Nile Water Level', 'nile-water-level'),
                  ('MNIST (original)', 'mnist-original'))
    for raw_name, slug in name_pairs:
        assert_equal(mldata_filename(raw_name), slug)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
    """Test that fetch_mldata is able to download and cache a data set."""
    original_urlopen = datasets.mldata.urlopen
    mock_payload = {
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    }
    datasets.mldata.urlopen = mock_mldata_urlopen(mock_payload)
    try:
        mock = fetch_mldata('mock', data_home=tmpdir)
        for field in ["COL_NAMES", "DESCR", "target", "data"]:
            assert_in(field, mock)
        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))
        # Unknown dataset names should surface the server's HTTP error.
        assert_raises(datasets.mldata.HTTPError,
                      fetch_mldata, 'not_existing_name')
    finally:
        datasets.mldata.urlopen = original_urlopen
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
    """A single-column dataset yields 'data' but no 'target'."""
    original_urlopen = datasets.mldata.urlopen
    try:
        dataset_name = 'onecol'
        # Plant a fake single-column data set in the cache.
        fake = sp.arange(6).reshape(2, 3)
        datasets.mldata.urlopen = mock_mldata_urlopen(
            {dataset_name: {'x': fake}})
        dset = fetch_mldata(dataset_name, data_home=tmpdir)
        for field in ["COL_NAMES", "DESCR", "data"]:
            assert_in(field, dset)
        assert_not_in("target", dset)
        assert_equal(dset.data.shape, (2, 3))
        assert_array_equal(dset.data, fake)
        # With transpose_data=False the data array keeps its raw layout.
        dset = fetch_mldata(dataset_name, transpose_data=False,
                            data_home=tmpdir)
        assert_equal(dset.data.shape, (3, 2))
    finally:
        datasets.mldata.urlopen = original_urlopen
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
    # Exercises how fetch_mldata picks 'data'/'target' out of a
    # multi-column dataset: by column *name*, by COL_NAMES *order*, by
    # explicit column *number*, or by explicit column *name* arguments.
    _urlopen_ref = datasets.mldata.urlopen
    try:
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        y = sp.array([1, -1])
        z = sp.arange(12).reshape(4, 3)
        # by default: columns named 'label'/'data' are mapped to
        # target/data; any other column ('z') is kept under its own name.
        dataname = 'threecol-default'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: (
                {
                    'label': y,
                    'data': x,
                    'z': z,
                },
                ['z', 'data', 'label'],
            ),
        })
        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)
        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        # extra columns come back transposed (column-major mldata layout)
        assert_array_equal(dset.z, z.T)
        # by order: without 'label'/'data' names, the first COL_NAMES
        # entry becomes target and the second becomes data.
        dataname = 'threecol-order'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['y', 'x', 'z']), })
        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)
        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        assert_array_equal(dset.z, z.T)
        # by number: target_name/data_name given as COL_NAMES indices.
        dataname = 'threecol-number'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['z', 'x', 'y']),
        })
        dset = fetch_mldata(dataname, target_name=2, data_name=0,
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)
        assert_array_equal(dset.data, z)
        assert_array_equal(dset.target, y)
        # by name: target_name/data_name given as column names.
        dset = fetch_mldata(dataname, target_name='y', data_name='z',
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)
    finally:
        datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
dsibournemouth/autoweka | scripts/plot_signal.py | 2 | 2699 | import argparse
import numpy as np
import os
import traceback
import matplotlib.pyplot as plt
from config import *
def plot_target_vs_prediction(targets, predictions, limit, title):
    """Plot actual vs. predicted series, mark the train/test boundary with
    a dashed red vertical line, and save the figure as a PNG named after
    `title` under $AUTOWEKA_PATH/plots<suffix>/."""
    plt.close()
    for series in (targets, predictions):
        plt.plot(series)
    plt.axvline(limit, color='r', linestyle='--')
    plt.title(title)
    out_path = "%s/plots%s/signal.%s.png" % (os.environ['AUTOWEKA_PATH'], suffix, title)
    plt.savefig(out_path, bbox_inches='tight')
def get_target_and_predictions(dataset, strategy, generation, seed):
    """Load training+testing prediction CSVs for one experiment and return
    (targets, predictions, training_size).  CSV rows are
    id,actual,predicted,error."""
    experiment_name = '%s.%s.%s-%s' % (dataset, strategy, generation, dataset)
    experiment_dir = '%s/%s/%s' % (
        os.environ['AUTOWEKA_PATH'], experiments_folder, experiment_name)
    training_results = np.genfromtxt(
        '%s/training.predictions.%s.csv' % (experiment_dir, seed),
        skip_header=1, delimiter=",")
    testing_results = np.genfromtxt(
        '%s/predictions.%s.csv' % (experiment_dir, seed),
        skip_header=1, delimiter=",")
    training_size = training_results.shape[0]
    # Concatenate so the caller sees one continuous signal; the training
    # size marks where the test portion begins.
    combined = np.concatenate((training_results, testing_results), axis=0)
    return combined[:, 1], combined[:, 2], training_size
def main():
    """Plot target-vs-prediction signals for every selected combination of
    dataset, strategy, generation and seed (defaults come from config)."""
    parser = argparse.ArgumentParser(prog=os.path.basename(__file__))
    globals().update(load_config(parser))
    parser.add_argument('--dataset', choices=datasets, required=False)
    parser.add_argument('--strategy', choices=strategies, required=False)
    parser.add_argument('--generation', choices=generations, required=False)
    parser.add_argument('--seed', choices=seeds, required=False)
    args = parser.parse_args()
    # override default values
    selected_datasets = [args.dataset] if args.dataset else datasets
    selected_strategies = [args.strategy] if args.strategy else strategies
    selected_generations = [args.generation] if args.generation else generations
    selected_seeds = [args.seed] if args.seed else seeds
    for dataset in selected_datasets:
        for strategy in selected_strategies:
            for generation in selected_generations:
                for seed in selected_seeds:
                    try:
                        targets, predictions, limit = get_target_and_predictions(dataset, strategy, generation, seed)
                        title = '%s.%s.%s.%s' % (dataset, strategy, generation, seed)
                        plot_target_vs_prediction(targets, predictions, limit, title)
                    except Exception as e:
                        # `print e` was Python-2-only syntax (SyntaxError on
                        # Python 3); print(e) works on both.
                        print(e)
                        traceback.print_exc()
if __name__ == "__main__":
main()
| gpl-3.0 |
markYoungH/chromium.src | ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 154 | 8545 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
  """Parse a Breakpad .txt metadata file of 'key:value' lines into a dict.

  Lines without a ':' are ignored.  Only the first colon splits the pair,
  so values may themselves contain ':'.
  """
  dump_info = {}
  # 'with' guarantees the handle is closed even if parsing raises; the
  # previous explicit open()/close() leaked it on error.
  with open(filename, 'r') as fh:
    for line in fh:
      if ':' in line:
        key, value = line.rstrip().split(':', 1)
        dump_info[key] = value
  return dump_info
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
                      cleanup_funcs, crash_service_exe,
                      skip_if_missing=False):
  """Launch Breakpad's crash_service next to the browser binary and
  register a cleanup that terminates it."""
  # Find crash_service.exe relative to chrome.exe.  This is a bit icky.
  crash_service_path = os.path.join(os.path.dirname(browser_path),
                                    crash_service_exe)
  if skip_if_missing and not os.path.exists(crash_service_path):
    return
  service_args = [crash_service_path,
                  '--v=1',  # Verbose output, for debugging failures.
                  '--dumps-dir=%s' % dumps_dir,
                  '--pipe-name=%s' % windows_pipe_name]
  proc = subprocess.Popen(service_args)
  def Cleanup():
    # Note that if the process has already exited, this will raise
    # an 'Access is denied' WindowsError exception, but
    # crash_service.exe is not supposed to do this and such
    # behaviour should make the test fail.
    proc.terminate()
    status = proc.wait()
    sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
                     % (crash_service_exe, status))
  cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
  """Return the full paths of the entries in dir_path, or [] if absent."""
  if not os.path.exists(dir_path):
    return []
  return [os.path.join(dir_path, name) for name in os.listdir(dir_path)]
def GetDumpFiles(dumps_dirs):
  """Log every file found under dumps_dirs and return the .dmp ones."""
  all_files = []
  for dumps_dir in dumps_dirs:
    all_files.extend(ListPathsInDir(dumps_dir))
  sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
  for dump_file in all_files:
    sys.stdout.write('  %s (size %i)\n'
                     % (dump_file, os.stat(dump_file).st_size))
  return [dump_file for dump_file in all_files
          if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
  """Run a browser test and verify the expected Breakpad crash dumps.

  Args:
    cleanup_funcs: list to which cleanup callbacks are appended; the
        caller (MainWrapper) invokes them after this function returns.

  Returns:
    The browser test's exit status, forced to 1 if the crash-dump
    checks fail.
  """
  parser = browser_tester.BuildArgParser()
  parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
                    type=int, default=0,
                    help='The number of crash dumps that we should expect')
  parser.add_option('--expected_process_type_for_crash',
                    dest='expected_process_type_for_crash',
                    type=str, default='nacl-loader',
                    help='The type of Chromium process that we expect the '
                    'crash dump to be for')
  # Ideally we would just query the OS here to find out whether we are
  # running x86-32 or x86-64 Windows, but Python's win32api module
  # does not contain a wrapper for GetNativeSystemInfo(), which is
  # what NaCl uses to check this, or for IsWow64Process(), which is
  # what Chromium uses. Instead, we just rely on the build system to
  # tell us.
  parser.add_option('--win64', dest='win64', action='store_true',
                    help='Pass this if we are running tests for x86-64 Windows')
  options, args = parser.parse_args()

  temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
  def CleanUpTempDir():
    browsertester.browserlauncher.RemoveDirectory(temp_dir)
  cleanup_funcs.append(CleanUpTempDir)

  # To get a guaranteed unique pipe name, use the base name of the
  # directory we just created.
  windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)

  # This environment variable enables Breakpad crash dumping in
  # non-official builds of Chromium.
  os.environ['CHROME_HEADLESS'] = '1'

  # Platform-specific setup: point the crash machinery at temp_dir.
  if sys.platform == 'win32':
    dumps_dir = temp_dir
    # Override the default (global) Windows pipe name that Chromium will
    # use for out-of-process crash reporting.
    os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
    # Launch the x86-32 crash service so that we can handle crashes in
    # the browser process.
    StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
                      cleanup_funcs, 'crash_service.exe')
    if options.win64:
      # Launch the x86-64 crash service so that we can handle crashes
      # in the NaCl loader process (nacl64.exe).
      # Skip if missing, since in win64 builds crash_service.exe is 64-bit
      # and crash_service64.exe does not exist.
      StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
                        cleanup_funcs, 'crash_service64.exe',
                        skip_if_missing=True)
    # We add a delay because there is probably a race condition:
    # crash_service.exe might not have finished doing
    # CreateNamedPipe() before NaCl does a crash dump and tries to
    # connect to that pipe.
    # TODO(mseaborn): We could change crash_service.exe to report when
    # it has successfully created the named pipe.
    time.sleep(1)
  elif sys.platform == 'darwin':
    dumps_dir = temp_dir
    os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
  elif sys.platform.startswith('linux'):
    # The "--user-data-dir" option is not effective for the Breakpad
    # setup in Linux Chromium, because Breakpad is initialized before
    # "--user-data-dir" is read. So we set HOME to redirect the crash
    # dumps to a temporary directory.
    home_dir = temp_dir
    os.environ['HOME'] = home_dir
    options.enable_crash_reporter = True

  result = browser_tester.Run(options.url, options)

  # Find crash dump results.
  if sys.platform.startswith('linux'):
    # Look in "~/.config/*/Crash Reports". This will find crash
    # reports under ~/.config/chromium or ~/.config/google-chrome, or
    # under other subdirectories in case the branding is changed.
    dumps_dirs = [os.path.join(path, 'Crash Reports')
                  for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
  else:
    dumps_dirs = [dumps_dir]
  dmp_files = GetDumpFiles(dumps_dirs)

  failed = False
  msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
         (len(dmp_files), options.expected_crash_dumps))
  if len(dmp_files) != options.expected_crash_dumps:
    sys.stdout.write(msg)
    failed = True
  for dump_file in dmp_files:
    # Sanity check: Make sure dumping did not fail after opening the file.
    msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
    if os.stat(dump_file).st_size == 0:
      sys.stdout.write(msg)
      failed = True
    # On Windows, the crash dumps should come in pairs of a .dmp and
    # .txt file.
    if sys.platform == 'win32':
      second_file = dump_file[:-4] + '.txt'
      msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
             '%r file\n' % (dump_file, second_file))
      if not os.path.exists(second_file):
        sys.stdout.write(msg)
        failed = True
        continue
      # Check that the crash dump comes from the NaCl process.
      dump_info = ReadDumpTxtFile(second_file)
      if 'ptype' in dump_info:
        msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
               % (dump_info['ptype'], options.expected_process_type_for_crash))
        if dump_info['ptype'] != options.expected_process_type_for_crash:
          sys.stdout.write(msg)
          failed = True
      else:
        sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
        failed = True
    # TODO(mseaborn): Ideally we would also check that a backtrace
    # containing an expected function name can be extracted from the
    # crash dump.
  if failed:
    sys.stdout.write('crash_dump_tester: FAILED\n')
    result = 1
  else:
    sys.stdout.write('crash_dump_tester: PASSED\n')
  return result
def MainWrapper():
  """Run Main(), then invoke every cleanup callback it registered.

  Main() may append zero-argument callables to the list it is handed; they
  are always called on the way out, even if Main() raises.
  """
  registered = []
  try:
    return Main(registered)
  finally:
    for callback in registered:
      callback()
# Script entry point: the exit status from Main() propagates through
# MainWrapper() to the shell.
if __name__ == '__main__':
  sys.exit(MainWrapper())
| bsd-3-clause |
bbusemeyer/busempyer | busempyer/process_record.py | 2 | 31828 | import numpy as np
import json
import data_processing as dp
import pandas as pd
from pymatgen.io.cif import CifParser
# TODO generalize!
VARTOL = 1e-2     # Variance comparison tolerance; appears unused in this module -- TODO confirm.
NFE = 8           # Number of Fe sites; sites with index < NFE are labeled "Fe" in _analyze_nfluct.
NORBFE = 10       # Presumably orbitals per Fe atom (orbinfo hard-codes 10) -- TODO confirm.
NORBCH = 4        # Presumably orbitals per chalcogen atom (orbinfo hard-codes 4) -- TODO confirm.
SMALLSPIN = 1.0   # Spins less than this are considered zero.
def fluctdat_array(jsondat,key='value'):
  ''' Pack the flat list of fluctuation entries into one 6-d numpy array.

  Args:
    jsondat (dict): the ['properties']['region_fluctuation'] section of a
      gosling -json dump.
    key (str): which field of each entry to extract ('value' or 'error').
  Returns:
    ndarray of shape (nspin,nspin,nregion,nregion,maxn,maxn).
  '''
  # May not work for bundled QWalk jobs. Might need to average instead of [0].
  ns = jsondat['nspin']
  nr = jsondat['nregion']
  nn = jsondat['maxn']
  flat = [entry[key] for entry in jsondat['fluctuation data']]
  return np.array(flat).reshape(ns,ns,nr,nr,nn,nn)
# TODO: Inefficient but easy to use.
def old_fluct_vars(flarray):
  ''' Variance of the occupation-number distribution for each (spin,region).

  Args:
    flarray (ndarray): (nspin,nspin,nregion,nregion,maxn,maxn) joint
      probabilities as produced by fluctdat_array.
  Returns:
    ndarray: (nspin,nregion) variances taken from each diagonal distribution.
  '''
  nspin = flarray.shape[0]
  nregion = flarray.shape[2]
  counts = np.arange(flarray.shape[4])
  var = np.empty((nspin,nregion))
  for s in range(nspin):
    for r in range(nregion):
      # The diagonal of the same-channel block is the occupation distribution.
      probs = flarray[s,s,r,r].diagonal()
      mean = (counts*probs).sum()
      var[s,r] = ((counts-mean)**2*probs).sum()
  return var
def fluct_covars(flarray):
  ''' Covariance of region occupations between every pair of (spin,region)
  channels.

  Args:
    flarray (ndarray): (nspin,nspin,nregion,nregion,maxn,maxn) joint
      occupation probabilities, as built by fluctdat_array.
  Returns:
    ndarray: (nspin,nspin,nregion,nregion) covariances
      <(n_i - mu_i)(n_j - mu_j)>.

  Bug fix: the original multiplied the two shifted-count vectors elementwise
  before applying the joint matrix, so the product broadcast along a single
  axis (sum_{m,n} d_i[n]*d_j[n]*P[m,n]) instead of forming the outer product
  (sum_{m,n} d_i[m]*d_j[n]*P[m,n]). The einsum in unpack_nfluct and the
  explicit double loop in _analyze_nfluct's covar() show the intended
  contraction; off-diagonal covariances were wrong before this fix.
  '''
  nspin=flarray.shape[0]
  nregion=flarray.shape[2]
  nn=flarray.shape[4]
  count=np.arange(nn)
  # First moments come from each channel's diagonal distribution.
  mom=np.array([
      (count*flarray[s1,s1,r1,r1].diagonal()).sum()
      for s1 in range(nspin)
      for r1 in range(nregion)
    ]).reshape(nspin,nregion)
  covar=[
      np.einsum('m,n,mn->',count-mom[s1,r1],count-mom[s2,r2],
                flarray[s1,s2,r1,r2])
      for s1 in range(nspin)
      for s2 in range(nspin)
      for r1 in range(nregion)
      for r2 in range(nregion)
    ]
  return np.array(covar).reshape(nspin,nspin,nregion,nregion)
def unpack_nfluct(jsondat):
  ''' Calculate useful quantities and put them into a nice dataframe.
  Example:
  >>> mydata=json.load(open('qw.json','r'))
  >>> unpack_nfluct(mydata['properties']['region_fluctuation'])
  Args:
    jsondat (dict): result from calling gosling -json on a QWalk file and using ['properties']['region_fluctuation'].
  Returns:
    dict: Moments and variances as a dict.
  '''
  results={}
  # 6-d arrays indexed (spin_i,spin_j,region_i,region_j,n_i,n_j).
  results['fluctdat']=fluctdat_array(jsondat)
  results['flucterr']=fluctdat_array(jsondat,key='error')
  # Possible occupation numbers 0..maxn-1.
  count=np.arange(results['fluctdat'].shape[-1])
  # First moment <n> per (spin,region), contracted over the diagonal blocks.
  results['moms']=np.einsum('ssrrnn,n->sr',results['fluctdat'],count)
  # Error on <n>: quadrature sum of the diagonal errors weighted by n.
  results['momserr']=np.einsum('ssrrnn,n->sr',results['flucterr']**2,count**2)**0.5
  # shifted(s,r,n)=n-mu(s,r)
  shifted=count[None,None,:]-results['moms'][:,:,None]
  shiftederr=results['momserr'][:,:,None]
  # Covariance <(n_i-mu_i)(n_j-mu_j)> between every pair of channels.
  results['covars']=np.einsum('aibjck,abc,ijk->aibj',results['fluctdat'],shifted,shifted)
  # First-order error propagation: one quadrature term per factor in the
  # triple product (probability, shift_i, shift_j).
  results['covarserr']=\
      np.einsum('aibjck,abc,ijk->aibj',results['flucterr']**2,shifted**2,shifted**2)**0.5 +\
      np.einsum('aibjck,abc,ijk->aibj',results['fluctdat']**2,shiftederr**2,shifted**2)**0.5 +\
      np.einsum('aibjck,abc,ijk->aibj',results['fluctdat']**2,shifted**2,shiftederr**2)**0.5
  return results
def analyze_nfluct(fluctdat):
  ''' Derive spin/charge moments and their (co)variance errors from the
  output of unpack_nfluct.

  Args:
    fluctdat (dict): must contain 'moms', 'momserr', 'covars', 'covarserr'
      keyed arrays whose leading axis is the spin channel (0=up, 1=down).
  Returns:
    dict: the same dict, mutated in place with the derived keys added.
  '''
  up, dn = fluctdat['moms'][0], fluctdat['moms'][1]
  uperr, dnerr = fluctdat['momserr'][0], fluctdat['momserr'][1]
  cov = fluctdat['covars']
  coverr = fluctdat['covarserr']
  fluctdat['spin'] = up - dn
  fluctdat['charge'] = up + dn
  fluctdat['avgerr'] = (uperr**2 + dnerr**2)**0.5
  # Magnetization/charge covariances combine the four spin-channel blocks.
  fluctdat['magcov'] = cov[0,0] + cov[1,1] - cov[0,1] - cov[1,0]
  fluctdat['chgcov'] = cov[0,0] + cov[1,1] + cov[0,1] + cov[1,0]
  fluctdat['coverr'] = (coverr[0,0]**2 + coverr[1,1]**2
                        + coverr[0,1]**2 + coverr[1,0]**2)**0.5
  return fluctdat
################################################################################
# If you're wondering about how to use these, and you're in the Wagner group on
# github, check out my FeTe notebook!
################################################################################
##### !!! These are all written for autogenv1, so they might be obsolete.
###############################################################################
# Process record group of functions.
def process_record(record):
  """ Take the json produced from autogen and process into a dictionary of much
  processed and more useful results.

  Args:
    record (dict): one autogen record. The 'dft' and 'qmc' sections are
      optional; missing sections are simply skipped instead of raising
      KeyError (robustness fix).
  Returns:
    dict: copied metadata plus processed 'dft', 'vmc' and 'dmc' entries.
  """
  res = {}
  copykeys = ['dft','supercell','total_spin','charge','xyz','cif','control']
  nonautogen_keys = ['a','c','se_height','ordering','pressure']
  for copykey in copykeys+nonautogen_keys:
    if copykey in record.keys():
      res[copykey] = record[copykey]
  # 'dft' is already copied above; the original re-assigned it redundantly.
  # NOTE: res['dft'] aliases record['dft'], so the annotation below also
  # lands on the input record.
  if 'dft' in record.keys():
    if 'mag_moments' in record['dft'].keys():
      res['dft']['spins_consistent'] = _check_spins(res['dft'],small=SMALLSPIN)
  qmc = record.get('qmc',{})
  if 'vmc' in qmc.keys():
    res['vmc'] = _process_vmc(qmc['vmc'])
  if 'dmc' in qmc.keys():
    print("Getting DMC")
    res['dmc'] = _process_dmc(qmc['dmc'])
    if 'results' in qmc.get('postprocess',{}).keys():
      res['dmc'].update(_process_post(qmc['postprocess']))
  return res
def _process_post(post_record):
""" Process postprocess results by k-averaging and site-averaging."""
if 'results' not in post_record.keys(): return {}
res = {}
# Right now just checks the first k-point: problem?
if 'region_fluctuation' in post_record['results'][0]['results']['properties'].keys():
res['fluct'] = _analyze_nfluct(post_record)
if 'tbdm_basis' in post_record['results'][0]['results']['properties'].keys():
res['ordm'] = _analyze_ordm(post_record)
return res
def _process_vmc(dmc_record):
grouplist = ['jastrow','optimizer']
res = {}
if 'results' not in dmc_record.keys():
return res
res['energy'] = json.loads(pd.DataFrame(dmc_record['results'])\
.groupby(grouplist)\
.apply(_kaverage_energy)\
.reset_index()
.to_json()
)
return res
def _process_dmc(dmc_record):
grouplist = ['timestep','jastrow','localization','optimizer']
res = {}
if 'results' not in dmc_record.keys():
return res
res['energy'] = json.loads(pd.DataFrame(dmc_record['results'])\
.groupby(grouplist)\
.apply(_kaverage_energy)\
.reset_index()
.to_json()
)
return res
def mat_diag_exp(pmat,perr):
  ''' Mean and variance of the distribution on the diagonal of a matrix
  (diagonal index n is the random variable, pmat[n][n] its probability),
  with first-order propagated errors from perr.

  Returns:
    tuple: (avg, avgerr, var, varerr).
  '''
  # Not double-checked yet!
  diag = [(n, pmat[n][n], perr[n][n]) for n in range(len(pmat))]
  avg = sum(n*p for n,p,_ in diag)
  avgerr = sum((n*e)**2 for n,_,e in diag)**0.5
  var = sum((n-avg)**2*p for n,p,_ in diag)
  # Two error terms per n: uncertainty of p[n][n], and of avg itself.
  varerr = sum((e*(n-avg)**2)**2 + (2*p*avgerr*(n-avg))**2
               for n,p,e in diag)**0.5
  return avg,avgerr,var,varerr
def old_analyze_nfluct(post_record):
  """ Version of _analyze_nfluct where no site-averaging is done.
  Useful for external versions.

  Args:
    post_record (dict): postprocess section with a 'results' list of gosling
      JSON dumps (one per k-point/parameter set).
  Returns:
    DataFrame: one row per (spin,site) with avg/var, magnetic moment, total
      charge, opposite-spin same-site covariance, and category labels.
  """
  def diag_exp(rec):
    """ Compute mean and variance of one diagonal fluctuation record. """
    res = dict(zip(
        ('avg','avgerr','var','varerr'),
        mat_diag_exp(rec['value'],rec['error'])
      ))
    # Carry the grouping metadata through.
    for info in ['jastrow', 'optimizer', 'localization',
                 'timestep', 'spini', 'sitei']:
      res[info] = rec[info]
    return pd.Series(res)
  def covar(rec,adf):
    """ Compute covariance of one (spin,site) pair record against the means
    looked up in adf (indexed by (spin,site)). """
    res={}
    res['cov']=0.0
    pmat=rec['value']
    nmax=len(pmat)
    avgi=adf.loc[(rec['spini'],rec['sitei']),'avg']
    avgj=adf.loc[(rec['spinj'],rec['sitej']),'avg']
    for m in range(nmax):
      for n in range(nmax):
        res['cov']+=pmat[m][n]*(m-avgi)*(n-avgj)
    for info in ['jastrow','optimizer','localization','timestep',
                 'spini','spinj','sitei','sitej']:
      res[info] = rec[info]
    return pd.Series(res)
  def subspins(siterec):
    # Combine the up and down rows of one site into moment and charge.
    tmpdf = siterec.set_index('spin')
    magmom = tmpdf.loc['up','avg'] - tmpdf.loc['down','avg']
    totchg = tmpdf.loc['up','avg'] + tmpdf.loc['down','avg']
    magerr = (tmpdf.loc['up','avgerr']**2 + tmpdf.loc['down','avgerr']**2)**0.5
    return pd.Series({
        'site':siterec['site'].values[0],
        'magmom':magmom, 'magmom_err':magerr,
        'totchg':totchg, 'totchg_err':magerr
      })
  # Moments and other arithmetic.
  #fluctdf = _kaverage_fluct(post_record['results'])
  grouplist = ['timestep','jastrow','localization','optimizer']
  fluctdf = pd.DataFrame(post_record['results'])\
      .groupby(grouplist)\
      .apply(_kaverage_fluct)\
      .reset_index()
  # Relabel integer spin channels: 0 -> "up", anything else -> "down".
  for s in ['spini','spinj']:
    ups = (fluctdf[s] == 0)
    fluctdf[s] = "down"
    fluctdf.loc[ups,s] = "up"
  diag=( (fluctdf['spini']==fluctdf['spinj']) &\
         (fluctdf['sitei']==fluctdf['sitej']) )
  avgdf=fluctdf[diag].apply(diag_exp,axis=1)
  avgdf=avgdf.rename(columns={'spini':'spin','sitei':'site'})
  magdf=avgdf.groupby(grouplist+['site']).apply(subspins)
  avgdf=pd.merge(avgdf,magdf)
  covdf=fluctdf.apply(lambda x: covar(x,avgdf.set_index(['spin','site'])),axis=1)
  # Keep only opposite-spin, same-site covariances.
  osspsp=((covdf['spini']!=covdf['spinj'])&(covdf['sitei']==covdf['sitej']))
  ossdf=covdf[osspsp].rename(columns={'sitei':'site','spini':'spin'})
  avgdf=pd.merge(avgdf,ossdf,on=grouplist+['site','spin'])
  del avgdf['sitej']
  # Categorization.
  avgdf['netmag'] = "down"
  avgdf.loc[avgdf['magmom']>0,'netmag'] = "up"
  avgdf['spinchan'] = "minority"
  avgdf.loc[avgdf['netmag']==avgdf['spin'],'spinchan'] = "majority"
  # NOTE(review): every site is labeled "Se" here; unlike _analyze_nfluct
  # there is no site<NFE -> "Fe" relabeling. Confirm that is intended.
  avgdf['element'] = "Se"
  return avgdf
def _analyze_nfluct(post_record):
  """ Compute physical values and site-average number fluctuation.

  Args:
    post_record (dict): postprocess section with a 'results' list of gosling
      JSON dumps.
  Returns:
    dict: {'magmom': ..., 'covariance': ...}, each a json-loaded dump of the
      site-averaged DataFrames.
  """
  def diag_exp(rec):
    """ Compute mean and variance of one diagonal fluctuation record,
    with first-order error propagation (same math as mat_diag_exp). """
    res = {}
    for dat in ['avg','var','avgerr','varerr']:
      res[dat] = 0.0
    for info in ['jastrow', 'optimizer', 'localization',
                 'timestep', 'spini', 'sitei']:
      res[info] = rec[info]
    pmat = rec['value']
    perr = rec['error']
    nmax = len(pmat)
    for n in range(nmax):
      res['avg'] += n*pmat[n][n]
      res['avgerr'] += (n*perr[n][n])**2
    res['avgerr']= res['avgerr']**0.5
    for n in range(nmax):
      res['var'] += (n-res['avg'])**2*pmat[n][n]
      res['varerr'] += (perr[n][n]*(n-res['avg'])**2)**2 +\
          (2*pmat[n][n]*res['avgerr']*(n-res['avg']))**2
    res['varerr'] = res['varerr']**0.5
    return pd.Series(res)
  def covar(rec,adf):
    """ Compute covariance of one (spin,site) pair record against the means
    looked up in adf (indexed by (spin,site)). """
    res={}
    res['cov']=0.0
    pmat=rec['value']
    nmax=len(pmat)
    avgi=adf.loc[(rec['spini'],rec['sitei']),'avg']
    avgj=adf.loc[(rec['spinj'],rec['sitej']),'avg']
    for m in range(nmax):
      for n in range(nmax):
        res['cov']+=pmat[m][n]*(m-avgi)*(n-avgj)
    for info in ['jastrow','optimizer','localization','timestep',
                 'spini','spinj','sitei','sitej']:
      res[info] = rec[info]
    return pd.Series(res)
  def subspins(siterec):
    # Combine the up and down rows of one site into moment and charge.
    tmpdf = siterec.set_index('spin')
    magmom = tmpdf.loc['up','avg'] - tmpdf.loc['down','avg']
    totchg = tmpdf.loc['up','avg'] + tmpdf.loc['down','avg']
    magerr = (tmpdf.loc['up','avgerr']**2 + tmpdf.loc['down','avgerr']**2)**0.5
    return pd.Series({
        'site':siterec['site'].values[0],
        'magmom':magmom, 'magmom_err':magerr,
        'totchg':totchg, 'totchg_err':magerr
      })
  def siteaverage(sgrp):
    # Warn when the sites being averaged differ by more than 10x the
    # mean variance error.
    tol=10*sgrp['varerr'].mean()
    if sgrp['var'].std() > tol:
      print("nfluct: Site average warning: variation in sites larger than expected.")
      print("%f > %f"%(sgrp['var'].std(),tol))
    return pd.Series({
        'variance':sgrp['var'].mean(),
        'variance_err':(sgrp['varerr']**2).mean()**0.5,
        'magmom':abs(sgrp['magmom'].values).mean(),
        'totchg':abs(sgrp['totchg'].values).mean(),
        'magmom_err':(sgrp['magmom_err']**2).mean()**0.5,
        'covariance':sgrp['cov'].mean()
      })
  # Moments and other arithmetic.
  #fluctdf = _kaverage_fluct(post_record['results'])
  grouplist = ['timestep','jastrow','localization','optimizer']
  fluctdf = pd.DataFrame(post_record['results'])\
      .groupby(grouplist)\
      .apply(_kaverage_fluct)\
      .reset_index()
  # Relabel integer spin channels: 0 -> "up", anything else -> "down".
  for s in ['spini','spinj']:
    ups = (fluctdf[s] == 0)
    fluctdf[s] = "down"
    fluctdf.loc[ups,s] = "up"
  diag=( (fluctdf['spini']==fluctdf['spinj']) &\
         (fluctdf['sitei']==fluctdf['sitej']) )
  avgdf=fluctdf[diag].apply(diag_exp,axis=1)
  avgdf=avgdf.rename(columns={'spini':'spin','sitei':'site'})
  magdf=avgdf.groupby(grouplist+['site']).apply(subspins)
  avgdf=pd.merge(avgdf,magdf)
  covdf=fluctdf.apply(lambda x: covar(x,avgdf.set_index(['spin','site'])),axis=1)
  # Keep only opposite-spin, same-site covariances.
  osspsp=((covdf['spini']!=covdf['spinj'])&(covdf['sitei']==covdf['sitej']))
  ossdf=covdf[osspsp].rename(columns={'sitei':'site','spini':'spin'})
  avgdf=pd.merge(avgdf,ossdf,on=grouplist+['site','spin'])
  # Categorization.
  avgdf['netmag'] = "down"
  avgdf.loc[avgdf['magmom']>0,'netmag'] = "up"
  avgdf['spinchan'] = "minority"
  avgdf.loc[avgdf['netmag']==avgdf['spin'],'spinchan'] = "majority"
  # Sites below NFE are iron, the rest chalcogen (module constant).
  avgdf['element'] = "Se"
  avgdf.loc[avgdf['site']<NFE,'element'] = "Fe"
  # Site average.
  ## Debug site averaging (ensure averaging is reasonable).
  #for lab,df in avgdf.groupby(grouplist+['spinchan','element']):
  #  print(lab)
  #  print(df[['avg','avgerr','var','varerr','cov']])
  savgdf = avgdf.groupby(grouplist+['spinchan','element'])\
      .apply(siteaverage)\
      .reset_index()
  magdf = savgdf.drop(['spinchan','variance','variance_err','covariance'],axis=1).drop_duplicates()
  covdf = savgdf.drop(['magmom','magmom_err'],axis=1)
  return { 'magmom':json.loads(magdf.to_json()),
           'covariance':json.loads(covdf.to_json()) }
def analyze_ordm(post_record,orbmap):
  """ Compute physical values and site-average 1-body RDM.

  External-facing variant of _analyze_ordm: the orbital classification comes
  from the caller-supplied `orbmap` instead of the hard-coded orbinfo(), and
  no site-averaging is performed at the end.

  Args:
    post_record (dict): postprocess section with a 'results' list.
    orbmap: indexable mapping orbital number -> (orbnum,elem,atom,orb).
  Returns:
    DataFrame: one row per (orbital_i,orbital_j,spin) with 'ordm' value and
      error plus atom/spin classification columns.
  """
  grouplist = ['timestep','jastrow','localization','optimizer']
  # k-average (currently selects gamma-only due to bug).
  ordmdf = pd.DataFrame(post_record['results'])\
      .groupby(['timestep','jastrow','localization','optimizer'])\
      .apply(_kaverage_ordm)\
      .reset_index()
  # Classify orbitals based on index.
  infodf = ordmdf['orbni'].drop_duplicates().apply(lambda orbnum:
      pd.Series(dict(zip(['orbnum','elem','atom','orb'],orbmap[orbnum]))))
  ordmdf = pd.merge(ordmdf,infodf,how='outer',left_on='orbni',right_on='orbnum')
  ordmdf = pd.merge(ordmdf,infodf,how='outer',left_on='orbnj',right_on='orbnum',
      suffixes=("i","j"))
  ordmdf = ordmdf.drop(['orbnumi','orbnumj'],axis=1)
  # Classify atoms based on spin occupations.
  occdf = ordmdf[ordmdf['orbni']==ordmdf['orbnj']]\
      .groupby(grouplist+['atomi'])\
      .agg({'up':np.sum,'down':np.sum})\
      .reset_index()\
      .rename(columns={'atomi':'at'})
  occdf['net'] = occdf['up'] - occdf['down']
  occdf = occdf.drop(['up','down'],axis=1)
  # Net moment below 0.1 counts as an unpolarized ('zero') atom.
  occdf['atspin'] = 'up'
  occdf.loc[occdf['net'] < 0,'atspin'] = 'down'
  occdf.loc[occdf['net'].abs() < 1e-1,'atspin'] = 'zero'
  ordmdf = pd.merge(ordmdf,occdf,
      left_on=grouplist+['atomi'],right_on=grouplist+['at'])
  ordmdf = pd.merge(ordmdf,occdf,
      left_on=grouplist+['atomj'],right_on=grouplist+['at'],
      suffixes=('i','j'))\
      .drop(['ati','atj'],axis=1)
  ordmdf['rel_atspin'] = "antiparallel"
  ordmdf.loc[ordmdf['atspini']==ordmdf['atspinj'],'rel_atspin'] = "parallel"
  ordmdf.loc[ordmdf['atspini']=='zero','rel_atspin'] = "zero"
  ordmdf.loc[ordmdf['atspinj']=='zero','rel_atspin'] = "zero"
  # Classify spin channels based on minority and majority channels.
  # Stack up/down into a long format with a 'spin' index level.
  ordmdf = ordmdf.set_index([c for c in ordmdf.columns
      if c not in ['up','down','up_err','down_err']])
  vals = ordmdf[['up','down']].stack()
  vals.index.names = vals.index.names[:-1]+['spin']
  errs = ordmdf[['up_err','down_err']]\
      .rename(columns={'up_err':'up','down_err':'down'})\
      .stack()
  errs.index.names = errs.index.names[:-1]+['spin']
  ordmdf = pd.DataFrame({'ordm':vals,'ordm_err':errs}).reset_index()
  ordmdf['spini'] = "minority"
  ordmdf['spinj'] = "minority"
  ordmdf.loc[ordmdf['spin'] == ordmdf['atspini'],'spini'] = "majority"
  ordmdf.loc[ordmdf['spin'] == ordmdf['atspinj'],'spinj'] = "majority"
  ordmdf.loc[ordmdf['atspini'] == 'zero','spini'] = 'neither'
  ordmdf.loc[ordmdf['atspinj'] == 'zero','spinj'] = 'neither'
  return ordmdf
def _analyze_ordm(post_record):
  """ Compute physical values and site-average 1-body RDM.

  Args:
    post_record (dict): postprocess section with a 'results' list of gosling
      JSON dumps.
  Returns:
    dict: {'orb': site-averaged orbital occupations,
           'hop': site-averaged interatomic hopping magnitudes},
      both json-loaded DataFrame dumps.

  Fix: the orbital-occupation selection used the positional-axis form
  `.drop(cols,1)`, which was deprecated and then removed in pandas 2.0;
  it now passes `axis=1` explicitly (same behavior on older pandas).
  """
  def saverage_orb(sgrp):
    """ Site-average orbital occupations; warn on large site variation. """
    tol=10*sgrp['ordm_err'].mean()
    if sgrp['ordm'].std() > tol:
      print("saverage_orb: Site average warning: variation in sites larger than expected.")
      print("%.3f > %.3f"%(sgrp['ordm'].std(),tol))
    return pd.Series({
        'ordm':sgrp['ordm'].mean(),
        'ordm_err':(sgrp['ordm_err']**2).mean()**0.5,
      })
  def saverage_hop(sgrp):
    """ Site-average hopping elements; warn on large site variation. """
    tol=10*sgrp['ordm_err'].mean()
    if sgrp['ordm'].std() > tol:
      print("saverage_hop: Site average warning: variation in sites larger than expected.")
      print("%.3f > %.3f"%(sgrp['ordm'].std(),tol))
    return pd.Series({
        'ordm':sgrp['ordm'].mean(),
        'ordm_err':(sgrp['ordm_err']**2).mean()**0.5,
      })
  grouplist = ['timestep','jastrow','localization','optimizer']
  # k-average (currently selects gamma-only due to bug).
  ordmdf = pd.DataFrame(post_record['results'])\
      .groupby(['timestep','jastrow','localization','optimizer'])\
      .apply(_kaverage_ordm)\
      .reset_index()
  # Classify orbitals based on index.
  infodf = ordmdf['orbni'].drop_duplicates().apply(lambda orbnum:
      pd.Series(dict(zip(['orbnum','elem','atom','orb'],orbinfo(orbnum)))))
  ordmdf = pd.merge(ordmdf,infodf,how='outer',left_on='orbni',right_on='orbnum')
  ordmdf = pd.merge(ordmdf,infodf,how='outer',left_on='orbnj',right_on='orbnum',
      suffixes=("i","j"))
  ordmdf = ordmdf.drop(['orbnumi','orbnumj'],axis=1)
  # Classify atoms based on spin occupations.
  occdf = ordmdf[ordmdf['orbni']==ordmdf['orbnj']]\
      .groupby(grouplist+['atomi'])\
      .agg({'up':np.sum,'down':np.sum})\
      .reset_index()\
      .rename(columns={'atomi':'at'})
  occdf['net'] = occdf['up'] - occdf['down']
  occdf = occdf.drop(['up','down'],axis=1)
  # Net moment below 0.1 counts as an unpolarized ('zero') atom.
  occdf['atspin'] = 'up'
  occdf.loc[occdf['net'] < 0,'atspin'] = 'down'
  occdf.loc[occdf['net'].abs() < 1e-1,'atspin'] = 'zero'
  ordmdf = pd.merge(ordmdf,occdf,
      left_on=grouplist+['atomi'],right_on=grouplist+['at'])
  ordmdf = pd.merge(ordmdf,occdf,
      left_on=grouplist+['atomj'],right_on=grouplist+['at'],
      suffixes=('i','j'))\
      .drop(['ati','atj'],axis=1)
  ordmdf['rel_atspin'] = "antiparallel"
  ordmdf.loc[ordmdf['atspini']==ordmdf['atspinj'],'rel_atspin'] = "parallel"
  ordmdf.loc[ordmdf['atspini']=='zero','rel_atspin'] = "zero"
  ordmdf.loc[ordmdf['atspinj']=='zero','rel_atspin'] = "zero"
  # Classify spin channels based on minority and majority channels.
  # Stack up/down into a long format with a 'spin' index level.
  ordmdf = ordmdf.set_index([c for c in ordmdf.columns
      if c not in ['up','down','up_err','down_err']])
  vals = ordmdf[['up','down']].stack()
  vals.index.names = vals.index.names[:-1]+['spin']
  errs = ordmdf[['up_err','down_err']]\
      .rename(columns={'up_err':'up','down_err':'down'})\
      .stack()
  errs.index.names = errs.index.names[:-1]+['spin']
  ordmdf = pd.DataFrame({'ordm':vals,'ordm_err':errs}).reset_index()
  ordmdf['spini'] = "minority"
  ordmdf['spinj'] = "minority"
  ordmdf.loc[ordmdf['spin'] == ordmdf['atspini'],'spini'] = "majority"
  ordmdf.loc[ordmdf['spin'] == ordmdf['atspinj'],'spinj'] = "majority"
  ordmdf.loc[ordmdf['atspini'] == 'zero','spini'] = 'neither'
  ordmdf.loc[ordmdf['atspinj'] == 'zero','spinj'] = 'neither'
  # Focus in on orbital occupations.
  orboccdf = ordmdf[ordmdf['orbni']==ordmdf['orbnj']]\
      .drop([col for col in ordmdf.columns if col[-1]=='j'],axis=1)\
      .groupby(grouplist+['elemi','orbi','spini'])\
      .apply(saverage_orb)\
      .reset_index()
  # Focus in on parallel or antiparallel hopping.
  orbsumsel = grouplist+['atomi','atomj','elemi','elemj','rel_atspin','spini','spinj']
  siteavgsel = [c for c in orbsumsel if c not in ['atomi','atomj']]
  hopdf = ordmdf[ordmdf['atomi'] != ordmdf['atomj']]\
      .groupby(orbsumsel)\
      .agg({'ordm':lambda x:x.abs().sum(), 'ordm_err':lambda x:sum(x**2)**0.5})\
      .reset_index()\
      .groupby(siteavgsel)\
      .agg({'ordm':np.mean, 'ordm_err':lambda x:np.mean(x**2)**0.5})\
      .reset_index()
  return {'orb':json.loads(orboccdf.to_json()),
          'hop':json.loads(hopdf.to_json())}
def _kaverage_energy(kavgdf):
  """ Average total energies over k-points within one parameter group.

  Args:
    kavgdf: the rows for one (grouped) parameter combination; the nested
      gosling JSON sits under the 'results' column.
  Returns:
    Series: weighted-mean 'value' and quadrature-combined 'error'.
  """
  # Keep unpacking until reaching energy.
  egydf = \
    unpack(
      unpack(
        unpack(
          kavgdf
          ['results'])\
        ['properties'])\
      ['total_energy']).applymap(dp.unlist)
  # TODO generalize!
  # Equal weight for every k-point.
  weights = np.tile(1./egydf['value'].shape[0],egydf['value'].shape)
  return pd.Series({
      "value":(weights*egydf['value'].values).sum(),
      "error":((weights*egydf['error'].values)**2).sum()**.5
    })
def _kaverage_fluct(reclist):
  """ K-average the region-fluctuation histograms for one parameter group.

  Args:
    reclist: rows for one (timestep,jastrow,localization,optimizer) group;
      each carries the nested gosling JSON under 'results'.
  Returns:
    DataFrame indexed by (spini,spinj,sitei,sitej) with averaged 'value'
    and averaged 'error' matrices.
  """
  # Warning! _kaverage_qmc() assuming equal k-point weight!
  datdf = \
    unpack(
      unpack(
        unpack(
          unpack(
            pd.DataFrame(reclist)\
            ['results'])\
          ['properties'])\
        ['region_fluctuation'])\
      ['fluctuation data'])
  # Labels are presumably identical across k-points, so drop_duplicates
  # should leave a single row each -- TODO confirm.
  spiniser = datdf.applymap(lambda x: x['spin'][0]).drop_duplicates()
  spinjser = datdf.applymap(lambda x: x['spin'][1]).drop_duplicates()
  siteiser = datdf.applymap(lambda x: x['region'][0]).drop_duplicates()
  sitejser = datdf.applymap(lambda x: x['region'][1]).drop_duplicates()
  valser = datdf.applymap(lambda x: x['value']).apply(dp.mean_array)
  errser = datdf.applymap(lambda x: x['error']).apply(dp.mean_array_err)
  # Safely turn DataFrame into Series.
  if spiniser.shape[0] == 1: spiniser = spiniser.iloc[0]
  if spinjser.shape[0] == 1: spinjser = spinjser.iloc[0]
  if siteiser.shape[0] == 1: siteiser = siteiser.iloc[0]
  if sitejser.shape[0] == 1: sitejser = sitejser.iloc[0]
  ret = pd.DataFrame({
      'spini':spiniser,
      'spinj':spinjser,
      'sitei':siteiser,
      'sitej':sitejser,
      'value':valser,
      'error':errser
    }).set_index(['spini','spinj','sitei','sitej'])
  return ret
def _kaverage_ordm(kavgdf):
  """ Extract the 1-body reduced density matrix for one parameter group.

  Despite the name, only the first k-point is used (see .iloc[0] below).

  Returns:
    DataFrame indexed by (orbni,orbnj) with 'up'/'down' values and errors.
  """
  # Warning! _kaverage_qmc() assuming equal k-point weight!
  datdf =\
    unpack(
      unpack(
        unpack(
          unpack(
            kavgdf\
            ['results'])\
          ['properties'])\
        ['tbdm_basis'])\
      ['obdm'])
  # .iloc[0] selects only the first (gamma) k-point instead of averaging.
  res = pd.DataFrame(datdf['up'].iloc[0]).stack().to_frame('up')
  res = res.join(pd.DataFrame(datdf['down'].iloc[0]).stack().to_frame('down'))
  res = res.join(pd.DataFrame(datdf['up_err'].iloc[0]).stack().to_frame('up_err'))
  res = res.join(pd.DataFrame(datdf['down_err'].iloc[0]).stack().to_frame('down_err'))
  res = res.reset_index()\
      .rename(columns={'level_0':'orbni','level_1':'orbnj'})\
      .set_index(['orbni','orbnj'])
  return res
def _check_spins(dft_record,small=1.0):
""" Check that the spins that were set at the beginning correspond to the
spins it ends up having. Anything less than small is considered zero."""
init_spins = dft_record['initial_spin']
moms = dft_record['mag_moments']
moms = np.array(moms)
print(init_spins)
print(moms)
zs = abs(moms) < small
up = moms > 0.
dn = moms < 0.
moms.dtype = int
moms[up] = 1
moms[dn] = -1
moms[zs] = 0
if len(init_spins) < len(moms):
init_spins = np.append(init_spins,np.zeros(len(moms)-len(init_spins)))
if len(init_spins)==0:
if (moms == np.zeros(moms.shape)).all():
return True
else:
return False
else:
# Note casting prevents numpy.bool.
return bool((moms == np.array(init_spins)).all())
def orbinfo(orbnum):
  """ Compute orbital info based on orbital number: [element,atomnum,orbital].
  Currently only useful for Fe-chalcogenides. Warning: this depends on how you
  define the basis!"""
  NFe = 8
  NSe = 8
  # CRYSTAL: 'order of internal storage'.
  # s, px, py, pz, dz2-r2, dxz, dyz, dx2-y2, dxy, ...
  Feorbs = ['3s','3px','3py','3pz','4s','3dz2-r2','3dxz','3dyz','3dx2-y2','3dxy']
  Seorbs = ['3s','3px','3py','3pz']
  nb_fe = len(Feorbs)
  nb_se = len(Seorbs)
  fe_block = NFe*nb_fe
  # Orbitals at or beyond the Fe block belong to the chalcogen atoms.
  if float(orbnum)/fe_block > (1 - 1e-8):
    atom = (orbnum - fe_block)//nb_se + 1 + NFe
    return [orbnum,'Se',atom,Seorbs[orbnum%nb_se]]
  atom = orbnum//nb_fe + 1
  return [orbnum,'Fe',atom,Feorbs[orbnum%nb_fe]]
###############################################################################
# Format autogen group of function.
def format_datajson(inp_json="results.json",filterfunc=lambda x:True):
  """ Takes processed autogen json file and organizes it into a Pandas DataFrame.

  Args:
    inp_json (str): path to the processed results json.
    filterfunc (callable): keeps only rows whose 'id' passes this predicate.
  Returns:
    DataFrame: one row per calculation, DFT and QMC columns joined.
  """
  rawdf = pd.read_json(open(inp_json,'r'))
  # Number of primitive cells in the supercell, from the 3x3 determinant.
  rawdf['ncell'] = rawdf['supercell'].apply(lambda x:
      abs(np.linalg.det(np.array(x).reshape(3,3)))
    )
  # Unpacking the energies.
  alldf = _format_dftdf(rawdf)
  for qmc in ['vmc','dmc']:
    qmcdf = unpack(rawdf[qmc])
    if 'energy' in qmcdf.columns:
      qmcdf = qmcdf.join(
          unpack(qmcdf['energy'].dropna()).applymap(dp.undict)
        )
      qmcdf = qmcdf\
          .rename(columns={'value':"%s_energy"%qmc,'error':"%s_energy_err"%qmc})\
          .drop('energy',axis=1)
    # FIXME some bug in reading the jastrow and optimizer, not sure where it's coming from.
    qmcdf.loc[qmcdf['jastrow'].isnull(),'jastrow']='twobody'
    qmcdf.loc[qmcdf['optimizer'].isnull(),'optimizer']='energy'
    alldf = alldf.join(qmcdf,lsuffix='',rsuffix='_new')
    # Sanity check: the join must not change any pre-existing column value.
    for col in alldf.columns:
      if '_new' in col:
        sel=alldf[col].notnull()
        diff=alldf.loc[sel,col.replace('_new','')]!=alldf.loc[sel,col]
        assert not any(diff),'''
        Joined QMC data changed something. {}'''.format(alldf.loc[sel,[col,col+'_new']][diff])
        #assert all(alldf.loc[sel,col.replace('_new','')]==alldf.loc[sel,col])
        del alldf[col]
    if "%s_energy"%qmc in qmcdf.columns:
      alldf["%s_energy"%qmc] = alldf["%s_energy"%qmc]
      alldf["%s_energy_err"%qmc] = alldf["%s_energy_err"%qmc]
  # Columns whose entries are lists; converted to tuples below so rows hash.
  listcols = [
      'broyden',
      'initial_charges',
      'energy_trace',
      'initial_spin',
      'kmesh',
      'levshift',
      # 'localization',
      # 'timestep',
      # 'jastrow',
      # 'optimizer'
    ]
  alldf=alldf[alldf['id'].apply(filterfunc)]
  if 'mag_moments' in alldf.columns: listcols.append('mag_moments')
  # Convert lists.
  for col in listcols:
    if col in alldf.columns:
      alldf.loc[alldf[col].notnull(),col] = \
          alldf.loc[alldf[col].notnull(),col].apply(lambda x:tuple(x))
  # NOTE(review): errors='ignore' is deprecated in recent pandas; confirm the
  # intended fall-through behavior before upgrading.
  for col in alldf.columns:
    alldf[col] = pd.to_numeric(alldf[col],errors='ignore')
  if 'cif' in alldf.keys():
    alldf = alldf.join(
        alldf.loc[alldf['cif']!="None",'cif'].apply(extract_struct),
        rsuffix="_extract"
      )
  return alldf
def cast_supercell(sup):
  """ Convert a nested-list supercell matrix into nested tuples so it is
  hashable. The rows of `sup` are replaced in place as a side effect. """
  sup[:] = [tuple(row) for row in sup]
  return tuple(sup)
def make_basis_consistent(row):
  """ Ensure row['basis'] is a per-atom dict keyed like row['initial_charges'].

  A dict basis is returned unchanged; any other basis spec is replicated for
  every atom (all atoms share the same basis object).
  """
  basis = row['basis']
  if type(basis)==dict:
    return basis
  return {atom: basis for atom in row['initial_charges'].keys()}
def _format_dftdf(rawdf):
  """ Unpack the 'dft' column of the raw autogen DataFrame into flat columns.

  Adds derived columns: basis summary (basis_lowest/number/factor), levshift
  defaults, max magnetic moment, and a 'dft_energy' alias of 'total_energy'.
  """
  def desect_basis(basis_info):
    """ Summarize a basis spec as a (lowest, number, factor) Series. """
    if type(basis_info)==list:
      return pd.Series(dict(zip(
        ['basis_lowest','basis_number','basis_factor'],basis_info)))
    # This part of the method is for the old basis part.
    elif type(basis_info)==dict:
      min_basis = 1e10
      for atom in basis_info.keys():
        new = min([np.array(elem['coefs'])[0,:].min() for elem in basis_info[atom]])
        if new < min_basis: min_basis = new
      return pd.Series(dict(zip(
        ['basis_lowest','basis_number','basis_factor'],[min_basis,0,0])))
    # For now taking the best part of each atom so it works.
    # This is the case of split basis, which I determined is not that useful.
    # Not sure if this is the best behavior.
    #elif type(basis_info)==dict:
    #  min_basis = min((basis[0] for atom,basis in basis_info.items()))
    #  max_factor = max((basis[1] for atom,basis in basis_info.items()))
    #  max_number = max((basis[2] for atom,basis in basis_info.items()))
    #  return pd.Series(dict(zip(
    #    ['basis_lowest','basis_number','basis_factor'],[min_basis,max_factor,max_number])))
    else:
      return pd.Series(dict(zip(
        ['basis_lowest','basis_number','basis_factor'],[0,0,0])))
  def hashable_basis(basis_info):
    """ Convert a basis spec into nested tuples so it can be hashed.
    (Currently unused; kept with the commented-out lines below.) """
    if type(basis_info)==dict:
      atoms=sorted(basis_info.keys())
      return tuple(zip(atoms,(tuple(basis_info[a]) for a in atoms)))
    else:
      return tuple(basis_info)
  ids = rawdf['control'].apply(lambda x:x['id'])
  dftdf = unpack(rawdf['dft'])
  dftdf = dftdf.join(ids).rename(columns={'control':'id'})
  # Carry any available metadata columns over from the raw frame.
  copylist = ['supercell','ncell','cif','xyz','a','c','se_height','pressure','ordering','total_spin']
  for rawinfo in copylist:
    if rawinfo in rawdf.columns:
      dftdf = dftdf.join(rawdf[rawinfo])
  funcdf = pd.DataFrame(dftdf['functional'].to_dict()).T
  dftdf = dftdf.join(funcdf)
  dftdf['tolinteg'] = dftdf['tolinteg'].apply(lambda x:x[0])
  dftdf['spins_consistent'] = dftdf['spins_consistent'].astype(bool)
  dftdf = dftdf.join(dftdf['basis'].apply(desect_basis))
  #dftdf['basis']=dftdf.apply(make_basis_consistent,axis=1)
  #dftdf['basis'] = dftdf['basis'].apply(hashable_basis)
  dftdf['basis_number'] = dftdf['basis_number'].astype(int)
  dftdf.loc[dftdf['supercell'].notnull(),'supercell'] = \
      dftdf.loc[dftdf['supercell'].notnull(),'supercell']\
      .apply(lambda x:cast_supercell(x))
  # Missing levshift entries default to (0.0, 0).
  dftdf.loc[dftdf['levshift'].isnull(),'levshift']=\
      dftdf.loc[dftdf['levshift'].isnull(),'levshift']\
      .apply(lambda x:(0.0,0))
  dftdf['levshift_shift']=dftdf['levshift'].apply(lambda x: x[0])
  if 'mag_moments' in dftdf.columns:
    dftdf['max_mag_moment'] = np.nan
    dftdf.loc[dftdf['mag_moments'].notnull(),'max_mag_moment'] =\
      dftdf.loc[dftdf['mag_moments'].notnull(),'mag_moments'].apply(lambda x:
          max(abs(np.array(x)))
        )
  dftdf['dft_energy'] = dftdf['total_energy']
  dftdf=dftdf.drop(['functional'],axis=1)
  return dftdf
###############################################################################
# Misc. tools.
def unpack(ser):
  """ Expand a Series of dicts into a DataFrame: one row per Series entry,
  one column per dict key. Works with most autogen levels of data storage. """
  as_dict = ser.to_dict()
  return pd.DataFrame(as_dict).T
# Tuple to DF entry.
def parse_err(df,key='energy'):
  """ Split a column of (value,error) tuples into `key` and `key`+'_err'.

  Args:
    df (DataFrame): mutated in place -- the original `key` column is removed.
    key (str): column holding the (value, error) tuples.
  Returns:
    DataFrame: df joined with the two new columns.

  Bug fix: the error column was always named 'energy_err' regardless of
  `key`; it now follows the requested column name (behavior is unchanged
  for the default key).
  """
  errkey = key+'_err'
  tmpdf = df[key].apply(lambda x: pd.Series({key:x[0],errkey:x[1]}))
  del df[key]
  return df.join(tmpdf)
# Get out atomic positions (and possibly more later).
# Pretty slow: can be made faster by saving cifs that are already done.
def extract_struct(cifstr):
  """ Parse a CIF string into lattice constants and fractional positions.

  Returns a Series with 'a','b','c' lattice lengths and 'positions', a dict
  mapping each element symbol to an ndarray of fractional coordinates.
  """
  struct = CifParser.from_string(cifstr)\
      .get_structures()[0]\
      .as_dict()
  # Group the fractional coordinates by element symbol, preserving order.
  positions = {}
  for site in struct['sites']:
    element = site['species'][0]['element']
    positions.setdefault(element,[]).append(tuple(site['abc']))
  for element in positions:
    positions[element] = np.array(positions[element])
  return pd.Series(
      [struct['lattice']['a'],
       struct['lattice']['b'],
       struct['lattice']['c'],
       positions],
      ['a','b','c','positions']
    )
def match(df,cond,keys):
  """ Return every row of df whose `keys` columns equal those of the single
  row selected by the boolean mask `cond`.

  Args:
    df (DataFrame): frame to search.
    cond: boolean mask selecting exactly one reference row.
    keys (list): column names that define a "match".
  Returns:
    DataFrame: all rows sharing the reference row's `keys` values.
  Raises:
    AssertionError: if `cond` selects more than one row.
  """
  match_this = df.loc[cond,keys]
  if len(match_this)>1:
    print("Multiple rows match:")
    print(match_this)
    raise AssertionError("Row match not unique")
  # Cross-section on the key values of the (unique) matched row.
  match_this = match_this.iloc[0].values
  return df.set_index(keys).xs(match_this,level=keys).reset_index()
def find_duplicates(df,def_cols):
  """ Return the rows of df that duplicate an earlier row on def_cols.

  Args:
    df (DataFrame): frame to scan.
    def_cols (list): columns that define row identity.
  Returns:
    DataFrame: the def_cols projection of every row whose identity already
      appeared earlier in df (first occurrences are excluded).

  Bug fix: the original discarded the result of `duped.drop(...)` and
  always returned None, and it deduplicated on all columns rather than the
  defining columns.
  """
  #TODO hard to compare arrays with NANs correctly.
  duped=df[def_cols]
  clean=duped.drop_duplicates()
  return duped.drop(clean.index)
##############################################################################
# Testing.
if __name__=='__main__':
  # Smoke test: process a bundled example record and show which DMC
  # fluctuation quantities were produced.
  datajson=process_record(json.load(open('exampledata/fese_mags_0.record.json','r')))
  print(datajson['dmc']['fluct'].keys())
| gpl-2.0 |
imito/odin | examples/nist_sre/analyze.py | 1 | 6119 | from __future__ import print_function, division, absolute_import
import os
os.environ['ODIN'] = 'gpu,float32'
import shutil
from collections import defaultdict
import numpy as np
import tensorflow as tf
from odin import fuel as F
from odin import nnet as N, backend as K
from odin.utils import (ctext, mpi, Progbar, catch_warnings_ignore, stdio,
get_logpath)
from sklearn.metrics import accuracy_score, log_loss, f1_score
from helpers import (FEATURE_RECIPE, FEATURE_NAME, PATH_ACOUSTIC_FEATURES,
MINIMUM_UTT_DURATION, ANALYSIS_DIR, EXP_DIR,
filter_utterances, prepare_dnn_data)
# NOTE(review): only the last of these three assignments takes effect; the
# earlier MODEL_ID lines look like manual toggles between experiments.
MODEL_ID = 'xvec_mfccmusanrirs.mfcc.5_pad_5_8.fisher_swb_voxceleb1_voxceleb2'
MODEL_ID = 'xvec_mfccmusanrirs.mfcc.5_pad_5_8.fisher_voxceleb1_voxceleb2'
MODEL_ID = 'xvec_mfccmusanrirs.mfcc.5_pad_5_8.fisher_sre10_swb_voxceleb1_voxceleb2'
# Decompose the dot-separated model ID into its experiment settings:
# <arch>.<feature>.<uttlen>_<seqmode>_<mindur>_<minutt>.<excluded datasets>
info = MODEL_ID.split('.')
feat_name = info[1]
utt_length, seq_mode, min_dur, min_utt = info[2].split('_')
exclude_datasets = info[-1].split('_')
# ====== base dir ====== #
BASE_DIR = os.path.join(EXP_DIR, MODEL_ID)
assert FEATURE_RECIPE.replace('_', '') in os.path.basename(BASE_DIR)
assert FEATURE_NAME in os.path.basename(BASE_DIR)
# ====== get the last model ====== #
# Checkpoints are named 'model.ai.<step>'; pick the highest step.
all_model = sorted([name
                    for name in os.listdir(BASE_DIR)
                    if 'model.ai.' in name],
                   key=lambda x: int(x.split('.')[-1]))
assert len(all_model) > 0, "Cannot find any model.ai. at path: %s" % BASE_DIR
MODEL = os.path.join(BASE_DIR, all_model[-1])
# ====== prepare log ====== #
stdio(get_logpath(name="analyze.log", increasing=True,
                  odin_base=False, root=ANALYSIS_DIR))
print(ctext(BASE_DIR, 'lightyellow'))
print(ctext(MODEL, 'lightyellow'))
print("Feature name:", ctext(feat_name, 'lightyellow'))
print("Utt length :", ctext(utt_length, 'lightyellow'))
print("Seq mode :", ctext(seq_mode, 'lightyellow'))
print("Min Duration:", ctext(min_dur, 'lightyellow'))
print("Min #Utt :", ctext(min_utt, 'lightyellow'))
print("Excluded :", ctext(exclude_datasets, 'lightyellow'))
# ===========================================================================
# Load the data
# ===========================================================================
train, valid, all_speakers, ds = prepare_dnn_data(save_dir=BASE_DIR,
    feat_name=feat_name, utt_length=int(utt_length), seq_mode=str(seq_mode),
    min_dur=int(min_dur), min_utt=int(min_utt),
    exclude=exclude_datasets, train_proportion=0.5,
    return_dataset=True)
print(ds)
# Map integer class labels back to speaker IDs for sanity checks below.
label2spk = {i: spk for i, spk in enumerate(all_speakers)}
labels = np.arange(len(all_speakers))
X = ds[FEATURE_NAME]
indices = ds['indices_%s' % FEATURE_NAME]
spkid = ds['spkid']
# ===========================================================================
# Load the model
# ===========================================================================
# ====== load the network ====== #
x_vec = N.deserialize(path=MODEL, force_restore_vars=True)
# ====== get output tensors ====== #
y_logit = x_vec()
y_proba = tf.nn.softmax(y_logit)
# NOTE(review): this rebinding clobbers the dataset `X` loaded above with the
# network's input placeholder; the feature array itself is never referenced
# again in this view -- confirm that is intentional.
X = K.ComputationGraph(y_proba).placeholders[0]
z = K.ComputationGraph(y_proba).get(roles=N.Dense, scope='LatentOutput',
                                    beginning_scope=False)[0]
# Compiled functions: frame posteriors and the x-vector latent embedding.
f_prob = K.function(inputs=X, outputs=y_proba, training=False)
f_z = K.function(inputs=X, outputs=z, training=False)
print('Inputs:', ctext(X, 'cyan'))
print('Predic:', ctext(y_proba, 'cyan'))
print('Latent:', ctext(z, 'cyan'))
# ===========================================================================
# Helper
# ===========================================================================
def evaluate_prediction(name_list, y_pred, y_true, title):
  """Print frame-level and utterance-level log-loss / accuracy, overall and
  broken down per source dataset.

  Parameters
  ----------
  name_list : list of str
      Utterance names aligned with `y_pred` / `y_true`.
  y_pred : list of ndarray
      Per-utterance frame posteriors; presumably each is
      (n_frames, n_classes) — TODO confirm against `f_prob` output.
  y_true : list of ndarray
      Per-utterance frame labels; every frame of one utterance carries the
      same label (enforced by the caller `make_prediction`).
  title : str
      Heading printed before the report.
  """
  def _report(y_p, y_t, pad=''):
    with catch_warnings_ignore(Warning):
      # ====== frame-level: score every frame independently ====== #
      z_ = np.concatenate(y_p, axis=0)
      z = np.concatenate(y_t, axis=0)
      print(pad, '*** %s ***' % ctext('Frame-level', 'lightcyan'))
      print(pad, "#Samples:", ctext(len(z), 'cyan'))
      print(pad, "Log loss:", log_loss(y_true=z, y_pred=z_, labels=labels))
      print(pad, "Accuracy:", accuracy_score(y_true=z, y_pred=np.argmax(z_, axis=-1)))
      # ====== utterance-level: average frame posteriors per utterance ====== #
      z_ = np.concatenate([np.mean(i, axis=0, keepdims=True) for i in y_p],
                          axis=0)
      # one label per utterance: the first frame is representative
      z = np.array([i[0] for i in y_t])
      print(pad, '*** %s ***' % ctext('Utterance-level', 'lightcyan'))
      print(pad, "#Samples:", ctext(len(z), 'cyan'))
      print(pad, "Log loss:", log_loss(y_true=z, y_pred=z_, labels=labels))
      print(pad, "Accuracy:", accuracy_score(y_true=z, y_pred=np.argmax(z_, axis=-1)))
  # Group utterances by source dataset name (looked up from the module-level
  # dataset handle `ds`).
  datasets_2_samples = defaultdict(list)
  for name, y_p, y_t in zip(name_list, y_pred, y_true):
    dsname = ds['dsname'][name]
    datasets_2_samples[dsname].append((name, y_p, y_t))
  print('=' * 12, ctext(title, 'lightyellow'), '=' * 12)
  # Overall report first, then one indented report per dataset.
  _report(y_p=y_pred, y_t=y_true)
  for dsname, data in sorted(datasets_2_samples.items(),
                             key=lambda x: x[0]):
    print(ctext(dsname, 'yellow'), ':')
    y_pred = [i[1] for i in data]
    y_true = [i[2] for i in data]
    _report(y_p=y_pred, y_t=y_true, pad=' ')
# ===========================================================================
# make prediction
# ===========================================================================
def make_prediction(feeder, title):
  """Run the model over every utterance in `feeder` and report accuracy.

  Iterates file-by-file (batch_mode='file'), sanity-checks the labels
  against the module-level speaker mapping, then delegates scoring to
  `evaluate_prediction`.

  Parameters
  ----------
  feeder : project data feeder (e.g. `train` or `valid`)
  title : str
      Name used for the progress bar and the printed report.
  """
  prog = Progbar(target=len(feeder), print_summary=True, name=title)
  name_list = []
  y_pred = []
  y_true = []
  for name, idx, X, y in feeder.set_batch(batch_size=100000,
                                          batch_mode='file',
                                          seed=None, shuffle_level=0):
    name_list.append(name)
    # Labels arrive one-hot; collapse to integer class indices.
    y = np.argmax(y, axis=-1)
    # All frames of one utterance must share a single speaker label.
    assert len(np.unique(y)) == 1, name
    spk = label2spk[y[0]]
    # The feeder's label must agree with the dataset's speaker-id table.
    assert spkid[name] == spk, name
    y_true.append(y)
    y_ = f_prob(X)
    y_pred.append(y_)
    assert len(y) == len(y_)
    prog.add(X.shape[0])
  evaluate_prediction(name_list, y_pred, y_true, title=title)
# ====== do it ====== #
make_prediction(train, title="Train Data")
make_prediction(valid, title="Valid Data")
| mit |
GuessWhoSamFoo/pandas | pandas/core/indexes/interval.py | 2 | 45940 | """ define the IntervalIndex """
import textwrap
import warnings
import numpy as np
from pandas._libs import Timedelta, Timestamp
from pandas._libs.interval import Interval, IntervalMixin, IntervalTree
from pandas.compat import add_metaclass
from pandas.util._decorators import Appender, cache_readonly
from pandas.util._doctools import _WritableDoc
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.cast import (
find_common_type, infer_dtype_from_scalar, maybe_downcast_to_dtype)
from pandas.core.dtypes.common import (
ensure_platform_int, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype,
is_dtype_equal, is_float, is_float_dtype, is_integer, is_integer_dtype,
is_interval_dtype, is_list_like, is_number, is_object_dtype, is_scalar)
from pandas.core.dtypes.missing import isna
from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs
import pandas.core.common as com
from pandas.core.config import get_option
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index, _index_shared_docs, default_pprint, ensure_index)
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.multi import MultiIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
from pandas.core.ops import get_op_result_name
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
qualname="IntervalIndex",
target_klass='IntervalIndex or list of Intervals',
name=textwrap.dedent("""\
name : object, optional
Name to be stored in the index.
"""),
))
def _get_next_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_prev_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_interval_closed_bounds(interval):
    """
    Given an Interval or IntervalIndex, return the corresponding interval with
    closed bounds.
    """
    # An open bound is tightened to the nearest value inside the interval.
    lo = (_get_next_label(interval.left)
          if interval.open_left else interval.left)
    hi = (_get_prev_label(interval.right)
          if interval.open_right else interval.right)
    return lo, hi
def _new_IntervalIndex(cls, d):
    """
    This is called upon unpickling, rather than the default which doesn't have
    arguments and breaks __new__
    """
    # `d` holds the 'left'/'right' arrays (plus attributes) that
    # IntervalIndex.__reduce__ saved.
    return cls.from_arrays(**d)
@Appender(_interval_shared_docs['class'] % dict(
klass="IntervalIndex",
summary="Immutable index of intervals that are closed on the same side.",
name=_index_doc_kwargs['name'],
versionadded="0.20.0",
extra_attributes="is_overlapping\nvalues\n",
extra_methods="contains\n",
examples=textwrap.dedent("""\
Examples
--------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right',
dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
"""),
))
@add_metaclass(_WritableDoc)
class IntervalIndex(IntervalMixin, Index):
_typ = 'intervalindex'
_comparables = ['name']
_attributes = ['name', 'closed']
# we would like our indexing holder to defer to us
_defer_to_indexing = True
# Immutable, so we are able to cache computations like isna in '_mask'
_mask = None
# --------------------------------------------------------------------
# Constructors
def __new__(cls, data, closed=None, dtype=None, copy=False,
name=None, verify_integrity=True):
if name is None and hasattr(data, 'name'):
name = data.name
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype,
verify_integrity=verify_integrity)
return cls._simple_new(array, name)
@classmethod
def _simple_new(cls, array, name, closed=None):
"""
Construct from an IntervalArray
Parameters
----------
array : IntervalArray
name : str
Attached as result.name
closed : Any
Ignored.
"""
result = IntervalMixin.__new__(cls)
result._data = array
result.name = name
result._reset_identity()
return result
@classmethod
@Appender(_interval_shared_docs['from_breaks'] % _index_doc_kwargs)
def from_breaks(cls, breaks, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy,
dtype=dtype)
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_arrays'] % _index_doc_kwargs)
def from_arrays(cls, left, right, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(left, right, closed, copy=copy,
dtype=dtype)
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_intervals'] % _index_doc_kwargs)
def from_intervals(cls, data, closed=None, name=None, copy=False,
dtype=None):
msg = ('IntervalIndex.from_intervals is deprecated and will be '
'removed in a future version; Use IntervalIndex(...) instead')
warnings.warn(msg, FutureWarning, stacklevel=2)
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype)
if name is None and isinstance(data, cls):
name = data.name
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_tuples'] % _index_doc_kwargs)
def from_tuples(cls, data, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy,
dtype=dtype)
return cls._simple_new(arr, name=name)
# --------------------------------------------------------------------
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, left=None, right=None, **kwargs):
result = self._data._shallow_copy(left=left, right=right)
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(result, **attributes)
@cache_readonly
def _isnan(self):
"""Return a mask indicating if each value is NA"""
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@cache_readonly
def _engine(self):
left = self._maybe_convert_i8(self.left)
right = self._maybe_convert_i8(self.right)
return IntervalTree(left, right, closed=self.closed)
def __contains__(self, key):
"""
return a boolean if this key is IN the index
We *only* accept an Interval
Parameters
----------
key : Interval
Returns
-------
boolean
"""
if not isinstance(key, Interval):
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
def contains(self, key):
"""
Return a boolean indicating if the key is IN the index
We accept / allow keys to be not *just* actual
objects.
Parameters
----------
key : int, float, Interval
Returns
-------
boolean
"""
try:
self.get_loc(key)
return True
except KeyError:
return False
@Appender(_interval_shared_docs['to_tuples'] % dict(
return_type="Index",
examples="""
Examples
--------
>>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])
>>> idx.to_tuples()
Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')
>>> idx.to_tuples(na_tuple=False)
Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""",
))
def to_tuples(self, na_tuple=True):
tuples = self._data.to_tuples(na_tuple=na_tuple)
return Index(tuples)
@cache_readonly
def _multiindex(self):
return MultiIndex.from_arrays([self.left, self.right],
names=['left', 'right'])
@property
def left(self):
"""
Return the left endpoints of each Interval in the IntervalIndex as
an Index
"""
return self._data._left
@property
def right(self):
"""
Return the right endpoints of each Interval in the IntervalIndex as
an Index
"""
return self._data._right
@property
def closed(self):
"""
Whether the intervals are closed on the left-side, right-side, both or
neither
"""
return self._data._closed
@Appender(_interval_shared_docs['set_closed'] % _index_doc_kwargs)
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = "invalid option for 'closed': {closed}"
raise ValueError(msg.format(closed=closed))
# return self._shallow_copy(closed=closed)
array = self._data.set_closed(closed)
return self._simple_new(array, self.name)
@property
def length(self):
"""
Return an Index with entries denoting the length of each Interval in
the IntervalIndex
"""
return self._data.length
@property
def size(self):
# Avoid materializing ndarray[Interval]
return self._data.size
@property
def shape(self):
# Avoid materializing ndarray[Interval]
return self._data.shape
@property
def itemsize(self):
msg = ('IntervalIndex.itemsize is deprecated and will be removed in '
'a future version')
warnings.warn(msg, FutureWarning, stacklevel=2)
# supress the warning from the underlying left/right itemsize
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return self.left.itemsize + self.right.itemsize
def __len__(self):
return len(self.left)
@cache_readonly
def values(self):
"""
Return the IntervalIndex's data as an IntervalArray.
"""
return self._data
@cache_readonly
def _values(self):
return self._data
@cache_readonly
def _ndarray_values(self):
return np.array(self._data)
def __array__(self, result=None):
""" the array interface, return my values """
return self._ndarray_values
def __array_wrap__(self, result, context=None):
# we don't want the superclass implementation
return result
def __reduce__(self):
d = dict(left=self.left,
right=self.right)
d.update(self._get_attributes_dict())
return _new_IntervalIndex, (self.__class__, d), None
@Appender(_index_shared_docs['copy'])
def copy(self, deep=False, name=None):
array = self._data.copy(deep=deep)
attributes = self._get_attributes_dict()
if name is not None:
attributes.update(name=name)
return self._simple_new(array, **attributes)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
with rewrite_exception('IntervalArray', self.__class__.__name__):
new_values = self.values.astype(dtype, copy=copy)
if is_interval_dtype(new_values):
return self._shallow_copy(new_values.left, new_values.right)
return super(IntervalIndex, self).astype(dtype, copy=copy)
@cache_readonly
def dtype(self):
"""Return the dtype object of the underlying data"""
return self._data.dtype
@property
def inferred_type(self):
"""Return a string of the type inferred from the values"""
return 'interval'
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we don't use an explicit engine
# so return the bytes here
return (self.left.memory_usage(deep=deep) +
self.right.memory_usage(deep=deep))
@cache_readonly
def mid(self):
"""
Return the midpoint of each Interval in the IntervalIndex as an Index
"""
return self._data.mid
@cache_readonly
def is_monotonic(self):
"""
Return True if the IntervalIndex is monotonic increasing (only equal or
increasing values), else False
"""
return self._multiindex.is_monotonic
@cache_readonly
def is_monotonic_increasing(self):
"""
Return True if the IntervalIndex is monotonic increasing (only equal or
increasing values), else False
"""
return self._multiindex.is_monotonic_increasing
@cache_readonly
def is_monotonic_decreasing(self):
"""
Return True if the IntervalIndex is monotonic decreasing (only equal or
decreasing values), else False
"""
return self._multiindex.is_monotonic_decreasing
@cache_readonly
def is_unique(self):
"""
Return True if the IntervalIndex contains unique elements, else False
"""
return self._multiindex.is_unique
@cache_readonly
@Appender(_interval_shared_docs['is_non_overlapping_monotonic']
% _index_doc_kwargs)
def is_non_overlapping_monotonic(self):
return self._data.is_non_overlapping_monotonic
@property
def is_overlapping(self):
"""
Return True if the IntervalIndex has overlapping intervals, else False.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
.. versionadded:: 0.24.0
Returns
-------
bool
Boolean indicating if the IntervalIndex has overlapping intervals.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
IntervalIndex.overlaps : Check an IntervalIndex elementwise for
overlaps.
Examples
--------
>>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
>>> index
IntervalIndex([(0, 2], (1, 3], (4, 5]],
closed='right',
dtype='interval[int64]')
>>> index.is_overlapping
True
Intervals that share closed endpoints overlap:
>>> index = pd.interval_range(0, 3, closed='both')
>>> index
IntervalIndex([[0, 1], [1, 2], [2, 3]],
closed='both',
dtype='interval[int64]')
>>> index.is_overlapping
True
Intervals that only have an open endpoint in common do not overlap:
>>> index = pd.interval_range(0, 3, closed='left')
>>> index
IntervalIndex([[0, 1), [1, 2), [2, 3)],
closed='left',
dtype='interval[int64]')
>>> index.is_overlapping
False
"""
# GH 23309
return self._engine.is_overlapping
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if kind == 'iloc':
return super(IntervalIndex, self)._convert_scalar_indexer(
key, kind=kind)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
"""
locs = self.get_indexer_for(keyarr)
# we have missing values
if (locs == -1).any():
raise KeyError
return locs
def _maybe_cast_indexed(self, key):
"""
we need to cast the key, which could be a scalar
or an array-like to the type of our subtype
"""
if isinstance(key, IntervalIndex):
return key
subtype = self.dtype.subtype
if is_float_dtype(subtype):
if is_integer(key):
key = float(key)
elif isinstance(key, (np.ndarray, Index)):
key = key.astype('float64')
elif is_integer_dtype(subtype):
if is_integer(key):
key = int(key)
return key
def _needs_i8_conversion(self, key):
"""
Check if a given key needs i8 conversion. Conversion is necessary for
Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
Interval-like requires conversion if it's endpoints are one of the
aforementioned types.
Assumes that any list-like data has already been cast to an Index.
Parameters
----------
key : scalar or Index-like
The key that should be checked for i8 conversion
Returns
-------
boolean
"""
if is_interval_dtype(key) or isinstance(key, Interval):
return self._needs_i8_conversion(key.left)
i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
return isinstance(key, i8_types)
def _maybe_convert_i8(self, key):
"""
Maybe convert a given key to it's equivalent i8 value(s). Used as a
preprocessing step prior to IntervalTree queries (self._engine), which
expects numeric data.
Parameters
----------
key : scalar or list-like
The key that should maybe be converted to i8.
Returns
-------
key: scalar or list-like
The original key if no conversion occured, int if converted scalar,
Int64Index if converted list-like.
"""
original = key
if is_list_like(key):
key = ensure_index(key)
if not self._needs_i8_conversion(key):
return original
scalar = is_scalar(key)
if is_interval_dtype(key) or isinstance(key, Interval):
# convert left/right and reconstruct
left = self._maybe_convert_i8(key.left)
right = self._maybe_convert_i8(key.right)
constructor = Interval if scalar else IntervalIndex.from_arrays
return constructor(left, right, closed=self.closed)
if scalar:
# Timestamp/Timedelta
key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)
else:
# DatetimeIndex/TimedeltaIndex
key_dtype, key_i8 = key.dtype, Index(key.asi8)
if key.hasnans:
# convert NaT from it's i8 value to np.nan so it's not viewed
# as a valid value, maybe causing errors (e.g. is_overlapping)
key_i8 = key_i8.where(~key._isnan)
# ensure consistency with IntervalIndex subtype
subtype = self.dtype.subtype
msg = ('Cannot index an IntervalIndex of subtype {subtype} with '
'values of dtype {other}')
if not is_dtype_equal(subtype, key_dtype):
raise ValueError(msg.format(subtype=subtype, other=key_dtype))
return key_i8
def _check_method(self, method):
if method is None:
return
if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:
msg = 'method {method} not yet implemented for IntervalIndex'
raise NotImplementedError(msg.format(method=method))
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError('can only get slices from an IntervalIndex if '
'bounds are non-overlapping and all monotonic '
'increasing or decreasing')
if isinstance(label, IntervalMixin):
raise NotImplementedError
# GH 20921: "not is_monotonic_increasing" for the second condition
# instead of "is_monotonic_decreasing" to account for single element
# indexes being both increasing and decreasing
if ((side == 'left' and self.left.is_monotonic_increasing) or
(side == 'right' and not self.left.is_monotonic_increasing)):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
def _get_loc_only_exact_matches(self, key):
if isinstance(key, Interval):
if not self.is_unique:
raise ValueError("cannot index with a slice Interval"
" and a non-unique index")
# TODO: this expands to a tuple index, see if we can
# do better
return Index(self._multiindex.values).get_loc(key)
raise KeyError
def _find_non_overlapping_monotonic_bounds(self, key):
if isinstance(key, IntervalMixin):
start = self._searchsorted_monotonic(
key.left, 'left', exclude_label=key.open_left)
stop = self._searchsorted_monotonic(
key.right, 'right', exclude_label=key.open_right)
elif isinstance(key, slice):
# slice
start, stop = key.start, key.stop
if (key.step or 1) != 1:
raise NotImplementedError("cannot slice with a slice step")
if start is None:
start = 0
else:
start = self._searchsorted_monotonic(start, 'left')
if stop is None:
stop = len(self)
else:
stop = self._searchsorted_monotonic(stop, 'right')
else:
# scalar or index-like
start = self._searchsorted_monotonic(key, 'left')
stop = self._searchsorted_monotonic(key, 'right')
return start, stop
def get_loc(self, key, method=None):
"""Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}, optional
* default: matches where the label is within an interval only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
---------
>>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
>>> index = pd.IntervalIndex([i1, i2])
>>> index.get_loc(1)
0
You can also supply an interval or an location for a point inside an
interval.
>>> index.get_loc(pd.Interval(0, 2))
array([0, 1], dtype=int64)
>>> index.get_loc(1.5)
1
If a label is in several intervals, you get the locations of all the
relevant intervals.
>>> i3 = pd.Interval(0, 2)
>>> overlapping_index = pd.IntervalIndex([i2, i3])
>>> overlapping_index.get_loc(1.5)
array([0, 1], dtype=int64)
"""
self._check_method(method)
original_key = key
key = self._maybe_cast_indexed(key)
if self.is_non_overlapping_monotonic:
if isinstance(key, Interval):
left = self._maybe_cast_slice_bound(key.left, 'left', None)
right = self._maybe_cast_slice_bound(key.right, 'right', None)
key = Interval(left, right, key.closed)
else:
key = self._maybe_cast_slice_bound(key, 'left', None)
start, stop = self._find_non_overlapping_monotonic_bounds(key)
if start is None or stop is None:
return slice(start, stop)
elif start + 1 == stop:
return start
elif start < stop:
return slice(start, stop)
else:
raise KeyError(original_key)
else:
# use the interval tree
key = self._maybe_convert_i8(key)
if isinstance(key, Interval):
left, right = _get_interval_closed_bounds(key)
return self._engine.get_loc_interval(left, right)
else:
return self._engine.get_loc(key)
def get_value(self, series, key):
if com.is_bool_indexer(key):
loc = key
elif is_list_like(key):
loc = self.get_indexer(key)
elif isinstance(key, slice):
if not (key.step is None or key.step == 1):
raise ValueError("cannot support not-default step in a slice")
try:
loc = self.get_loc(key)
except TypeError:
# we didn't find exact intervals or are non-unique
msg = "unable to slice with this key: {key}".format(key=key)
raise ValueError(msg)
else:
loc = self.get_loc(key)
return series.iloc[loc]
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
self._check_method(method)
target = ensure_index(target)
target = self._maybe_cast_indexed(target)
if self.equals(target):
return np.arange(len(self), dtype='intp')
if self.is_non_overlapping_monotonic:
start, stop = self._find_non_overlapping_monotonic_bounds(target)
start_plus_one = start + 1
if not ((start_plus_one < stop).any()):
return np.where(start_plus_one == stop, start, -1)
if not self.is_unique:
raise ValueError("cannot handle non-unique indices")
# IntervalIndex
if isinstance(target, IntervalIndex):
indexer = self._get_reindexer(target)
# non IntervalIndex
else:
indexer = np.concatenate([self.get_loc(i) for i in target])
return ensure_platform_int(indexer)
def _get_reindexer(self, target):
"""
Return an indexer for a target IntervalIndex with self
"""
# find the left and right indexers
left = self._maybe_convert_i8(target.left)
right = self._maybe_convert_i8(target.right)
lindexer = self._engine.get_indexer(left.values)
rindexer = self._engine.get_indexer(right.values)
# we want to return an indexer on the intervals
# however, our keys could provide overlapping of multiple
# intervals, so we iterate thru the indexers and construct
# a set of indexers
indexer = []
n = len(self)
for i, (lhs, rhs) in enumerate(zip(lindexer, rindexer)):
target_value = target[i]
# matching on the lhs bound
if (lhs != -1 and
self.closed == 'right' and
target_value.left == self[lhs].right):
lhs += 1
# matching on the lhs bound
if (rhs != -1 and
self.closed == 'left' and
target_value.right == self[rhs].left):
rhs -= 1
# not found
if lhs == -1 and rhs == -1:
indexer.append(np.array([-1]))
elif rhs == -1:
indexer.append(np.arange(lhs, n))
elif lhs == -1:
# care about left/right closed here
value = self[i]
# target.closed same as self.closed
if self.closed == target.closed:
if target_value.left < value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'left'
elif self.closed == 'right':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'right'
elif self.closed == 'left':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
indexer.append(np.arange(0, rhs + 1))
else:
indexer.append(np.arange(lhs, rhs + 1))
return np.concatenate(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = self._maybe_cast_indexed(ensure_index(target))
return super(IntervalIndex, self).get_indexer_non_unique(target)
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
return self._shallow_copy(values)
def delete(self, loc):
"""
Return a new IntervalIndex with passed location(-s) deleted
Returns
-------
new_index : IntervalIndex
"""
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
return self._shallow_copy(new_left, new_right)
def insert(self, loc, item):
"""
Return a new IntervalIndex inserting new item at location. Follows
Python list.append semantics for negative values. Only Interval
objects and NA can be inserted into an IntervalIndex
Parameters
----------
loc : int
item : object
Returns
-------
new_index : IntervalIndex
"""
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError('inserted item must be closed on the same '
'side as the index')
left_insert = item.left
right_insert = item.right
elif is_scalar(item) and isna(item):
# GH 18295
left_insert = right_insert = item
else:
raise ValueError('can only insert Interval objects and NA into '
'an IntervalIndex')
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
return self._shallow_copy(new_left, new_right)
def _as_like_interval_index(self, other):
self._assert_can_do_setop(other)
other = ensure_index(other)
if not isinstance(other, IntervalIndex):
msg = ('the other index needs to be an IntervalIndex too, but '
'was type {}').format(other.__class__.__name__)
raise TypeError(msg)
elif self.closed != other.closed:
msg = ('can only do set operations between two IntervalIndex '
'objects that are closed on the same side')
raise ValueError(msg)
return other
def _concat_same_dtype(self, to_concat, name):
"""
assert that we all have the same .closed
we allow a 0-len index here as well
"""
if not len({i.closed for i in to_concat if len(i)}) == 1:
msg = ('can only append two IntervalIndex objects '
'that are closed on the same side')
raise ValueError(msg)
return super(IntervalIndex, self)._concat_same_dtype(to_concat, name)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
result = self._data.take(indices, axis=axis, allow_fill=allow_fill,
fill_value=fill_value, **kwargs)
attributes = self._get_attributes_dict()
return self._simple_new(result, **attributes)
def __getitem__(self, value):
result = self._data[value]
if isinstance(result, IntervalArray):
return self._shallow_copy(result)
else:
# scalar
return result
# --------------------------------------------------------------------
# Rendering Methods
# __repr__ associated methods are based on MultiIndex
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
from pandas.io.formats.format import ExtensionArrayFormatter
return ExtensionArrayFormatter(values=self,
na_rep=na_rep,
justify='all',
leading_space=False).get_result()
def _format_data(self, name=None):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option(
'display.max_seq_items') or n) // 10, 10)
formatter = str
if n == 0:
summary = '[]'
elif n == 1:
first = formatter(self[0])
summary = '[{first}]'.format(first=first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[{first}, {last}]'.format(first=first, last=last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = '[{head} ... {tail}]'.format(
head=', '.join(head), tail=', '.join(tail))
else:
tail = [formatter(x) for x in self]
summary = '[{tail}]'.format(tail=', '.join(tail))
return summary + ',' + self._format_space()
def _format_attrs(self):
attrs = [('closed', repr(self.closed))]
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
attrs.append(('dtype', "'{dtype}'".format(dtype=self.dtype)))
return attrs
def _format_space(self):
space = ' ' * (len(self.__class__.__name__) + 1)
return "\n{space}".format(space=space)
# --------------------------------------------------------------------
def argsort(self, *args, **kwargs):
return np.lexsort((self.right, self.left))
def equals(self, other):
"""
Determines if two IntervalIndex objects contain the same elements
"""
if self.is_(other):
return True
# if we can coerce to an II
# then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
other = Index(getattr(other, '.values', other))
return (self.left.equals(other.left) and
self.right.equals(other.right) and
self.closed == other.closed)
@Appender(_interval_shared_docs['overlaps'] % _index_doc_kwargs)
def overlaps(self, other):
return self._data.overlaps(other)
def _setop(op_name, sort=None):
def func(self, other, sort=sort):
other = self._as_like_interval_index(other)
# GH 19016: ensure set op will not return a prohibited dtype
subtypes = [self.dtype.subtype, other.dtype.subtype]
common_subtype = find_common_type(subtypes)
if is_object_dtype(common_subtype):
msg = ('can only do {op} between two IntervalIndex '
'objects that have compatible dtypes')
raise TypeError(msg.format(op=op_name))
result = getattr(self._multiindex, op_name)(other._multiindex,
sort=sort)
result_name = get_op_result_name(self, other)
# GH 19101: ensure empty results have correct dtype
if result.empty:
result = result.values.astype(self.dtype.subtype)
else:
result = result.values
return type(self).from_tuples(result, closed=self.closed,
name=result_name)
return func
    @property
    def is_all_dates(self):
        """
        This is False even when left/right contain datetime-like objects,
        as the check is done on the Interval itself
        """
        # Intervals are never themselves datetime-like, regardless of the
        # types of their endpoints, so this is unconditionally False.
        return False
union = _setop('union')
intersection = _setop('intersection', sort=False)
difference = _setop('difference')
symmetric_difference = _setop('symmetric_difference')
# TODO: arithmetic operations
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint):
"""helper for interval_range to check if start/end are valid types"""
return any([is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None])
def _is_type_compatible(a, b):
    """Helper for interval_range: check type compatibility of start/end/freq.

    Two values are compatible when both are numeric, both are
    timestamp-compatible, both are timedelta-compatible, or either one is
    None.
    """
    if com._any_none(a, b):
        return True
    if is_number(a) and is_number(b):
        return True
    ts_like = (Timestamp, DateOffset)
    td_like = (Timedelta, DateOffset)
    if isinstance(a, ts_like) and isinstance(b, ts_like):
        return True
    return isinstance(a, td_like) and isinstance(b, td_like)
def interval_range(start=None, end=None, periods=None, freq=None,
                   name=None, closed='right'):
    """
    Return a fixed frequency IntervalIndex

    Parameters
    ----------
    start : numeric or datetime-like, default None
        Left bound for generating intervals
    end : numeric or datetime-like, default None
        Right bound for generating intervals
    periods : integer, default None
        Number of periods to generate
    freq : numeric, string, or DateOffset, default None
        The length of each interval. Must be consistent with the type of start
        and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
        for numeric and 'D' for datetime-like.
    name : string, default None
        Name of the resulting IntervalIndex
    closed : {'left', 'right', 'both', 'neither'}, default 'right'
        Whether the intervals are closed on the left-side, right-side, both
        or neither.

    Returns
    -------
    rng : IntervalIndex

    See Also
    --------
    IntervalIndex : An Index of intervals that are all closed on the same side.

    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``IntervalIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end``, inclusively.

    To learn more about datetime-like frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Examples
    --------
    Numeric ``start`` and ``end`` is supported.

    >>> pd.interval_range(start=0, end=5)
    IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
                  closed='right', dtype='interval[int64]')

    Additionally, datetime-like input is also supported.

    >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
    ...                   end=pd.Timestamp('2017-01-04'))
    IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
                   (2017-01-03, 2017-01-04]],
                  closed='right', dtype='interval[datetime64[ns]]')

    The ``freq`` parameter specifies the frequency between the left and right.
    endpoints of the individual intervals within the ``IntervalIndex``. For
    numeric ``start`` and ``end``, the frequency must also be numeric.

    >>> pd.interval_range(start=0, periods=4, freq=1.5)
    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
                  closed='right', dtype='interval[float64]')

    Similarly, for datetime-like ``start`` and ``end``, the frequency must be
    convertible to a DateOffset.

    >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
    ...                   periods=3, freq='MS')
    IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
                   (2017-03-01, 2017-04-01]],
                  closed='right', dtype='interval[datetime64[ns]]')

    Specify ``start``, ``end``, and ``periods``; the frequency is generated
    automatically (linearly spaced).

    >>> pd.interval_range(start=0, end=6, periods=4)
    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
                  closed='right',
                  dtype='interval[float64]')

    The ``closed`` parameter specifies which endpoints of the individual
    intervals within the ``IntervalIndex`` are closed.

    >>> pd.interval_range(end=5, periods=4, closed='both')
    IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
                  closed='both', dtype='interval[int64]')
    """
    start = com.maybe_box_datetimelike(start)
    end = com.maybe_box_datetimelike(end)
    # Whichever endpoint was provided decides numeric vs datetime-like mode.
    endpoint = start if start is not None else end
    # Default freq only when it is actually needed to fill the 3-of-4 rule.
    if freq is None and com._any_none(periods, start, end):
        freq = 1 if is_number(endpoint) else 'D'
    if com.count_not_none(start, end, periods, freq) != 3:
        raise ValueError('Of the four parameters: start, end, periods, and '
                         'freq, exactly three must be specified')
    if not _is_valid_endpoint(start):
        msg = 'start must be numeric or datetime-like, got {start}'
        raise ValueError(msg.format(start=start))
    elif not _is_valid_endpoint(end):
        msg = 'end must be numeric or datetime-like, got {end}'
        raise ValueError(msg.format(end=end))
    # Whole floats are accepted for periods and silently truncated to int.
    if is_float(periods):
        periods = int(periods)
    elif not is_integer(periods) and periods is not None:
        msg = 'periods must be a number, got {periods}'
        raise TypeError(msg.format(periods=periods))
    if freq is not None and not is_number(freq):
        try:
            freq = to_offset(freq)
        except ValueError:
            raise ValueError('freq must be numeric or convertible to '
                             'DateOffset, got {freq}'.format(freq=freq))
    # verify type compatibility
    if not all([_is_type_compatible(start, end),
                _is_type_compatible(start, freq),
                _is_type_compatible(end, freq)]):
        raise TypeError("start, end, freq need to be type compatible")
    # +1 to convert interval count to breaks count (n breaks = n-1 intervals)
    if periods is not None:
        periods += 1
    if is_number(endpoint):
        # force consistency between start/end/freq (lower end if freq skips it)
        if com._all_not_none(start, end, freq):
            end -= (end - start) % freq
        # compute the period/start/end if unspecified (at most one)
        if periods is None:
            periods = int((end - start) // freq) + 1
        elif start is None:
            start = end - (periods - 1) * freq
        elif end is None:
            end = start + (periods - 1) * freq
        breaks = np.linspace(start, end, periods)
        if all(is_integer(x) for x in com._not_none(start, end, freq)):
            # np.linspace always produces float output
            breaks = maybe_downcast_to_dtype(breaks, 'int64')
    else:
        # delegate to the appropriate range function
        if isinstance(endpoint, Timestamp):
            range_func = date_range
        else:
            range_func = timedelta_range
        breaks = range_func(start=start, end=end, periods=periods, freq=freq)
    return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
| bsd-3-clause |
abramhindle/marsyas-fork | scripts/Python/icme2011_plot_dtw.py | 7 | 1697 | #!/usr/bin/python
#
# Run DTW on all .txt files in this directory, and generate a
# matrix from this
#
import sys
import os
import datetime
import commands
import re
import numpy as np
import matplotlib.pyplot as plt
import mlpy

if len(sys.argv) != 3:
    # Parenthesized print works as a statement in Python 2 and a call in
    # Python 3. BUG FIX: report the actual script name (the old message
    # named a different script) and exit nonzero on a usage error instead
    # of exit(0).
    print("Usage: %s path output_file" % sys.argv[0])
    sys.exit(1)

#
# The path that we're going to look for text files in
#
path = sys.argv[1]
output_filename = sys.argv[2]

#
# Make a list of all the text files in path (extension ".txt" stripped).
#
files = []
for f in os.listdir(path):
    files.append(f[0:-4])

#
# Read each file's newline-separated float values into a numpy array.
#
all_data = []
for i in range(0, len(files)):
    # A normal python array to read the data into
    data = []
    filename = "%s/%s.txt" % (path, files[i])
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original leaked one handle per file).
    with open(filename, "r") as infile:
        for line in infile:
            data.append(float(line))
    all_data.append(np.array(data))

dtw = mlpy.Dtw(onlydist=False)
# for i in range(0,len(all_data)):
#     for j in range(0,len(all_data)):
#         a = dtw.compute(all_data[i], all_data[j])
#         print a

#
# Plot the data
#
# BUG FIX: the original read ``plot = false`` — Python's boolean literal
# is ``False``; ``false`` is an undefined name and raised a NameError.
plot = False
if plot:
    # Plot the two sets of input data
    fig1 = plt.figure(1)
    sub1 = plt.subplot(311)
    p1 = plt.plot(all_data[0])
    sub2 = plt.subplot(312)
    p2 = plt.plot(all_data[1])
    # Plot the similarity matrix and DTW path
    dtw = mlpy.Dtw(onlydist=False)
    d = dtw.compute(all_data[0], all_data[1])
    sub3 = plt.subplot(313)
    p3 = plt.imshow(dtw.cost.T, interpolation='nearest', origin='lower')
    p4 = plt.plot(dtw.px, dtw.py, 'r')
    plt.show()
| gpl-2.0 |
LukasMosser/Jupetro | notebooks/casing_plot.py | 1 | 2894 | import matplotlib.pyplot as plt
def plot_casing_setting_depths(ax, fracture_pressure, pore_pressure, TVD_frac, TVD_pore,
                               fracture_pressure_safety=None, pore_pressure_safety=None,
                               casing_seats_ppg=None, casing_seats_tvd=None):
    """
    This is a simple method to display the pore pressure and fracture pressure
    for a casing setting depth plot. Can include safety margins and casing seats if provided.
    Example usage:
    main.py:
    %matplotlib inline #if you want to inline the display in a jupyter notebook
    fig, ax = plt.subplots(1, figsize=(13, 13))
    plot_casing_setting_depths(ax, my_frac_pres, my_pore_pres, my_tvd_frac, my_tvd_pore,
                               fracture_pressure_safety=my_frac_safety,
                               pore_pressure_safety=my_pore_safety,
                               casing_seats_ppg=my_seats, casing_seats_tvd=my_seats_tvd)
    """
    ax.set_title("Casing Setting Depths", fontsize=30, y=1.08)
    label_size = 12
    # Solid curves: the two pressure envelopes bracketing the mud window.
    ax.plot(fracture_pressure, TVD_frac, color="red", linewidth=3, label="Fracture Pressure")
    ax.plot(pore_pressure, TVD_pore, color="blue", linewidth=3, label="Pore Pressure")
    # Optional dashed curves: safety-margin versions of the envelopes.
    if fracture_pressure_safety is not None:
        ax.plot(fracture_pressure_safety, TVD_frac, color="red", linewidth=3, label="Fracture Pressure", linestyle="--")
    if pore_pressure_safety is not None:
        ax.plot(pore_pressure_safety, TVD_pore, color="blue", linewidth=3, label="Pore Pressure", linestyle="--")
    # Optional staircase of casing seats (both coordinate lists required).
    if casing_seats_ppg is not None and casing_seats_tvd is not None:
        ax.plot(casing_seats_ppg, casing_seats_tvd, color="black", linestyle="--", linewidth=3, label="Casing Seats")
    ax.set_ylabel("Total Vertical Depth [ft]", fontsize=25)
    # Invert the y axis so depth increases downward, as is conventional.
    ax.set_ylim(ax.get_ylim()[::-1])
    ax.xaxis.tick_top()
    ax.xaxis.set_label_position("top")
    # NOTE(review): ``tick.label`` is deprecated/removed in modern
    # matplotlib (use ``tick.label1``) — confirm the targeted version.
    yed = [tick.label.set_fontsize(label_size) for tick in ax.yaxis.get_major_ticks()]
    xed = [tick.label.set_fontsize(label_size) for tick in ax.xaxis.get_major_ticks()]
    ax.set_xlabel("Equivalent Mud Density [ppg]", fontsize=25)
    # NOTE(review): Axes.ticklabel_format does not document a ``fontsize``
    # keyword — this may raise on some matplotlib versions; verify.
    ax.ticklabel_format(fontsize=25)
    ax.grid()
    ax.legend(fontsize=20)
def get_casing_seat_plot_data(pore_pressure, tvd_pore, casing_seats):
    """Transform casing-seat data into stepped (tvd, ppg) sequences that
    are easy to plot.

    Starting from the bottom of the pore-pressure curve, each
    ``(tvd, ppg)`` casing seat contributes one tread/riser of a staircase;
    the two returned lists have equal length.
    """
    tvds = [tvd_pore[-1], casing_seats[0][0]]
    # Each consecutive pair of seats contributes both of its depths,
    # forming the vertical risers of the staircase.
    for (tvd_a, _), (tvd_b, _) in zip(casing_seats, casing_seats[1:]):
        tvds.extend((tvd_a, tvd_b))
    # Every seat's mud weight is duplicated (the horizontal treads); the
    # final duplicated pair is trimmed so both lists line up.
    ppgs = [pore_pressure[-1], pore_pressure[-1]]
    for _, ppg in casing_seats:
        ppgs.extend((ppg, ppg))
    return tvds, ppgs[:-2]
SusanJL/iris | lib/iris/tests/unit/quickplot/test_pcolormesh.py | 11 | 2344 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.quickplot.pcolormesh` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.quickplot as qplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
    # Verifies that bounds-derived tick labels land on the correct axis
    # when a string coordinate is plotted against 'bar' in either order.
    def test_yaxis_labels(self):
        qplt.pcolormesh(self.cube, coords=('bar', 'str_coord'))
        self.assertBoundsTickLabels('yaxis')
    def test_xaxis_labels(self):
        qplt.pcolormesh(self.cube, coords=('str_coord', 'bar'))
        self.assertBoundsTickLabels('xaxis')
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
    # Fixture for the MixinCoords test suite: the mixin reads the
    # attributes assigned here (foo/bar, index arrays, data, mpl_patch,
    # draw_func) to exercise qplt.pcolormesh.
    def setUp(self):
        # We have a 2d cube with dimensionality (bar: 3; foo: 4)
        self.cube = simple_2d(with_bounds=True)
        # Contiguous bounds yield one more point than the cell count,
        # hence the "+ 1" in the index arrays.
        coord = self.cube.coord('foo')
        self.foo = coord.contiguous_bounds()
        self.foo_index = np.arange(coord.points.size + 1)
        coord = self.cube.coord('bar')
        self.bar = coord.contiguous_bounds()
        self.bar_index = np.arange(coord.points.size + 1)
        self.data = self.cube.data
        self.dataT = self.data.T
        # Patch pyplot.pcolormesh so the mixin can inspect call arguments
        # without actually drawing anything.
        self.mpl_patch = self.patch('matplotlib.pyplot.pcolormesh',
                                    return_value=None)
        self.draw_func = qplt.pcolormesh
# Allow this test module to be run directly as a script.
if __name__ == "__main__":
    tests.main()
| gpl-3.0 |
DmitryOdinoky/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/stochasticSynthesisFrame.py | 24 | 2966 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf):
    """Stochastic model of a single sound frame.

    x: input array sound, w: analysis window, N: FFT size,
    stocf: decimation factor of mag spectrum for stochastic analysis
    (0 < stocf <= 1)

    Returns mX, pX (magnitude/phase spectra of the windowed frame) and
    mY, pY, y (stochastic magnitude approximation, random phases, and the
    re-synthesized time-domain frame of length N).
    """
    # BUG FIX: use floor division so the sizes remain ints under Python 3
    # (plain '/' yields floats there and breaks slicing/indexing).
    hN = N // 2 + 1                                        # size of positive spectrum
    hM = w.size // 2                                       # half analysis window size
    pin = hM                                               # sound pointer in middle of analysis window
    w = w / sum(w)                                         # normalize analysis window
    # -----analysis-----
    xw = x[pin - hM:pin + hM] * w                          # window the input sound
    X = fft(xw)                                            # compute FFT
    mX = 20 * np.log10(abs(X[:hN]))                        # magnitude spectrum of positive frequencies
    # BUG FIX: resample() needs an integer sample count; truncate the
    # (float) product, matching the historical behavior.
    mXenv = resample(np.maximum(-200, mX), int(mX.size * stocf))  # decimate the mag spectrum
    pX = np.angle(X[:hN])
    # -----synthesis-----
    mY = resample(mXenv, hN)                               # interpolate envelope back to original size
    pY = 2 * np.pi * np.random.rand(hN)                    # generate random phase values
    Y = np.zeros(N, dtype=complex)
    Y[:hN] = 10 ** (mY / 20) * np.exp(1j * pY)             # generate positive freq.
    Y[hN:] = 10 ** (mY[-2:0:-1] / 20) * np.exp(-1j * pY[-2:0:-1])  # generate negative freq.
    y = np.real(ifft(Y)) * N / 2                           # inverse FFT and scaling
    return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
    # Load a test sound and analyze a single 1024-sample frame of it.
    (fs, x) = UF.wavread('../../../sounds/ocean.wav')
    w = np.hanning(1024)
    N = 1024
    stocf = 0.1
    maxFreq = 10000.0
    lastbin = N*maxFreq/fs
    first = 1000
    last = first+w.size
    mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
    # Three stacked panels: approximated magnitude, random phases, output.
    plt.figure(1, figsize=(9, 5))
    plt.subplot(3,1,1)
    plt.plot(float(fs)*np.arange(mY.size)/N, mY, 'r', lw=1.5, label="mY")
    plt.axis([0, maxFreq, -78, max(mX)+0.5])
    plt.title('mY (stochastic approximation of mX)')
    plt.subplot(3,1,2)
    plt.plot(float(fs)*np.arange(pY.size)/N, pY-np.pi, 'c', lw=1.5, label="pY")
    plt.axis([0, maxFreq, -np.pi, np.pi])
    plt.title('pY (random phases)')
    plt.subplot(3,1,3)
    # Time axis in seconds for the synthesized frame.
    plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
    plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
    plt.title('yst')
    plt.tight_layout()
    plt.savefig('stochasticSynthesisFrame.png')
    plt.show()
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.