repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
MPTCP-smartphone-thesis/pcap-measurement | bursts_duration_bytes_wcdf.py | 1 | 10996 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Quentin De Coninck
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# To install on this machine: matplotlib, numpy
from __future__ import print_function
import argparse
import common as co
import common_graph as cog
import matplotlib
# Do not use any X11 backend
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import mptcp
import numpy as np
import os
import tcp
##################################################
## ARGUMENTS ##
##################################################
# Command-line interface: where to read the stat files from, where to write
# the summary graphs, and which directories to aggregate.
parser = argparse.ArgumentParser(
    description="Summarize stat files generated by analyze")
parser.add_argument("-s",
                    "--stat", help="directory where the stat files are stored", default=co.DEF_STAT_DIR + '_' + co.DEF_IFACE)
parser.add_argument('-S',
                    "--sums", help="directory where the summary graphs will be stored", default=co.DEF_SUMS_DIR + '_' + co.DEF_IFACE)
parser.add_argument("-d",
                    "--dirs", help="list of directories to aggregate", nargs="+")
args = parser.parse_args()

# Normalize user-supplied paths (expand ~, make absolute) and make sure the
# output directory exists before any graph is written.
stat_dir_exp = os.path.abspath(os.path.expanduser(args.stat))
sums_dir_exp = os.path.abspath(os.path.expanduser(args.sums))
co.check_directory_exists(sums_dir_exp)

##################################################
##                 GET THE DATA                 ##
##################################################
connections = cog.fetch_valid_data(stat_dir_exp, args)
multiflow_connections, singleflow_connections = cog.get_multiflow_connections(connections)
##################################################
## PLOTTING RESULTS ##
##################################################
# Connection-size buckets used to stratify the CDFs.
TINY = '0B-10KB'
SMALL = '10KB-100KB'
MEDIUM = '100KB-1MB'
LARGE = '>=1MB'
# For each direction and size bucket: one (frac_duration, frac_bytes) pair
# per burst, relative to the whole connection.
results_duration_bytes = {co.C2S: {TINY: [], SMALL: [], MEDIUM: [], LARGE: []}, co.S2C: {TINY: [], SMALL: [], MEDIUM: [], LARGE: []}}
# For each direction and size bucket: fraction of the connection's packets
# carried by each burst.
results_pkts = {co.C2S: {TINY: [], SMALL: [], MEDIUM: [], LARGE: []}, co.S2C: {TINY: [], SMALL: [], MEDIUM: [], LARGE: []}}
min_duration = 0.001  # seconds; skip connections shorter than this
for fname, conns in multiflow_connections.iteritems():  # Python 2 dict API
    for conn_id, conn in conns.iteritems():
        # Restrict to only 2SFs, but we can also see with more than 2
        if co.START in conn.attr and len(conn.flows) >= 2:
            # Rely here on MPTCP duration, maybe should be duration at TCP level?
            # Also rely on the start time of MPTCP; again, should it be the TCP one?
            conn_start_time = conn.attr[co.START]
            # Split the start timestamp into integer and fractional parts to
            # limit precision loss when subtracting large float timestamps.
            conn_start_time_int = long(conn_start_time)
            conn_start_time_dec = float('0.' + str(conn_start_time - conn_start_time_int).split('.')[1])
            conn_duration = conn.attr[co.DURATION]
            if conn_duration < min_duration:
                continue
            for direction in co.DIRECTIONS:
                tot_packs = 0
                to_add_pkts = []
                # First count all bytes sent (including retransmissions)
                tcp_conn_bytes = 0
                for flow_id, flow in conn.flows.iteritems():
                    tcp_conn_bytes += flow.attr[direction].get(co.BYTES_DATA, 0)
                # To cope with unseen TCP connections
                conn_bytes = max(conn.attr[direction][co.BYTES_MPTCPTRACE], tcp_conn_bytes)
                for flow_id, bytes, pkts, burst_duration, burst_start_time in conn.attr[direction][co.BURSTS]:
                    # NOTE: 'bytes' shadows the builtin; kept for byte-identity.
                    frac_bytes = (bytes + 0.0) / conn_bytes
                    # Log and discard bursts whose byte count is inconsistent
                    # with the connection total (outside [0, 1.1]).
                    if frac_bytes > 1.1:
                        print(frac_bytes, bytes, pkts, conn_bytes, direction, conn_id, flow_id)
                        continue
                    if frac_bytes < 0:
                        print(frac_bytes, bytes, pkts, conn_bytes, direction, conn_id, flow_id)
                        continue
                    # Same integer/fractional split as above for precision.
                    burst_start_time_int = long(burst_start_time)
                    burst_start_time_dec = float('0.' + str(burst_start_time - burst_start_time_int).split('.')[1])
                    relative_time_int = burst_start_time_int - conn_start_time_int
                    relative_time_dec = burst_start_time_dec - conn_start_time_dec
                    relative_time = relative_time_int + relative_time_dec
                    frac_duration = relative_time / conn_duration
                    # Keep bursts starting within (up to twice) the measured
                    # connection duration; classify by connection size.
                    if frac_duration >= 0.0 and frac_duration <= 2.0:
                        if conn_bytes < 10000:
                            label = TINY
                        elif conn_bytes < 100000:
                            label = SMALL
                        elif conn_bytes < 1000000:
                            label = MEDIUM
                        else:
                            label = LARGE
                        results_duration_bytes[direction][label].append((frac_duration, frac_bytes))
                        to_add_pkts.append(pkts)
                        tot_packs += pkts
                # Classify the connection once more for the packet-fraction
                # statistics (same thresholds as above).
                if conn_bytes < 10000:
                    label = TINY
                elif conn_bytes < 100000:
                    label = SMALL
                elif conn_bytes < 1000000:
                    label = MEDIUM
                else:
                    label = LARGE
                # Safe even when tot_packs == 0: to_add_pkts is then empty.
                for p in to_add_pkts:
                    results_pkts[direction][label].append(p * 1.0 / tot_packs)
base_graph_name = 'bursts_'
# Per-bucket plotting style, shared by the three graph types below.
color = {TINY: 'red', SMALL: 'blue', MEDIUM: 'green', LARGE: 'orange'}
ls = {TINY: ':', SMALL: '-.', MEDIUM: '--', LARGE: '-'}
for direction in co.DIRECTIONS:
    # --- Graph 1: weighted CDF of burst start times (fraction of duration) ---
    plt.figure()
    plt.clf()
    fig, ax = plt.subplots()
    graph_fname = os.path.splitext(base_graph_name)[0] + "duration_wcdf_" + direction + ".pdf"
    graph_full_path = os.path.join(sums_dir_exp, graph_fname)
    for label in [TINY, SMALL, MEDIUM, LARGE]:
        x_val = [x[0] for x in results_duration_bytes[direction][label]]
        sample = np.array(sorted(x_val))
        sorted_array = np.sort(sample)
        # Weighted CDF: cumulative sum of the values, normalized by the total.
        tot = 0.0
        yvals = []
        for elem in sorted_array:
            tot += elem
            yvals.append(tot)
        yvals = [x / tot for x in yvals]
        if len(sorted_array) > 0:
            # Add a last point
            sorted_array = np.append(sorted_array, sorted_array[-1])
            yvals = np.append(yvals, 1.0)
            ax.plot(sorted_array, yvals, color=color[label], linestyle=ls[label], linewidth=2, label=label)
    # Shrink current axis's height by 10% on the top
    # box = ax.get_position()
    # ax.set_position([box.x0, box.y0,
    #                  box.width, box.height * 0.9])
    # ax.set_xscale('log')
    # Put a legend above current axis
    # ax.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), fancybox=True, shadow=True, ncol=ncol)
    ax.legend(loc='best')
    plt.xlabel('Fraction of connection duration', fontsize=24)
    plt.ylabel("Weighted CDF", fontsize=24)
    plt.savefig(graph_full_path)
    plt.close('all')

    # --- Graph 2: weighted CDF of burst sizes (fraction of bytes) ---
    plt.figure()
    plt.clf()
    fig, ax = plt.subplots()
    graph_fname = os.path.splitext(base_graph_name)[0] + "bytes_wcdf_" + direction + ".pdf"
    graph_full_path = os.path.join(sums_dir_exp, graph_fname)
    for label in [TINY, SMALL, MEDIUM, LARGE]:
        y_val = [x[1] for x in results_duration_bytes[direction][label]]
        sample = np.array(sorted(y_val))
        sorted_array = np.sort(sample)
        tot = 0.0
        yvals = []
        for elem in sorted_array:
            tot += elem
            yvals.append(tot)
        yvals = [x / tot for x in yvals]
        if len(sorted_array) > 0:
            # Add a last point
            sorted_array = np.append(sorted_array, sorted_array[-1])
            yvals = np.append(yvals, 1.0)
            ax.plot(sorted_array, yvals, color=color[label], linestyle=ls[label], linewidth=2, label=label)
    # Shrink current axis's height by 10% on the top
    # box = ax.get_position()
    # ax.set_position([box.x0, box.y0,
    #                  box.width, box.height * 0.9])
    # ax.set_xscale('log')
    # Put a legend above current axis
    # ax.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), fancybox=True, shadow=True, ncol=ncol)
    ax.legend(loc='best')
    plt.xlim(0.0, 1.0)
    plt.xlabel('Fraction of connection bytes', fontsize=24)
    plt.ylabel("Weighted CDF", fontsize=24)
    plt.savefig(graph_full_path)
    plt.close('all')

    # --- Graph 3: weighted CDF of per-burst packet fractions ---
    plt.figure()
    plt.clf()
    fig, ax = plt.subplots()
    graph_fname = os.path.splitext(base_graph_name)[0] + "pkts_wcdf_" + direction + ".pdf"
    graph_full_path = os.path.join(sums_dir_exp, graph_fname)
    for label in [TINY, SMALL, MEDIUM, LARGE]:
        sample = np.array(sorted(results_pkts[direction][label]))
        sorted_array = np.sort(sample)
        tot = 0.0
        yvals = []
        for elem in sorted_array:
            tot += elem
            yvals.append(tot)
        yvals = [x / tot for x in yvals]
        # NOTE(review): the per-connection fractions of one connection sum to
        # 1, so 'tot' presumably equals the number of connections in the
        # bucket — confirm. Also, an empty bucket makes tot == 0.0 and this
        # print raises ZeroDivisionError — TODO confirm buckets are non-empty.
        print("PERCENTAGE 1 BLOCK", direction, label, len([x for x in sorted_array if x >= 0.99]) * 100. / tot)
        # Find the first element >= 0.2 and report its cumulative weight.
        # NOTE(review): if every element is < 0.2, i == len(yvals) and
        # yvals[i] raises IndexError — confirm this cannot happen in practice.
        i = 0
        for elem in sorted_array:
            if elem >= 0.2:
                break
            else:
                i += 1
        print("PERCENTAGE 0.2 block conn", direction, label, yvals[i])
        if len(sorted_array) > 0:
            # Add a last point
            sorted_array = np.append(sorted_array, sorted_array[-1])
            yvals = np.append(yvals, 1.0)
            ax.plot(sorted_array, yvals, color=color[label], linestyle=ls[label], linewidth=2, label=label)
    # Shrink current axis's height by 10% on the top
    # box = ax.get_position()
    # ax.set_position([box.x0, box.y0,
    #                  box.width, box.height * 0.9])
    # ax.set_xscale('log')
    # Put a legend above current axis
    # ax.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), fancybox=True, shadow=True, ncol=ncol)
    ax.legend(loc='best')
    plt.xlim(0.0, 1.0)
    plt.xlabel('Fraction of connection packets', fontsize=24, labelpad=-1)
    plt.ylabel("Weighted CDF", fontsize=24)
    plt.savefig(graph_full_path)
    plt.close('all')
| gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/tests/test_msgpack/test_case.py | 9 | 2759 | #!/usr/bin/env python
# coding: utf-8
from pandas.msgpack import packb, unpackb
def check(length, obj):
    """Pack *obj*, assert the packed size equals *length*, then round-trip it."""
    packed = packb(obj)
    error_msg = "%r length should be %r but get %r" % (obj, length, len(packed))
    assert len(packed) == length, error_msg
    assert unpackb(packed, use_list=0) == obj
def test_1():
    # Values that fit in a single msgpack byte (nil, bool, fixint).
    one_byte_values = [None, True, False, 0, 1, (1 << 6), (1 << 7) - 1,
                       -1, -((1 << 5) - 1), -(1 << 5)]
    for value in one_byte_values:
        check(1, value)


def test_2():
    # int8 / uint8: one type byte plus one payload byte.
    for value in (1 << 7, (1 << 8) - 1, -((1 << 5) + 1), -(1 << 7)):
        check(2, value)


def test_3():
    # int16 / uint16: one type byte plus two payload bytes.
    for value in (1 << 8, (1 << 16) - 1, -((1 << 7) + 1), -(1 << 15)):
        check(3, value)


def test_5():
    # int32 / uint32: one type byte plus four payload bytes.
    for value in (1 << 16, (1 << 32) - 1, -((1 << 15) + 1), -(1 << 31)):
        check(5, value)


def test_9():
    # int64 / uint64 / float64: one type byte plus eight payload bytes.
    for value in (1 << 32, (1 << 64) - 1, -((1 << 31) + 1), -(1 << 63),
                  1.0, 0.1, -0.1, -1.0):
        check(9, value)
def check_raw(overhead, num):
    """Check that *num* space bytes pack to ``num + overhead`` bytes."""
    payload = b" " * num
    check(overhead + num, payload)
def test_fixraw():
    # fixraw: 1-byte header for raw data shorter than 2**5 bytes.
    for size in (0, (1 << 5) - 1):
        check_raw(1, size)


def test_raw16():
    # raw16: 3-byte header for raw data up to 2**16 - 1 bytes.
    for size in (1 << 5, (1 << 16) - 1):
        check_raw(3, size)


def test_raw32():
    # raw32: 5-byte header beyond the raw16 limit.
    check_raw(5, 1 << 16)
def check_array(overhead, num):
    """Check that a tuple of *num* ``None`` packs to ``num + overhead`` bytes."""
    payload = (None,) * num
    check(overhead + num, payload)
def test_fixarray():
    # fixarray: 1-byte header for arrays shorter than 2**4 elements.
    for size in (0, (1 << 4) - 1):
        check_array(1, size)


def test_array16():
    # array16: 3-byte header for arrays up to 2**16 - 1 elements.
    for size in (1 << 4, (1 << 16) - 1):
        check_array(3, size)


def test_array32():
    # array32: 5-byte header beyond the array16 limit.
    check_array(5, (1 << 16))
def match(obj, buf):
    """Assert *obj* packs exactly to *buf* and *buf* unpacks back to *obj*."""
    packed = packb(obj)
    assert packed == buf
    assert unpackb(buf, use_list=0) == obj
def test_match():
    """Check exact packed byte strings for representative values of each type."""
    # (python value, expected msgpack bytes) pairs.
    cases = [
        (None, b'\xc0'),
        (False, b'\xc2'),
        (True, b'\xc3'),
        (0, b'\x00'),
        (127, b'\x7f'),
        (128, b'\xcc\x80'),
        (256, b'\xcd\x01\x00'),
        (-1, b'\xff'),
        (-33, b'\xd0\xdf'),
        (-129, b'\xd1\xff\x7f'),
        ({1: 1}, b'\x81\x01\x01'),
        (1.0, b"\xcb\x3f\xf0\x00\x00\x00\x00\x00\x00"),
        ((), b'\x90'),
        # 15 elements: fixarray; 16 elements: array16 (header grows).
        (tuple(range(15)), (b"\x9f\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09"
                            b"\x0a\x0b\x0c\x0d\x0e")),
        (tuple(range(16)), (b"\xdc\x00\x10\x00\x01\x02\x03\x04\x05\x06\x07"
                            b"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f")),
        ({}, b'\x80'),
        # 15 entries: fixmap; 16 entries: map16 (header grows).
        (dict([(x, x) for x in range(15)]),
         (b'\x8f\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07'
          b'\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e')),
        (dict([(x, x) for x in range(16)]),
         (b'\xde\x00\x10\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06'
          b'\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e'
          b'\x0f\x0f')),
    ]
    for v, p in cases:
        match(v, p)
def test_unicode():
    # Python 2 semantics: 'foobar' is a byte string, so the round-trip
    # yields b'foobar'.
    assert unpackb(packb('foobar'), use_list=1) == b'foobar'
| mit |
ljwolf/pysal | pysal/esda/tests/test_geary.py | 1 | 2995 | """Geary Unittest."""
import unittest
from ... import open as popen
from ... import examples
from .. import geary
import numpy as np
from ...common import pandas
PANDAS_EXTINCT = pandas is None
class Geary_Tester(unittest.TestCase):
    """Unit tests for the Geary spatial autocorrelation statistic."""

    def setUp(self):
        # Load the example spatial weights and the variable of interest.
        self.w = popen(examples.get_path("book.gal")).read()
        f = popen(examples.get_path("book.txt"))
        self.y = np.array(f.by_col['y'])

    def test_Geary(self):
        """Check analytical (normal/randomization) and permutation inference.

        Uses ``assertAlmostEqual`` instead of the deprecated
        ``assertAlmostEquals`` alias (removed in Python 3.12).
        """
        # Analytical inference only (no permutations).
        c = geary.Geary(self.y, self.w, permutations=0)
        self.assertAlmostEqual(c.C, 0.33301083591331254)
        self.assertAlmostEqual(c.EC, 1.0)
        self.assertAlmostEqual(c.VC_norm, 0.031805300245097874)
        self.assertAlmostEqual(c.p_norm, 9.2018240680169505e-05)
        self.assertAlmostEqual(c.z_norm, -3.7399778367629564)
        self.assertAlmostEqual(c.seC_norm, 0.17834040553138225)
        self.assertAlmostEqual(c.VC_rand, 0.033411917666958356)
        self.assertAlmostEqual(c.p_rand, 0.00013165646189214729)
        self.assertAlmostEqual(c.z_rand, -3.6489513837253944)
        self.assertAlmostEqual(c.seC_rand, 0.18278927120309429)

        # Permutation-based inference; seed fixed for reproducibility.
        np.random.seed(12345)
        c = geary.Geary(self.y, self.w, permutations=999)
        self.assertAlmostEqual(c.C, 0.33301083591331254)
        self.assertAlmostEqual(c.EC, 1.0)
        self.assertAlmostEqual(c.VC_norm, 0.031805300245097874)
        self.assertAlmostEqual(c.p_norm, 9.2018240680169505e-05)
        self.assertAlmostEqual(c.z_norm, -3.7399778367629564)
        self.assertAlmostEqual(c.seC_norm, 0.17834040553138225)
        self.assertAlmostEqual(c.VC_rand, 0.033411917666958356)
        self.assertAlmostEqual(c.p_rand, 0.00013165646189214729)
        self.assertAlmostEqual(c.z_rand, -3.6489513837253944)
        self.assertAlmostEqual(c.seC_rand, 0.18278927120309429)
        self.assertAlmostEqual(c.EC_sim, 0.9980676303238214)
        self.assertAlmostEqual(c.VC_sim, 0.034430408799858946)
        self.assertAlmostEqual(c.p_sim, 0.001)
        self.assertAlmostEqual(c.p_z_sim, 0.00016908100514811952)
        self.assertAlmostEqual(c.z_sim, -3.5841621159171746)
        self.assertAlmostEqual(c.seC_sim, 0.18555432843202269)

    @unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
    def test_by_col(self):
        """The DataFrame interface must agree with the array interface."""
        import pandas as pd
        df = pd.DataFrame(self.y, columns=['y'])
        r1 = geary.Geary.by_col(df, ['y'], w=self.w, permutations=999)
        this_geary = np.unique(r1.y_geary.values)
        this_pval = np.unique(r1.y_p_sim.values)
        np.random.seed(12345)
        c = geary.Geary(self.y, self.w, permutations=999)
        self.assertAlmostEqual(this_geary, c.C)
        self.assertAlmostEqual(this_pval, c.p_sim)
# Build a suite explicitly so the module can be executed directly
# (python test_geary.py) in addition to being found by a test runner.
suite = unittest.TestSuite()
test_classes = [Geary_Tester]
for i in test_classes:
    a = unittest.TestLoader().loadTestsFromTestCase(i)
    suite.addTest(a)

if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite)
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.17/_downloads/20a1da06c35b0bcff7922862fdce817b/plot_background_filtering.py | 2 | 49239 | # -*- coding: utf-8 -*-
r"""
===================================
Background information on filtering
===================================
Here we give some background information on filtering in general,
and how it is done in MNE-Python in particular.
Recommended reading for practical applications of digital
filter design can be found in Parks & Burrus [1]_ and
Ifeachor and Jervis [2]_, and for filtering in an
M/EEG context we recommend reading Widmann *et al.* 2015 [7]_.
To see how to use the default filters in MNE-Python on actual data, see
the :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`
tutorial.
.. contents::
:local:
Problem statement
=================
The practical issues with filtering electrophysiological data are covered
well by Widmann *et al.* in [7]_, in a follow-up to an article where they
conclude with this statement:
Filtering can result in considerable distortions of the time course
(and amplitude) of a signal as demonstrated by VanRullen (2011) [[3]_].
Thus, filtering should not be used lightly. However, if effects of
filtering are cautiously considered and filter artifacts are minimized,
a valid interpretation of the temporal dynamics of filtered
electrophysiological data is possible and signals missed otherwise
can be detected with filtering.
In other words, filtering can increase SNR, but if it is not used carefully,
it can distort data. Here we hope to cover some filtering basics so
users can better understand filtering tradeoffs, and why MNE-Python has
chosen particular defaults.
.. _tut_filtering_basics:
Filtering basics
================
Let's get some of the basic math down. In the frequency domain, digital
filters have a transfer function that is given by:
.. math::
H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + ... + b_M z^{-M}}
{1 + a_1 z^{-1} + a_2 z^{-2} + ... + a_N z^{-M}} \\
&= \frac{\sum_0^Mb_kz^{-k}}{\sum_1^Na_kz^{-k}}
In the time domain, the numerator coefficients :math:`b_k` and denominator
coefficients :math:`a_k` can be used to obtain our output data
:math:`y(n)` in terms of our input data :math:`x(n)` as:
.. math::
:label: summations
y(n) &= b_0 x(n) + b_1 x(n-1) + ... + b_M x(n-M)
- a_1 y(n-1) - a_2 y(n - 2) - ... - a_N y(n - N)\\
&= \sum_0^M b_k x(n-k) - \sum_1^N a_k y(n-k)
In other words, the output at time :math:`n` is determined by a sum over:
1. The numerator coefficients :math:`b_k`, which get multiplied by
the previous input :math:`x(n-k)` values, and
2. The denominator coefficients :math:`a_k`, which get multiplied by
the previous output :math:`y(n-k)` values.
Note that these summations in :eq:`summations` correspond nicely to
(1) a weighted `moving average`_ and (2) an autoregression_.
Filters are broken into two classes: FIR_ (finite impulse response) and
IIR_ (infinite impulse response) based on these coefficients.
FIR filters use a finite number of numerator
coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output
value of :math:`y(n)` depends only on the :math:`M` previous input values.
IIR filters depend on the previous input and output values, and thus can have
effectively infinite impulse responses.
As outlined in [1]_, FIR and IIR have different tradeoffs:
* A causal FIR filter can be linear-phase -- i.e., the same time delay
across all frequencies -- whereas a causal IIR filter cannot. The phase
and group delay characteristics are also usually better for FIR filters.
* IIR filters can generally have a steeper cutoff than an FIR filter of
equivalent order.
* IIR filters are generally less numerically stable, in part due to
accumulating error (due to its recursive calculations).
In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.*
2015 [7]_:
Despite IIR filters often being considered as computationally more
efficient, they are recommended only when high throughput and sharp
cutoffs are required (Ifeachor and Jervis, 2002 [2]_, p. 321),
...FIR filters are easier to control, are always stable, have a
well-defined passband, can be corrected to zero-phase without
additional computations, and can be converted to minimum-phase.
We therefore recommend FIR filters for most purposes in
electrophysiological data analysis.
When designing a filter (FIR or IIR), there are always tradeoffs that
need to be considered, including but not limited to:
1. Ripple in the pass-band
2. Attenuation of the stop-band
3. Steepness of roll-off
4. Filter order (i.e., length for FIR filters)
5. Time-domain ringing
In general, the sharper something is in frequency, the broader it is in time,
and vice-versa. This is a fundamental time-frequency tradeoff, and it will
show up below.
FIR Filters
===========
First we will focus on FIR filters, which are the default filters used by
MNE-Python.
"""
###############################################################################
# Designing FIR filters
# ---------------------
# Here we'll try designing a low-pass filter, and look at trade-offs in terms
# of time- and frequency-domain filter characteristics. Later, in
# :ref:`tut_effect_on_signals`, we'll look at how such filters can affect
# signals when they are used.
#
# First let's import some useful tools for filtering, and set some default
# values for our data that are reasonable for M/EEG data.
import numpy as np
from scipy import signal, fftpack
import matplotlib.pyplot as plt
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import mne
sfreq = 1000.  # sampling frequency (Hz)
f_p = 40.  # pass-band edge of the example low-pass filter (Hz)
flim = (1., sfreq / 2.)  # limits for plotting
###############################################################################
# Take for example an ideal low-pass filter, which would give a value of 1 in
# the pass-band (up to frequency :math:`f_p`) and a value of 0 in the stop-band
# (down to frequency :math:`f_s`) such that :math:`f_p=f_s=40` Hz here
# (shown to a lower limit of -60 dB for simplicity):
nyq = sfreq / 2.  # the Nyquist frequency is half our sample rate
freq = [0, f_p, f_p, nyq]  # breakpoints of the brick-wall response
gain = [1, 1, 0, 0]  # unity gain in the pass-band, zero in the stop-band
# A figure one third of the default height is enough for this simple plot.
third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1. / 3.]
ax = plt.subplots(1, figsize=third_height)[1]
plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
###############################################################################
# This filter hypothetically achieves zero ripple in the frequency domain,
# perfect attenuation, and perfect steepness. However, due to the discontinuity
# in the frequency response, the filter would require infinite ringing in the
# time domain (i.e., infinite order) to be realized. Another way to think of
# this is that a rectangular window in frequency is actually sinc_ function
# in time, which requires an infinite number of samples, and thus infinite
# time, to represent. So although this filter has ideal frequency suppression,
# it has poor time-domain characteristics.
#
# Let's try to naïvely make a brick-wall filter of length 0.1 sec, and look
# at the filter itself in the time domain and the frequency domain:
n = int(round(0.1 * sfreq)) + 1  # 0.1 s worth of taps (odd length)
t = np.arange(-n // 2, n // 2) / sfreq  # center our sinc
h = np.sinc(2 * f_p * t) / (4 * np.pi)  # truncated sinc: naive brick-wall FIR
plot_filter(h, sfreq, freq, gain, 'Sinc (0.1 sec)', flim=flim)
###############################################################################
# This is not so good! Making the filter 10 times longer (1 sec) gets us a
# bit better stop-band suppression, but still has a lot of ringing in
# the time domain. Note the x-axis is an order of magnitude longer here,
# and the filter has a correspondingly much longer group delay (again equal
# to half the filter length, or 0.5 seconds):
n = int(round(1. * sfreq)) + 1  # same sinc design, 1 s long
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (1.0 sec)', flim=flim)
###############################################################################
# Let's make the stop-band tighter still with a longer filter (10 sec),
# with a resulting larger x-axis:
n = int(round(10. * sfreq)) + 1  # same sinc design, 10 s long
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (10.0 sec)', flim=flim)
###############################################################################
# Now we have very sharp frequency suppression, but our filter rings for the
# entire second. So this naïve method is probably not a good way to build
# our low-pass filter.
#
# Fortunately, there are multiple established methods to design FIR filters
# based on desired response characteristics. These include:
#
# 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_)
# 2. Windowed FIR design (:func:`scipy.signal.firwin2`, `MATLAB fir2`_
# and :func:`scipy.signal.firwin`)
# 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_)
# 4. Frequency-domain design (construct filter in Fourier
# domain and use an :func:`IFFT <scipy.fftpack.ifft>` to invert it)
#
# .. note:: Remez and least squares designs have advantages when there are
# "do not care" regions in our frequency response. However, we want
# well controlled responses in all frequency regions.
# Frequency-domain construction is good when an arbitrary response
# is desired, but generally less clean (due to sampling issues) than
#           a windowed approach for more straightforward filter applications.
# Since our filters (low-pass, high-pass, band-pass, band-stop)
# are fairly simple and we require precise control of all frequency
# regions, here we will use and explore primarily windowed FIR
# design.
#
# If we relax our frequency-domain filter requirements a little bit, we can
# use these functions to construct a lowpass filter that instead has a
# *transition band*, or a region between the pass frequency :math:`f_p`
# and stop frequency :math:`f_s`, e.g.:
trans_bandwidth = 10  # 10 Hz transition band
f_s = f_p + trans_bandwidth  # = 50 Hz
# Desired response: flat up to f_p, roll off to zero between f_p and f_s.
freq = [0., f_p, f_s, nyq]
gain = [1., 1., 0., 0.]
ax = plt.subplots(1, figsize=third_height)[1]
title = '%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth)
plot_ideal_filter(freq, gain, ax, title=title, flim=flim)
###############################################################################
# Accepting a shallower roll-off of the filter in the frequency domain makes
# our time-domain response potentially much better. We end up with a
# smoother slope through the transition region, but a *much* cleaner time
# domain signal. Here again for the 1 sec filter:
# Windowed FIR design with the 10 Hz transition band defined above.
# NOTE(review): the title says "1.0 sec" but `n` was last set for the 10 s
# sinc above — confirm the intended filter length here.
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (1.0 sec)',
            flim=flim)
###############################################################################
# Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually
# use a shorter filter (5 cycles at 10 Hz = 0.5 sec) and still get okay
# stop-band attenuation:
n = int(round(sfreq * 0.5)) + 1  # 0.5 s filter (5 cycles at 10 Hz)
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (0.5 sec)',
            flim=flim)
###############################################################################
# But then if we shorten the filter too much (2 cycles of 10 Hz = 0.2 sec),
# our effective stop frequency gets pushed out past 60 Hz:
n = int(round(sfreq * 0.2)) + 1  # 0.2 s filter: too short for this transition
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (0.2 sec)',
            flim=flim)
###############################################################################
# If we want a filter that is only 0.1 seconds long, we should probably use
# something more like a 25 Hz transition band (0.2 sec = 5 cycles @ 25 Hz):
trans_bandwidth = 25  # widen the transition band to suit the 0.2 s filter
f_s = f_p + trans_bandwidth
freq = [0, f_p, f_s, nyq]
h = signal.firwin2(n, freq, gain, nyq=nyq)
# NOTE(review): the title says "50-Hz transition" but trans_bandwidth is
# 25 Hz — confirm which label is intended.
plot_filter(h, sfreq, freq, gain, 'Windowed 50-Hz transition (0.2 sec)',
            flim=flim)
###############################################################################
# So far we have only discussed *acausal* filtering, which means that each
# sample at each time point :math:`t` is filtered using samples that come
# after (:math:`t + \Delta t`) *and* before (:math:`t - \Delta t`) :math:`t`.
# In this sense, each sample is influenced by samples that come both before
# and after it. This is useful in many cases, especially because it does not
# delay the timing of events.
#
# However, sometimes it can be beneficial to use *causal* filtering,
# whereby each sample :math:`t` is filtered only using time points that came
# after it.
#
# Note that the delay is variable (whereas for linear/zero-phase filters it
# is constant) but small in the pass-band. Unlike zero-phase filters, which
# require time-shifting backward the output of a linear-phase filtering stage
# (and thus becoming acausal), minimum-phase filters do not require any
# compensation to achieve small delays in the passband. Note that as an
# artifact of the minimum phase filter construction step, the filter does
# not end up being as steep as the linear/zero-phase version.
#
# We can construct a minimum-phase filter from our existing linear-phase
# filter with the ``minimum_phase`` function (that will be in SciPy 0.19's
# :mod:`scipy.signal`), and note that the falloff is not as steep:
# Convert the linear-phase FIR above to minimum phase (mne.fixes provides the
# function destined for scipy.signal in SciPy 0.19, per the note above).
h_min = mne.fixes.minimum_phase(h)
plot_filter(h_min, sfreq, freq, gain, 'Minimum-phase', flim=flim)
###############################################################################
# .. _tut_effect_on_signals:
#
# Applying FIR filters
# --------------------
#
# Now lets look at some practical effects of these filters by applying
# them to some data.
#
# Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part)
# plus noise (random + line). Note that the original, clean signal contains
# frequency content in both the pass band and transition bands of our
# low-pass filter.
dur = 10.  # total signal duration (s)
center = 2.  # time at which the Morlet blip is centered (s)
morlet_freq = f_p  # put the blip's energy right at the filter's pass edge
tlim = [center - 0.2, center + 0.2]  # time window to display
tticks = [tlim[0], center, tlim[1]]
flim = [20, 70]  # frequency window to display
x = np.zeros(int(sfreq * dur) + 1)
# Gaussian-windowed sinusoid: the imaginary part of a Morlet wavelet.
blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20.
n_onset = int(center * sfreq) - len(blip) // 2
x[n_onset:n_onset + len(blip)] += blip
x_orig = x.copy()
rng = np.random.RandomState(0)  # fixed seed for reproducibility
x += rng.randn(len(x)) / 1000.  # broadband (random) noise
x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000.  # 60 Hz line noise
###############################################################################
# Filter it with a shallow cutoff, linear-phase FIR (which allows us to
# compensate for the constant filter delay):
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band / 2.  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             fir_design='firwin')
# Compensate the constant (linear-phase) delay by dropping len(h)//2 samples.
x_v16 = np.convolve(h, x)[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.16 default', flim=flim)
###############################################################################
# Filter it with a different design mode ``fir_design="firwin2"``, and also
# compensate for the constant filter delay. This method does not produce
# quite as sharp a transition compared to ``fir_design="firwin"``, despite
# being twice as long:
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band  # sec (twice the firwin design above)
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             fir_design='firwin2')
# Compensate the constant (linear-phase) delay.
x_v14 = np.convolve(h, x)[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.14 default', flim=flim)
###############################################################################
# This is actually set to become the default type of filter used in MNE-Python
# in 0.14 (see :ref:`tut_filtering_in_python`).
#
# Let's also filter with the MNE-Python 0.13 default, which is a
# long-duration, steep cutoff FIR that gets applied twice:
transition_band = 0.5  # Hz
f_s = f_p + transition_band
filter_dur = 10.  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             h_trans_bandwidth=transition_band,
                             filter_length='%ss' % filter_dur,
                             fir_design='firwin2')
# Forward pass, then a second pass on the time-reversed result (zero phase),
# trimming the filter delay from both ends.
x_v13 = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.13 default', flim=flim)
###############################################################################
# Let's also filter it with the MNE-C default, which is a long-duration
# steep-slope FIR filter designed using frequency-domain techniques:
h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5)
x_mne_c = np.convolve(h, x)[len(h) // 2:]  # compensate the constant delay
transition_band = 5  # Hz (default in MNE-C)
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'MNE-C default', flim=flim)
###############################################################################
# And now an example of a minimum-phase filter:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             phase='minimum', fir_design='firwin')
# Minimum-phase filters have small pass-band delay: no compensation applied.
x_min = np.convolve(h, x)
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'Minimum-phase filter', flim=flim)
###############################################################################
# Both the MNE-Python 0.13 and MNE-C filters have excellent frequency
# attenuation, but it comes at a cost of potential
# ringing (long-lasting ripples) in the time domain. Ringing can occur with
# steep filters, especially on signals with frequency content around the
# transition band. Our Morlet wavelet signal has power in our transition band,
# and the time-domain ringing is thus more pronounced for the steep-slope,
# long-duration filter than the shorter, shallower-slope filter:
axes = plt.subplots(1, 2)[1]


def plot_signal(x, offset):
    """Plot *x* (shifted by *offset*) in time, and its magnitude spectrum."""
    times = np.arange(len(x)) / sfreq
    axes[0].plot(times, x + offset)
    axes[0].set(xlabel='Time (sec)', xlim=times[[0, -1]])
    # Spectrum: keep only the non-negative frequencies.
    spectrum = fftpack.fft(x)
    spectrum_freqs = fftpack.fftfreq(len(x), 1. / sfreq)
    keep = spectrum_freqs >= 0
    magnitude_db = 20 * np.log10(np.maximum(np.abs(spectrum[keep]), 1e-16))
    axes[1].plot(spectrum_freqs[keep], magnitude_db)
    axes[1].set(xlim=flim)
yticks = np.arange(7) / -30.
yticklabels = ['Original', 'Noisy', 'FIR-firwin (0.16)', 'FIR-firwin2 (0.14)',
'FIR-steep (0.13)', 'FIR-steep (MNE-C)', 'Minimum-phase']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_v16, offset=yticks[2])
plot_signal(x_v14, offset=yticks[3])
plot_signal(x_v13, offset=yticks[4])
plot_signal(x_mne_c, offset=yticks[5])
plot_signal(x_min, offset=yticks[6])
axes[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-0.200, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
mne.viz.tight_layout()
plt.show()
###############################################################################
# IIR filters
# ===========
#
# MNE-Python also offers IIR filtering functionality that is based on the
# methods from :mod:`scipy.signal`. Specifically, we use the general-purpose
# functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`,
# which provide unified interfaces to IIR filter design.
#
# Designing IIR filters
# ---------------------
#
# Let's continue with our design of a 40 Hz low-pass filter, and look at
# some trade-offs of different IIR filters.
#
# Often the default IIR filter is a `Butterworth filter`_, which is designed
# to have a *maximally flat pass-band*. Let's look at a few orders of filter,
# i.e., a few different number of coefficients used and therefore steepness
# of the filter:
#
# .. note:: Notice that the group delay (which is related to the phase) of
# the IIR filters below are not constant. In the FIR case, we can
# design so-called linear-phase filters that have a constant group
# delay, and thus compensate for the delay (making the filter
# acausal) if necessary. This cannot be done with IIR filters, as
# they have a non-linear phase (non-constant group delay). As the
# filter order increases, the phase distortion near and in the
# transition band worsens. However, if acausal (forward-backward)
# filtering can be used, e.g. with :func:`scipy.signal.filtfilt`,
# these phase issues can theoretically be mitigated.
# Second-order Butterworth low-pass in second-order sections (SOS) form,
# which is numerically better behaved than (b, a) polynomial form.
sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=2', flim=flim)
# Eventually this will just be from scipy signal.sosfiltfilt, but 0.18 is
# not widely adopted yet (as of June 2016), so we use our wrapper...
sosfiltfilt = mne.fixes.get_sosfiltfilt()
# Zero-phase (forward-backward) application.
x_shallow = sosfiltfilt(sos, x)
###############################################################################
# The falloff of this filter is not very steep.
#
# .. note:: Here we have made use of second-order sections (SOS)
# by using :func:`scipy.signal.sosfilt` and, under the
# hood, :func:`scipy.signal.zpk2sos` when passing the
# ``output='sos'`` keyword argument to
# :func:`scipy.signal.iirfilter`. The filter definitions
# given in tut_filtering_basics_ use the polynomial
# numerator/denominator (sometimes called "tf") form ``(b, a)``,
# which are theoretically equivalent to the SOS form used here.
# In practice, however, the SOS form can give much better results
# due to issues with numerical precision (see
# :func:`scipy.signal.sosfilt` for an example), so SOS should be
# used when possible to do IIR filtering.
#
# Let's increase the order, and note that now we have better attenuation,
# with a longer impulse response:
# The same Butterworth design at order 8: steeper roll-off, at the cost of a
# longer impulse response.
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=8', flim=flim)
x_steep = sosfiltfilt(sos, x)
###############################################################################
# There are other types of IIR filters that we can use. For a complete list,
# check out the documentation for :func:`scipy.signal.iirdesign`. Let's
# try a Chebychev (type I) filter, which trades off ripple in the pass-band
# to get better attenuation in the stop-band:
# Chebyshev type I: tolerate pass-band ripple (``rp``, in dB) in exchange for
# steeper stop-band attenuation.
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
                       rp=1)  # dB of acceptable pass-band ripple
plot_filter(dict(sos=sos), sfreq, freq, gain,
            'Chebychev-1 order=8, ripple=1 dB', flim=flim)
###############################################################################
# And if we can live with even more ripple, we can get it slightly steeper,
# but the impulse response begins to ring substantially longer (note the
# different x-axis scale):
# Same design with 6 dB of allowed pass-band ripple: slightly steeper, but
# with substantially longer time-domain ringing.
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
                       rp=6)
plot_filter(dict(sos=sos), sfreq, freq, gain,
            'Chebychev-1 order=8, ripple=6 dB', flim=flim)
###############################################################################
# Applying IIR filters
# --------------------
#
# Now let's look at how our shallow and steep Butterworth IIR filters
# perform on our Morlet signal from before:
# Compare the shallow (order 2) and steep (order 8) Butterworth results in
# the time and frequency domains, reusing plot_signal() from above.
axes = plt.subplots(1, 2)[1]
yticks = np.arange(4) / -30.  # vertical offsets so the traces stack
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axes[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
            ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
    text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
            ylabel='Magnitude (dB)')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Some pitfalls of filtering
# ==========================
#
# Multiple recent papers have noted potential risks of drawing
# errant inferences due to misapplication of filters.
#
# Low-pass problems
# -----------------
#
# Filters in general, especially those that are acausal (zero-phase), can make
# activity appear to occur earlier or later than it truly did. As
# mentioned in VanRullen 2011 [3]_, investigations of commonly (at the time)
# used low-pass filters created artifacts when they were applied to simulated
# data. However, such deleterious effects were minimal in many real-world
# examples in Rousselet 2012 [5]_.
#
# Perhaps more revealing, it was noted in Widmann & Schröger 2012 [6]_ that
# the problematic low-pass filters from VanRullen 2011 [3]_:
#
# 1. Used a least-squares design (like :func:`scipy.signal.firls`) that
# included "do-not-care" transition regions, which can lead to
# uncontrolled behavior.
# 2. Had a filter length that was independent of the transition bandwidth,
# which can cause excessive ringing and signal distortion.
#
# .. _tut_filtering_hp_problems:
#
# High-pass problems
# ------------------
#
# When it comes to high-pass filtering, using corner frequencies above 0.1 Hz
# were found in Acunzo *et al.* 2012 [4]_ to:
#
# "...generate a systematic bias easily leading to misinterpretations of
# neural activity.”
#
# In a related paper, Widmann *et al.* 2015 [7]_ also came to suggest a 0.1 Hz
# highpass. And more evidence followed in Tanner *et al.* 2015 [8]_ of such
# distortions. Using data from language ERP studies of semantic and syntactic
# processing (i.e., N400 and P600), using a high-pass above 0.3 Hz caused
# significant effects to be introduced implausibly early when compared to the
# unfiltered data. From this, the authors suggested the optimal high-pass
# value for language processing to be 0.1 Hz.
#
# We can recreate a problematic simulation from Tanner *et al.* 2015 [8]_:
#
# "The simulated component is a single-cycle cosine wave with an amplitude
# of 5µV, onset of 500 ms poststimulus, and duration of 800 ms. The
# simulated component was embedded in 20 s of zero values to avoid
# filtering edge effects... Distortions [were] caused by 2 Hz low-pass and
# high-pass filters... No visible distortion to the original waveform
# [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
# Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
# (12 dB/octave roll-off)."
#
# .. note:: This simulated signal contains energy not just within the
# pass-band, but also within the transition and stop-bands -- perhaps
# most easily understood because the signal has a non-zero DC value,
# but also because it is a shifted cosine that has been
# *windowed* (here multiplied by a rectangular window), which
# makes the cosine and DC frequencies spread to other frequencies
# (multiplication in time is convolution in frequency, so multiplying
# by a rectangular window in the time domain means convolving a sinc
# function with the impulses at DC and the cosine frequency in the
# frequency domain).
#
# Recreate the Tanner et al. 2015 simulation: a single-cycle raised cosine
# (onset 500 ms post-stimulus, 800 ms duration, 5 uV peak) embedded in zeros.
x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2  # time axis (sec); stimulus at t=0
onset = np.where(t >= 0.5)[0][0]  # first sample at/after 500 ms
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq  # 800 ms of samples
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)
x[onset:onset + len(sig)] = sig
# NOTE(review): scipy.signal.iirfilter expects Wn normalized by the Nyquist
# frequency (sfreq / 2.), but the cutoffs below are divided by sfreq, unlike
# the ``f_p / nyq`` normalization used earlier in this file -- confirm
# whether the resulting factor-of-two shift in cutoff is intentional.
iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass')
iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass')
iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass')
iir_hp_2 = signal.iirfilter(2, 2. / sfreq, btype='highpass')
# Zero-phase application of each filter (padlen=0: no edge padding).
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)
xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = r'Amplitude ($\mu$V)'
tticks = [0, 0.5, 1.3, t[-1]]
axes = plt.subplots(2, 2)[1].ravel()
# Bug fix: the fourth trace is the 0.1 Hz *high*-pass (x_hp_p1); it was
# mislabeled 'LP$_{0.1}$' in the original.
for ax, x_f, title in zip(axes, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
    ax.plot(t, x, color='0.5')
    ax.plot(t, x_f, color='k', linestyle='--')
    ax.set(ylim=ylim, xlim=xlim, xticks=tticks,
           title=title, xlabel=xlabel, ylabel=ylabel)
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Similarly, in a P300 paradigm reported by Kappenman & Luck 2010 [12]_,
# they found that applying a 1 Hz high-pass decreased the probability of
# finding a significant difference in the N100 response, likely because
# the P300 response was smeared (and inverted) in time by the high-pass
# filter such that it tended to cancel out the increased N100. However,
# they nonetheless note that some high-passing can still be useful to deal
# with drifts in the data.
#
# Even though these papers generally advise a 0.1 Hz or lower frequency for
# a high-pass, it is important to keep in mind (as most authors note) that
# filtering choices should depend on the frequency content of both the
# signal(s) of interest and the noise to be suppressed. For example, in
# some of the MNE-Python examples involving :ref:`ch_sample_data`,
# high-pass values of around 1 Hz are used when looking at auditory
# or visual N100 responses, because we analyze standard (not deviant) trials
# and thus expect that contamination by later or slower components will
# be limited.
#
# Baseline problems (or solutions?)
# ---------------------------------
#
# In an evolving discussion, Tanner *et al.* 2015 [8]_ suggest using baseline
# correction to remove slow drifts in data. However, Maess *et al.* 2016 [9]_
# suggest that baseline correction, which is a form of high-passing, does
# not offer substantial advantages over standard high-pass filtering.
# Tanner *et al.* [10]_ rebutted that baseline correction can correct for
# problems with filtering.
#
# To see what they mean, consider again our old simulated signal ``x`` from
# before:
def baseline_plot(x):
    """Show high-pass filtering of ``x`` with and without baseline correction.

    Left column: the signal high-pass filtered only; right column: the same
    filtered signal after additionally subtracting the mean of the
    pre-stimulus (``t < 0``) period. One row per cutoff (0.1, 0.3, 0.5 Hz).

    Reads ``sfreq``, ``t``, ``tticks``, ``xlim``, ``ylim``, ``xlabel`` and
    ``sosfiltfilt`` from the enclosing scope.
    """
    all_axes = plt.subplots(3, 2)[1]
    for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])):
        for ci, ax in enumerate(axes):
            if ci == 0:
                iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass',
                                          output='sos')
                x_hp = sosfiltfilt(iir_hp, x, padlen=0)
            else:
                # Baseline-correct the filtered trace from the first column.
                x_hp -= x_hp[t < 0].mean()
            ax.plot(t, x, color='0.5')
            ax.plot(t, x_hp, color='k', linestyle='--')
            if ri == 0:
                ax.set(title=('No ' if ci == 0 else '') +
                       'Baseline Correction')
            ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
            ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
                          horizontalalignment='right')
    mne.viz.adjust_axes(axes)
    mne.viz.tight_layout()
    # Bug fix: the original called ``plt.suptitle(title)``, where ``title``
    # was a stale module-level loop variable left over from an earlier cell
    # (the string 'LP$_{0.1}$'), unrelated to this figure.
    plt.suptitle('High-pass filtering and baseline correction')
    plt.show()
baseline_plot(x)
###############################################################################
# In response, Maess *et al.* 2016 [11]_ note that these simulations do not
# address cases of pre-stimulus activity that is shared across conditions, as
# applying baseline correction will effectively copy the topology outside the
# baseline period. We can see this if we give our signal ``x`` with some
# consistent pre-stimulus activity, which makes everything look bad.
#
# .. note:: An important thing to keep in mind with these plots is that they
# are for a single simulated sensor. In multielectrode recordings
#           the topology (i.e., spatial pattern) of the pre-stimulus activity
# will leak into the post-stimulus period. This will likely create a
# spatially varying distortion of the time-domain signals, as the
# averaged pre-stimulus spatial pattern gets subtracted from the
# sensor time courses.
#
# Putting some activity in the baseline period:
# Add consistent "activity" to the baseline: a raised cosine spanning the
# whole pre-stimulus (t < 0) window, then re-run the comparison.
n_pre = (t < 0).sum()
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)
###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
# to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
# in the baseline period.
#
# We thus recommend carefully applying baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut_filtering_in_python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin`. In Widmann *et al.* 2015 [7]_, they
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
# "... providing a transition bandwidth of 25% of the lower passband
# edge but, where possible, not lower than 2 Hz and otherwise the
# distance from the passband edge to the critical frequency.”
#
# In practice, this means that for each high-pass value ``l_freq`` or
# low-pass value ``h_freq`` below, you would get this corresponding
# ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively,
# if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
#
# +------------------+-------------------+-------------------+
# | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
# +==================+===================+===================+
# | 0.01 | 0.01 | 2.0 |
# +------------------+-------------------+-------------------+
# | 0.1 | 0.1 | 2.0 |
# +------------------+-------------------+-------------------+
# | 1.0 | 1.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 2.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 4.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 8.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 10.0 | 2.5 | 2.5 |
# +------------------+-------------------+-------------------+
# | 20.0 | 5.0 | 5.0 |
# +------------------+-------------------+-------------------+
# | 40.0 | 10.0 | 10.0 |
# +------------------+-------------------+-------------------+
# | 45.0 | 11.25 | 5.0 |
# +------------------+-------------------+-------------------+
# | 48.0 | 12.0 | 2.0 |
# +------------------+-------------------+-------------------+
#
# MNE-Python has adopted this definition for its high-pass (and low-pass)
# transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and
# ``h_trans_bandwidth='auto'``.
#
# To choose the filter length automatically with ``filter_length='auto'``,
# the reciprocal of the shortest transition bandwidth is used to ensure
# decent attenuation at the stop frequency. Specifically, the reciprocal
# (in samples) is multiplied by 3.1, 3.3, or 5.0 for the Hann, Hamming,
# or Blackman windows, respectively as selected by the ``fir_window``
# argument for ``fir_design='firwin'``, and double these for
# ``fir_design='firwin2'`` mode.
#
# .. note:: For ``fir_design='firwin2'``, the multiplicative factors are
# doubled compared to what is given in Ifeachor and Jervis [2]_
# (p. 357), as :func:`scipy.signal.firwin2` has a smearing effect
# on the frequency response, which we compensate for by
# increasing the filter length. This is why
#           ``fir_design='firwin'`` is preferred to ``fir_design='firwin2'``.
#
# In 0.14, we default to using a Hamming window in filter design, as it
# provides up to 53 dB of stop-band attenuation with small pass-band ripple.
#
# .. note:: In band-pass applications, often a low-pass filter can operate
# effectively with fewer samples than the high-pass filter, so
# it is advisable to apply the high-pass and low-pass separately
# when using ``fir_design='firwin2'``. For design mode
# ``fir_design='firwin'``, there is no need to separate the
# operations, as the lowpass and highpass elements are constructed
# separately to meet the transition band requirements.
#
# For more information on how to use the
# MNE-Python filtering functions with real data, consult the preprocessing
# tutorial on
# :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`.
#
# Defaults in MNE-C
# -----------------
# MNE-C by default uses:
#
# 1. 5 Hz transition band for low-pass filters.
# 2. 3-sample transition band for high-pass filters.
# 3. Filter length of 8197 samples.
#
# The filter is designed in the frequency domain, creating a linear-phase
# filter such that the delay is compensated for as is done with the MNE-Python
# ``phase='zero'`` filtering option.
#
# Squared-cosine ramps are used in the transition regions. Because these
# are used in place of more gradual (e.g., linear) transitions,
# a given transition width will result in more temporal ringing but also more
# rapid attenuation than the same transition width in windowed FIR designs.
#
# The default filter length will generally have excellent attenuation
# but long ringing for the sample rates typically encountered in M-EEG data
# (e.g. 500-2000 Hz).
#
# Defaults in other software
# --------------------------
# A good but possibly outdated comparison of filtering in various software
# packages is available in [7]_. Briefly:
#
# * EEGLAB
# MNE-Python in 0.14 defaults to behavior very similar to that of EEGLAB,
# see the `EEGLAB filtering FAQ`_ for more information.
# * FieldTrip
# By default FieldTrip applies a forward-backward Butterworth IIR filter
# of order 4 (band-pass and band-stop filters) or 2 (for low-pass and
# high-pass filters). Similar filters can be achieved in MNE-Python when
# filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>`
# (see also :func:`mne.filter.construct_iir_filter` for options).
# For more information, see e.g. `FieldTrip band-pass documentation`_.
#
# Reporting Filters
# =================
# On page 45 in Widmann *et al.* [7]_, there is a convenient list of important
# filter parameters that should be reported with each publication:
#
# 1. filtertype (high-pass, low-pass, band-pass, band-stop, FIR, IIR)
# 2. cutoff frequency (including definition)
# 3. filter order (or length)
# 4. roll-off or transition bandwidth
# 5. passband ripple and stopband attenuation
# 6. filter delay (zero-phase, linear-phase, non-linear phase) and causality
# 7. direction of computation (one-pass forward/reverse,or two-pass forward and
# reverse)
#
# In the following, we will address how to deal with these parameters in MNE:
#
#
# Filter type
# -----------
# Depending on the function or method used, the filter type can be specified.
# To name an example, in :func:`mne.filter.create_filter`, the relevant
# arguments would be `l_freq`, `h_freq`, `method`, and if the method is FIR:
# `fir_window`, and `fir_design`.
#
#
# Cutoff frequency
# ----------------
# The cutoff of FIR filters in MNE is defined as half-amplitude cutoff in the
# middle of the transition band. That is, if you construct a lowpass FIR filter
# with ``h_freq = 40.``, the filter function will provide a transition
# bandwidth that depends on the `h_trans_bandwidth` argument. The desired
# half-amplitude cutoff of the lowpass FIR filter is then at:
# ``h_freq + transition_bandwidth/2.``.
#
# Filter length (order) and transition bandwidth (roll-off)
# ---------------------------------------------------------
# In the :ref:`tut_filtering_in_python` section, we have already talked about
# the default filter lengths and transition bandwidths that are used when no
# custom values are specified using the respective filter function's arguments.
#
# If you want to find out about the filter length and transition bandwidth that
# were used through the 'auto' setting, you can use
# :func:`mne.filter.create_filter` to print out the settings once more:
# Use the same settings as when calling e.g., `raw.filter()`; with
# verbose=True the chosen transition bandwidth and filter length are printed.
fir_coefs = mne.filter.create_filter(data=None,  # data is only used for sanity checking, not strictly needed # noqa
                                     sfreq=1000.,  # sfreq of your data in Hz
                                     l_freq=None,
                                     h_freq=40.,  # assuming a lowpass of 40 Hz
                                     method='fir',
                                     fir_window='hamming',
                                     fir_design='firwin',
                                     verbose=True)
# See the printed log for the transition bandwidth and filter length.
# Alternatively, get the filter length through:
filter_length = fir_coefs.shape[0]  # number of FIR taps
###############################################################################
# .. note:: If you are using an IIR filter, :func:`mne.filter.create_filter`
# will not print a filter length and transition bandwidth to the log.
# Instead, you can specify the roll off with the `iir_params`
# argument or stay with the default, which is a 4th order
# (Butterworth) filter.
#
# Passband ripple and stopband attenuation
# ----------------------------------------
#
# When using the standard :func:`scipy.signal.firwin` design (as for FIR
# MNE), the passband ripple and stopband attenuation are dependent upon the
# window used in design. For standard windows the values are listed in this
# table (see Ifeachor & Jervis, p. 357 [3]_):
#
# +-------------------------+-----------------+----------------------+
# | Name of window function | Passband ripple | Stopband attenuation |
# +=========================+=================+======================+
# | Hann | 0.0545 dB | 44 dB |
# +-------------------------+-----------------+----------------------+
# | Hamming | 0.0194 dB | 53 dB |
# +-------------------------+-----------------+----------------------+
# | Blackman | 0.0017 dB | 74 dB |
# +-------------------------+-----------------+----------------------+
#
#
# Filter delay and direction of computation
# -----------------------------------------
# For reporting this information, it might be sufficient to read the docstring
# of the filter function or method that you apply. For example in the
# docstring of `mne.filter.create_filter`, for the phase parameter it says:
#
# Phase of the filter, only used if ``method='fir'``.
# By default, a symmetric linear-phase FIR filter is constructed.
# If ``phase='zero'`` (default), the delay of this filter
# is compensated for. If ``phase=='zero-double'``, then this filter
# is applied twice, once forward, and once backward. If 'minimum',
# then a minimum-phase, causal filter will be used.
#
#
# Summary
# =======
#
# When filtering, there are always tradeoffs that should be considered.
# One important tradeoff is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider a transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
#
# .. [1] Parks TW, Burrus CS (1987). Digital Filter Design.
# New York: Wiley-Interscience.
# .. [2] Ifeachor, E. C., & Jervis, B. W. (2002). Digital Signal Processing:
# A Practical Approach. Prentice Hall.
# .. [3] Vanrullen, R. (2011). Four common conceptual fallacies in mapping
# the time course of recognition. Perception Science, 2, 365.
# .. [4] Acunzo, D. J., MacKenzie, G., & van Rossum, M. C. W. (2012).
# Systematic biases in early ERP and ERF components as a result
# of high-pass filtering. Journal of Neuroscience Methods,
# 209(1), 212–218. https://doi.org/10.1016/j.jneumeth.2012.06.011
# .. [5] Rousselet, G. A. (2012). Does filtering preclude us from studying
# ERP time-courses? Frontiers in Psychology, 3(131)
# .. [6] Widmann, A., & Schröger, E. (2012). Filter effects and filter
# artifacts in the analysis of electrophysiological data.
# Perception Science, 233.
# .. [7] Widmann, A., Schröger, E., & Maess, B. (2015). Digital filter
# design for electrophysiological data – a practical approach.
# Journal of Neuroscience Methods, 250, 34–46.
# https://doi.org/10.1016/j.jneumeth.2014.08.002
# .. [8] Tanner, D., Morgan-Short, K., & Luck, S. J. (2015).
# How inappropriate high-pass filters can produce artifactual effects
# and incorrect conclusions in ERP studies of language and cognition.
# Psychophysiology, 52(8), 997–1009. https://doi.org/10.1111/psyp.12437
# .. [9] Maess, B., Schröger, E., & Widmann, A. (2016).
# High-pass filters and baseline correction in M/EEG analysis.
# Commentary on: “How inappropriate high-pass filters can produce
# artifacts and incorrect conclusions in ERP studies of language
# and cognition.” Journal of Neuroscience Methods, 266, 164–165.
# .. [10] Tanner, D., Norton, J. J. S., Morgan-Short, K., & Luck, S. J. (2016).
# On high-pass filter artifacts (they’re real) and baseline correction
# (it’s a good idea) in ERP/ERMF analysis.
# .. [11] Maess, B., Schröger, E., & Widmann, A. (2016).
# High-pass filters and baseline correction in M/EEG analysis-continued
# discussion. Journal of Neuroscience Methods, 266, 171–172.
# Journal of Neuroscience Methods, 266, 166–170.
# .. [12] Kappenman E. & Luck, S. (2010). The effects of impedance on data
# quality and statistical significance in ERP recordings.
# Psychophysiology, 47, 888-904.
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: http://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: http://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: http://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _fieldtrip band-pass documentation: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter # noqa
| bsd-3-clause |
zorojean/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimator to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state

from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV

# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target

# Flatten each image into a 1-D feature vector.
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30]  # Test on independent people

# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]

n_pixels = data.shape[1]
# Bug fix: np.ceil / np.floor return floats, and float slice indices raise
# TypeError with modern NumPy -- cast to int before slicing.
half_ceil = int(np.ceil(0.5 * n_pixels))
half_floor = int(np.floor(0.5 * n_pixels))
X_train = train[:, :half_ceil]  # Upper half of the faces
y_train = train[:, half_floor:]  # Lower half of the faces
X_test = test[:, :half_ceil]
y_test = test[:, half_floor:]

# Fit estimators
ESTIMATORS = {
    "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
                                       random_state=0),
    "K-nn": KNeighborsRegressor(),
    "Linear regression": LinearRegression(),
    "Ridge": RidgeCV(),
}

y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
    estimator.fit(X_train, y_train)
    y_test_predict[name] = estimator.predict(X_test)

# Plot the completed faces: one row per face, first column the true face,
# then one column per estimator's completion.
image_shape = (64, 64)

n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)

for i in range(n_faces):
    true_face = np.hstack((X_test[i], y_test[i]))

    # Only the first row gets column titles.
    if i:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
    else:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
                          title="true faces")

    sub.axis("off")
    sub.imshow(true_face.reshape(image_shape),
               cmap=plt.cm.gray,
               interpolation="nearest")

    for j, est in enumerate(sorted(ESTIMATORS)):
        completed_face = np.hstack((X_test[i], y_test_predict[est][i]))

        if i:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
        else:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
                              title=est)

        sub.axis("off")
        sub.imshow(completed_face.reshape(image_shape),
                   cmap=plt.cm.gray,
                   interpolation="nearest")

plt.show()
| bsd-3-clause |
BiRG/Omics-Dashboard | omics/omics_dashboard/blueprints/api/collections.py | 1 | 9001 | import base64
import json
import os
import uuid
from flask import request, jsonify, make_response, send_from_directory, Blueprint
from flask_login import login_required
from werkzeug.utils import secure_filename
import data_tools as dt
from data_tools.file_tools.collection_tools import validate_update
from config.config import DATADIR, UPLOADDIR
from helpers import get_current_user, handle_exception, process_input_dict
# Blueprint bundling the /api/collections REST endpoints.
collections_api = Blueprint('collections_api', __name__, url_prefix='/api/collections')
@collections_api.route('/', methods=['GET', 'POST'])
@login_required
def list_collections():
    """List the current user's visible collections (GET) or create one (POST).

    A POST body may carry a ``sample_ids`` list; those samples are resolved
    for the current user and the key is removed from the attributes passed
    to the new collection. Any error is rendered via ``handle_exception``.
    """
    try:
        user = get_current_user()
        if request.method == 'GET':
            visible = dt.collections.get_collections(user)
            return jsonify([item.to_dict() for item in visible])
        if request.method == 'POST':
            payload = request.get_json(force=True)
            samples = []
            if 'sample_ids' in payload:
                samples = [dt.samples.get_sample(user, sid)
                           for sid in payload.pop('sample_ids')]
            new_collection = dt.collections.create_collection(user, samples,
                                                              payload)
            return jsonify(new_collection.to_dict())
    except Exception as e:
        return handle_exception(e)
@collections_api.route('/<collection_id>', methods=['GET', 'POST', 'PATCH', 'DELETE'])
@login_required
def get_collection(collection_id=None):
    """Read (GET), replace/update (POST), patch array values (PATCH) or
    delete (DELETE) a single collection.

    POST accepts JSON or form data and may carry a replacement data file,
    either as a multipart upload or as a base64-encoded 'file' field (for
    clients that cannot send multipart).
    """
    try:
        user = get_current_user()
        collection = dt.collections.get_collection(user, collection_id)
        if request.method == 'GET':
            return jsonify({**collection.to_dict(),
                            'is_write_permitted': dt.users.is_write_permitted(user, collection)})
        if request.method == 'DELETE':
            return jsonify(dt.collections.delete_collection(user, collection))
        # POST and PATCH both need the request body, from JSON or form data.
        if request.content_type == 'application/json':
            new_data = process_input_dict(request.get_json(force=True))
        else:
            new_data = process_input_dict(request.form.to_dict())
        if request.method == 'POST':
            if 'file' in request.files or 'file' in new_data:
                # Stage the replacement file under a random unique name.
                filename = os.path.join(UPLOADDIR, secure_filename(str(uuid.uuid4())))
                if 'file' in request.files:
                    if request.files['file'].filename == '':
                        raise ValueError('No file uploaded')
                    request.files['file'].save(filename)
                else:
                    # base64-encoded file in the body (non-multipart clients)
                    with open(filename, 'wb') as file:
                        collection_file_data = base64.b64decode(bytes(new_data['file'], 'utf-8'))
                        file.write(collection_file_data)
                    del new_data['file']
                # NOTE(review): if validate_file fails, control falls through
                # to the metadata-only update below and the uploaded file is
                # silently ignored -- confirm this is intended.
                if dt.util.validate_file(filename):
                    collection = dt.collections.update_collection(user, collection, new_data, filename)
                    return jsonify(collection.to_dict())
            return jsonify(dt.collections.update_collection(user, collection, new_data).to_dict())
        if request.method == 'PATCH':
            # We can have requests to change values in arrays here contents of request will be {path, i, j, new_value}
            # or list thereof (POST should be used to update entire arrays).
            if not isinstance(new_data, list):
                new_data = [new_data]
            # improperly formatted patch requests will throw error before anything changed
            for patch_data in new_data:
                validate_update(collection.filename, patch_data['path'], patch_data['i'],
                                patch_data['j'] if 'j' in patch_data else None, patch_data['new_value'])
            message = ''
            for patch_data in new_data:
                dt.collections.update_collection_array(user, collection,
                                                       patch_data['path'],
                                                       patch_data['i'],
                                                       patch_data['j'] if 'j' in patch_data else None,
                                                       patch_data['new_value'])
                message += (f'Changed value of {patch_data["path"]}[{patch_data["i"]}, '
                            f'{patch_data["j"] if "j" in patch_data else ""}] to {patch_data["new_value"]}\n')
            message += f'In collection {collection.id}'
            return jsonify({'message': message})
    except Exception as e:
        return handle_exception(e)
@collections_api.route('/download/<collection_id>', methods=['GET'])
@login_required
def download_collection(collection_id=None):
    """Download a collection's data.

    Query parameters select the representation:
      * format=pandas -- a DataFrame serialization; 'data_format' picks csv
        (default) or json, 'orient' the JSON layout (default 'records'),
        'single_column' a flattened layout, and 'as_attachment' whether to
        send a download (default) or a JSON-wrapped payload.
      * path=<dataset path> -- a single dataset from the file, as CSV.
      * otherwise, the raw collection file is sent as an attachment.
    """
    try:
        user = get_current_user()
        collection = dt.collections.get_collection(user, collection_id)
        if request.args.get('format', '') == 'pandas':
            single_column = request.args.get('single_column', '') == 'true'
            # args.get's default argument replaces the former
            # "value if key in request.args else default" pattern.
            data_format = request.args.get('data_format', 'csv')
            if data_format not in {'json', 'csv'}:
                raise ValueError(f'Improper data format {data_format}')
            json_orient = request.args.get('orient', 'records')
            out = dt.collections.download_collection_dataframe(user, collection, single_column, data_format,
                                                               json_orient)
            as_attachment = request.args.get('as_attachment', 'true')
            if as_attachment == 'false':
                response = jsonify({'data_frame': out[data_format]})
            else:
                if data_format == 'json':
                    # serialize the Python object for the response body
                    out['json'] = json.dumps(out['json'])
                response = make_response(out[data_format])
                response.headers['Content-Disposition'] = out['cd']
                response.mimetype = f'text/{data_format}'
            return response
        if request.args.get('path', ''):
            path = request.args.get('path', '')
            out = dt.collections.download_collection_dataset(user, collection, path)
            response = make_response(out['csv'])
            response.headers['Content-Disposition'] = out['cd']
            response.mimetype = 'text/csv'
            return response
        out = dt.collections.download_collection(user, collection)
        return send_from_directory(f'{DATADIR}/collections', out['filename'], as_attachment=True)
    except Exception as e:
        return handle_exception(e)
@collections_api.route('/upload', methods=['POST'])
@login_required
def upload_collection():
    """Create a new collection from an uploaded file.

    The file arrives either as a multipart upload or as a base64-encoded
    'file' field in the body (for clients, such as the MATLAB client, that
    cannot send multipart/form-data).  Remaining body fields become the
    collection's attributes.
    """
    try:
        user = get_current_user()
        # for request from MATLAB client that doesn't support multipart/form-data
        # file is base64 encoded.
        new_data = {}
        try:
            new_data.update(process_input_dict(request.get_json()))
        except Exception:
            # body was not JSON; fall back to regular form fields
            new_data.update(process_input_dict(request.form))
        if 'file' not in new_data and 'file' not in request.files:
            raise ValueError('No file uploaded')
        # Stage the upload under a random unique name to avoid collisions.
        filename = os.path.join(UPLOADDIR, secure_filename(str(uuid.uuid4())))
        if 'file' in request.files:
            if request.files['file'].filename == '':
                raise ValueError('No file uploaded')
            request.files['file'].save(filename)
        else:
            with open(filename, 'wb') as file:
                collection_file_data = base64.b64decode(bytes(new_data['file'], 'utf-8'))
                file.write(collection_file_data)
            del new_data['file']
        if dt.util.validate_file(filename):
            collection = dt.collections.upload_collection(user, filename, new_data)
            return jsonify(collection.to_dict())
        # reached only when validate_file rejects the staged file
        raise ValueError('Uploaded file is not a valid collection file')
    except Exception as e:
        return handle_exception(e)
@collections_api.route('/copy/<collection_id>', methods=['GET'])
@login_required
def copy_collection(collection_id):
    """Create a copy of the given collection for the current user and
    return the new record as JSON.

    This creates a record, so POST would be the conventional verb, but
    because the request takes no body the route is registered as GET.
    """
    try:
        current_user = get_current_user()
        return jsonify(dt.collections.copy_collection(current_user, dt.collections.get_collection(current_user, collection_id)).to_dict())
    except Exception as e:
        return handle_exception(e)
@collections_api.route('/merge', methods=['POST'])
@login_required
def merge_collections():
    """Merge several collections into a new one.

    The JSON body must contain 'collection_ids'; the merged collection
    keeps the attributes of the first collection in that list, overridden
    by any remaining body fields.
    """
    try:
        user = get_current_user()
        payload = request.get_json(force=True)
        # KeyError from a missing 'collection_ids' surfaces via handle_exception
        ids = payload.pop('collection_ids')
        sources = [dt.collections.get_collection(user, cid) for cid in ids]
        merged = dt.collections.merge_collections(user, sources, payload)
        return jsonify(merged.to_dict())
    except Exception as e:
        return handle_exception(e)
| mit |
chrisburr/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
# Random symmetric positive semi-definite matrix (V @ V.T).
# NOTE(review): not referenced by METRICS below -- presumably kept for a
# covariance-based metric such as mahalanobis; confirm before removing.
V = np.random.random((3, 3))
V = np.dot(V, V.T)

# Dimensionality of the synthetic data points used by the query tests.
DIMENSION = 3

# Metrics exercised by the query tests, mapped to extra metric kwargs.
METRICS = {'euclidean': {},
           'manhattan': {},
           'chebyshev': {},
           'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-NN: exhaustively rank all points of X for each query in Y.

    Returns (dist, ind), each of shape (n_queries, k), ordered by
    increasing distance.
    """
    dist_metric = DistanceMetric.get_metric(metric, **kwargs)
    pairwise = dist_metric.pairwise(Y, X)
    ind = np.argsort(pairwise, axis=1)[:, :k]
    rows = np.arange(Y.shape[0])[:, None]
    dist = pairwise[rows, ind]
    return dist, ind
def test_kd_tree_query():
    """Compare KDTree.query against brute force for every metric.

    Nose-style generator test: yields one check per combination of metric,
    k, dual-tree vs single-tree, and breadth- vs depth-first traversal.
    """
    np.random.seed(0)
    X = np.random.random((40, DIMENSION))
    Y = np.random.random((10, DIMENSION))

    def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
        # leaf_size=1 forces deep recursion through the tree code paths
        kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
        dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
                                breadth_first=breadth_first)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)

        # don't check indices here: if there are any duplicate distances,
        # the indices may not match. Distances should not have this problem.
        assert_array_almost_equal(dist1, dist2)

    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
    """query_radius must return exactly the points within r of the origin."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)

    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))

    for r in np.linspace(rad[0], rad[-1], 100):
        tree_ind = kdt.query_radius([query_pt], r + eps)[0]
        brute_ind = np.where(rad <= r + eps)[0]
        # compare as sorted index sets
        assert_array_almost_equal(np.sort(brute_ind), np.sort(tree_ind))
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
    """Distances returned by query_radius must match recomputed distances."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)

    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))

    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
        ind, dist = ind[0], dist[0]
        # recompute euclidean distances for the returned indices
        expected = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
        assert_array_almost_equal(expected, dist)
def compute_kernel_slow(Y, X, kernel, h):
    """O(n^2) reference kernel-density evaluation used to validate the tree.

    Returns, for each query point in Y, the normalized sum of kernel values
    over all points in X with bandwidth h.
    """
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)

    # unnormalized kernel profiles over the pairwise-distance matrix d
    profiles = {
        'gaussian': lambda: np.exp(-0.5 * (d * d) / (h * h)),
        'tophat': lambda: d < h,
        'epanechnikov': lambda: (1.0 - (d * d) / (h * h)) * (d < h),
        'exponential': lambda: np.exp(-d / h),
        'linear': lambda: (1 - d / h) * (d < h),
        'cosine': lambda: np.cos(0.5 * np.pi * d / h) * (d < h),
    }
    try:
        profile = profiles[kernel]
    except KeyError:
        raise ValueError('kernel not recognized')
    return norm * profile().sum(-1)
def test_kd_tree_kde(n_samples=100, n_features=3):
    """KDTree.kernel_density must match the O(n^2) reference within the
    requested tolerances, for every kernel, bandwidth, tolerance setting
    and traversal order (nose-style generator test)."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    kdt = KDTree(X, leaf_size=10)

    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)

            def check_results(kernel, h, atol, rtol, breadth_first):
                dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
                                          kernel=kernel,
                                          breadth_first=breadth_first)
                # the rtol floor guards against pure float-noise failures
                assert_allclose(dens, dens_true, atol=atol,
                                rtol=max(rtol, 1e-7))

            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first)
def test_gaussian_kde(n_samples=1000):
    """KDTree's gaussian kernel density must agree with scipy's
    gaussian_kde to 3 decimals across several bandwidths."""
    # Compare gaussian KDE results to scipy.stats.gaussian_kde
    from scipy.stats import gaussian_kde
    np.random.seed(0)

    x_in = np.random.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)

    for h in [0.01, 0.1, 1]:
        kdt = KDTree(x_in[:, None])
        try:
            # scipy expects a bandwidth relative to the sample std
            gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
        except TypeError:
            raise SkipTest("Old scipy, does not accept explicit bandwidth.")

        # kernel_density returns an unnormalized sum; divide by n for a density
        dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
        dens_gkde = gkde.evaluate(x_out)
        assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
    """two_point_correlation must equal a brute-force pair count at each
    radius, for single- and dual-tree traversal (generator test)."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    r = np.linspace(0, 1, 10)
    kdt = KDTree(X, leaf_size=10)

    # brute-force reference: count all (Y, X) pairs within each radius
    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    counts_true = [(D <= ri).sum() for ri in r]

    def check_two_point(r, dualtree):
        counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(counts, counts_true)

    for dualtree in (True, False):
        yield check_two_point, r, dualtree
def test_kd_tree_pickle():
    """A pickled-and-restored KDTree must return the same query results,
    for each pickle protocol (generator test)."""
    import pickle
    np.random.seed(0)
    X = np.random.random((10, 3))
    kdt1 = KDTree(X, leaf_size=1)
    ind1, dist1 = kdt1.query(X)

    def check_pickle_protocol(protocol):
        # round-trip the tree through pickle and re-query
        s = pickle.dumps(kdt1, protocol=protocol)
        kdt2 = pickle.loads(s)
        ind2, dist2 = kdt2.query(X)
        assert_array_almost_equal(ind1, ind2)
        assert_array_almost_equal(dist1, dist2)

    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    """Pushing 2*n_nbrs candidates per row must retain the n_nbrs nearest,
    sorted, together with their indices."""
    heap = NeighborsHeap(n_pts, n_nbrs)
    for row in range(n_pts):
        dists = np.random.random(2 * n_nbrs).astype(DTYPE)
        inds = np.arange(2 * n_nbrs, dtype=ITYPE)
        for dist, ind in zip(dists, inds):
            heap.push(row, dist, ind)

        # reference: the n_nbrs smallest distances and matching indices
        order = np.argsort(dists)
        d_heap, i_heap = heap.get_arrays(sort=True)
        assert_array_almost_equal(dists[order][:n_nbrs], d_heap[row])
        assert_array_almost_equal(inds[order][:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    """nodeheap_sort must agree with numpy's argsort on random values."""
    vals = np.random.random(n_nodes).astype(DTYPE)
    order_np = np.argsort(vals)
    sorted_vals, order_heap = nodeheap_sort(vals)
    assert_array_almost_equal(order_np, order_heap)
    assert_array_almost_equal(vals[order_np], sorted_vals)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    """simultaneous_sort must sort each row of dist in place and permute
    the matching row of ind identically."""
    dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)

    # keep untouched copies for the numpy reference
    dist_ref = dist.copy()
    ind_ref = ind.copy()

    # simultaneous sort rows using the helper under test (in place)
    simultaneous_sort(dist, ind)

    # simultaneous sort rows using numpy fancy indexing
    order = np.argsort(dist_ref, axis=1)
    rows = np.arange(n_rows)[:, None]
    assert_array_almost_equal(dist, dist_ref[rows, order])
    assert_array_almost_equal(ind, ind_ref[rows, order])
| bsd-3-clause |
phdowling/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# we create 40 separable points: two gaussian blobs shifted apart
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

# figure number
fignum = 1

# fit the model: one figure per regularization strength C
for name, penalty in (('unreg', 1), ('reg', 0.05)):

    clf = svm.SVC(kernel='linear', C=penalty)
    clf.fit(X, Y)

    # get the separating hyperplane, rewritten as the line y = a*x - b/w[1]
    w = clf.coef_[0]
    a = -w[0] / w[1]
    xx = np.linspace(-5, 5)
    yy = a * xx - (clf.intercept_[0]) / w[1]

    # plot the parallels to the separating hyperplane that pass through the
    # support vectors (margin width = 1 / ||w||)
    margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
    yy_down = yy + a * margin
    yy_up = yy - a * margin

    # plot the line, the points, and the nearest vectors to the plane
    plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.plot(xx, yy, 'k-')
    plt.plot(xx, yy_down, 'k--')
    plt.plot(xx, yy_up, 'k--')

    # support vectors drawn as larger open circles under the data points
    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
                facecolors='none', zorder=10)
    plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)

    plt.axis('tight')
    x_min = -4.8
    x_max = 4.2
    y_min = -6
    y_max = 6

    # color the background by the predicted class over a dense grid
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.figure(fignum, figsize=(4, 3))
    plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)

    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)

    plt.xticks(())
    plt.yticks(())
    fignum = fignum + 1

plt.show()
| bsd-3-clause |
gtoonstra/airflow | tests/hooks/test_hive_hook.py | 13 | 19529 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import os
import random
import unittest
from collections import OrderedDict
import mock
import pandas as pd
from hmsclient import HMSClient
from airflow import DAG, configuration
from airflow.exceptions import AirflowException
from airflow.hooks.hive_hooks import HiveCliHook, HiveMetastoreHook, HiveServer2Hook
from airflow.operators.hive_operator import HiveOperator
from airflow.utils import timezone
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
from airflow.utils.tests import assertEqualIgnoreMultipleSpaces
configuration.load_test_config()

# Fixed execution date used throughout, plus its ISO string and its
# date-stamp (YYYY-MM-DD) form used as the partition value.
DEFAULT_DATE = timezone.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
class HiveEnvironmentTest(unittest.TestCase):
    """Base class for tests that need a real partitioned Hive table.

    setUp creates an ``airflow.static_babynames_partitioned`` table with a
    single ``ds`` partition for DEFAULT_DATE by running a HiveOperator
    against the ``beeline_default`` connection; tearDown drops it again.
    Requires a live Hive installation.
    """

    def setUp(self):
        configuration.load_test_config()
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG('test_dag_id', default_args=args)
        # day after DEFAULT_DATE as a date stamp -- used by subclasses to
        # probe for a partition that does not exist
        self.next_day = (DEFAULT_DATE +
                         datetime.timedelta(days=1)).isoformat()[:10]
        self.database = 'airflow'
        self.partition_by = 'ds'
        self.table = 'static_babynames_partitioned'
        # templated HQL: (re)create the table and add one partition for
        # the execution date
        self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY ({{ params.partition_by }} string);
ALTER TABLE {{ params.table }}
ADD PARTITION({{ params.partition_by }}='{{ ds }}');
"""
        self.hook = HiveMetastoreHook()
        # random suffix keeps task ids unique across repeated runs
        t = HiveOperator(
            task_id='HiveHook_' + str(random.randint(1, 10000)),
            params={
                'database': self.database,
                'table': self.table,
                'partition_by': self.partition_by
            },
            hive_cli_conn_id='beeline_default',
            hql=self.hql, dag=self.dag)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
              ignore_ti_state=True)

    def tearDown(self):
        # drop the fixture table and its data so runs do not leak state
        hook = HiveMetastoreHook()
        with hook.get_conn() as metastore:
            metastore.drop_table(self.database, self.table, deleteData=True)
class TestHiveCliHook(unittest.TestCase):
    """Tests for HiveCliHook: run_cli, load_file and load_df.

    The first two tests require a working Hive CLI; the remaining tests
    mock out the CLI/file layer.
    """

    def test_run_cli(self):
        # smoke test: a trivial statement should run without error
        hook = HiveCliHook()
        hook.run_cli("SHOW DATABASES")

    def test_run_cli_with_hive_conf(self):
        # run_cli must expose user-supplied hive_conf entries and the
        # airflow context (injected through environment variables) as hive
        # variables that `set` can read back.
        hql = "set key;\n" \
              "set airflow.ctx.dag_id;\nset airflow.ctx.dag_run_id;\n" \
              "set airflow.ctx.task_id;\nset airflow.ctx.execution_date;\n"

        dag_id_ctx_var_name = \
            AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format']
        task_id_ctx_var_name = \
            AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format']
        execution_date_ctx_var_name = \
            AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][
                'env_var_format']
        dag_run_id_ctx_var_name = \
            AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][
                'env_var_format']
        os.environ[dag_id_ctx_var_name] = 'test_dag_id'
        os.environ[task_id_ctx_var_name] = 'test_task_id'
        os.environ[execution_date_ctx_var_name] = 'test_execution_date'
        os.environ[dag_run_id_ctx_var_name] = 'test_dag_run_id'

        hook = HiveCliHook()
        output = hook.run_cli(hql=hql, hive_conf={'key': 'value'})
        self.assertIn('value', output)
        self.assertIn('test_dag_id', output)
        self.assertIn('test_task_id', output)
        self.assertIn('test_execution_date', output)
        self.assertIn('test_dag_run_id', output)

        # clean up so later tests do not inherit the fake context
        del os.environ[dag_id_ctx_var_name]
        del os.environ[task_id_ctx_var_name]
        del os.environ[execution_date_ctx_var_name]
        del os.environ[dag_run_id_ctx_var_name]

    @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli')
    def test_load_file(self, mock_run_cli):
        # load_file should issue a LOAD DATA LOCAL INPATH statement
        filepath = "/path/to/input/file"
        table = "output_table"

        hook = HiveCliHook()
        hook.load_file(filepath=filepath, table=table, create=False)

        query = (
            "LOAD DATA LOCAL INPATH '{filepath}' "
            "OVERWRITE INTO TABLE {table} \n"
            .format(filepath=filepath, table=table)
        )
        mock_run_cli.assert_called_with(query)

    @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file')
    @mock.patch('pandas.DataFrame.to_csv')
    def test_load_df(self, mock_to_csv, mock_load_file):
        # load_df should serialize the frame with the requested delimiter
        # (no header/index) and forward matching options to load_file
        df = pd.DataFrame({"c": ["foo", "bar", "baz"]})
        table = "t"
        delimiter = ","
        encoding = "utf-8"

        hook = HiveCliHook()
        hook.load_df(df=df,
                     table=table,
                     delimiter=delimiter,
                     encoding=encoding)

        mock_to_csv.assert_called_once()
        kwargs = mock_to_csv.call_args[1]
        self.assertEqual(kwargs["header"], False)
        self.assertEqual(kwargs["index"], False)
        self.assertEqual(kwargs["sep"], delimiter)

        mock_load_file.assert_called_once()
        kwargs = mock_load_file.call_args[1]
        self.assertEqual(kwargs["delimiter"], delimiter)
        self.assertEqual(kwargs["field_dict"], {"c": u"STRING"})
        # the column ordering must be preserved, hence an OrderedDict
        self.assertTrue(isinstance(kwargs["field_dict"], OrderedDict))
        self.assertEqual(kwargs["table"], table)

    @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file')
    @mock.patch('pandas.DataFrame.to_csv')
    def test_load_df_with_optional_parameters(self, mock_to_csv, mock_load_file):
        # create/recreate flags must be passed through unchanged for every
        # combination of values
        hook = HiveCliHook()
        b = (True, False)
        for create, recreate in itertools.product(b, b):
            mock_load_file.reset_mock()
            hook.load_df(df=pd.DataFrame({"c": range(0, 10)}),
                         table="t",
                         create=create,
                         recreate=recreate)

            mock_load_file.assert_called_once()
            kwargs = mock_load_file.call_args[1]
            self.assertEqual(kwargs["create"], create)
            self.assertEqual(kwargs["recreate"], recreate)

    @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli')
    def test_load_df_with_data_types(self, mock_run_cli):
        # each pandas dtype must map to the expected Hive column type in
        # the generated CREATE TABLE statement
        d = OrderedDict()
        d['b'] = [True]
        d['i'] = [-1]
        d['t'] = [1]
        d['f'] = [0.0]
        d['c'] = ['c']
        d['M'] = [datetime.datetime(2018, 1, 1)]
        d['O'] = [object()]
        d['S'] = ['STRING'.encode('utf-8')]
        d['U'] = ['STRING']
        d['V'] = [None]
        df = pd.DataFrame(d)

        hook = HiveCliHook()
        hook.load_df(df, 't')

        # whitespace differences are ignored by the assertion helper below
        query = """
CREATE TABLE IF NOT EXISTS t (
b BOOLEAN,
i BIGINT,
t BIGINT,
f DOUBLE,
c STRING,
M TIMESTAMP,
O STRING,
S STRING,
U STRING,
V STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS textfile
;
"""
        assertEqualIgnoreMultipleSpaces(self, mock_run_cli.call_args_list[0][0][0], query)
class TestHiveMetastoreHook(HiveEnvironmentTest):
    """Tests for HiveMetastoreHook.

    The _get_max_partition_from_part_specs tests are pure-python; the rest
    run against the live table created by HiveEnvironmentTest.setUp.
    """

    # filter map whose key exists in the partition specs used below
    VALID_FILTER_MAP = {'key2': 'value2'}

    def test_get_max_partition_from_empty_part_specs(self):
        # no partition specs -> no max partition
        max_partition = \
            HiveMetastoreHook._get_max_partition_from_part_specs([],
                                                                 'key1',
                                                                 self.VALID_FILTER_MAP)
        self.assertIsNone(max_partition)

    def test_get_max_partition_from_valid_part_specs_and_invalid_filter_map(self):
        # filtering on a key absent from the specs must raise
        with self.assertRaises(AirflowException):
            HiveMetastoreHook._get_max_partition_from_part_specs(
                [{'key1': 'value1', 'key2': 'value2'},
                 {'key1': 'value3', 'key2': 'value4'}],
                'key1',
                {'key3': 'value5'})

    def test_get_max_partition_from_valid_part_specs_and_invalid_partition_key(self):
        # requesting the max of a key absent from the specs must raise
        with self.assertRaises(AirflowException):
            HiveMetastoreHook._get_max_partition_from_part_specs(
                [{'key1': 'value1', 'key2': 'value2'},
                 {'key1': 'value3', 'key2': 'value4'}],
                'key3',
                self.VALID_FILTER_MAP)

    def test_get_max_partition_from_valid_part_specs_and_none_partition_key(self):
        # a partition key is mandatory
        with self.assertRaises(AirflowException):
            HiveMetastoreHook._get_max_partition_from_part_specs(
                [{'key1': 'value1', 'key2': 'value2'},
                 {'key1': 'value3', 'key2': 'value4'}],
                None,
                self.VALID_FILTER_MAP)

    def test_get_max_partition_from_valid_part_specs_and_none_filter_map(self):
        max_partition = \
            HiveMetastoreHook._get_max_partition_from_part_specs(
                [{'key1': 'value1', 'key2': 'value2'},
                 {'key1': 'value3', 'key2': 'value4'}],
                'key1',
                None)

        # No partition will be filtered out.
        self.assertEqual(max_partition, b'value3')

    def test_get_max_partition_from_valid_part_specs(self):
        # with the filter map applied, only the first spec matches
        max_partition = \
            HiveMetastoreHook._get_max_partition_from_part_specs(
                [{'key1': 'value1', 'key2': 'value2'},
                 {'key1': 'value3', 'key2': 'value4'}],
                'key1',
                self.VALID_FILTER_MAP)
        self.assertEqual(max_partition, b'value1')

    def test_get_metastore_client(self):
        self.assertIsInstance(self.hook.get_metastore_client(), HMSClient)

    def test_get_conn(self):
        self.assertIsInstance(self.hook.get_conn(), HMSClient)

    def test_check_for_partition(self):
        # the setUp partition exists; the next-day partition does not
        partition = "{p_by}='{date}'".format(date=DEFAULT_DATE_DS,
                                             p_by=self.partition_by)
        missing_partition = "{p_by}='{date}'".format(date=self.next_day,
                                                     p_by=self.partition_by)
        self.assertTrue(
            self.hook.check_for_partition(self.database, self.table,
                                          partition)
        )
        self.assertFalse(
            self.hook.check_for_partition(self.database, self.table,
                                          missing_partition)
        )

    def test_check_for_named_partition(self):
        # named-partition syntax is key=value without quoting
        partition = "{p_by}={date}".format(date=DEFAULT_DATE_DS,
                                           p_by=self.partition_by)
        missing_partition = "{p_by}={date}".format(date=self.next_day,
                                                   p_by=self.partition_by)
        self.assertTrue(
            self.hook.check_for_named_partition(self.database,
                                                self.table,
                                                partition)
        )
        self.assertFalse(
            self.hook.check_for_named_partition(self.database,
                                                self.table,
                                                missing_partition)
        )

    def test_get_table(self):
        table_info = self.hook.get_table(db=self.database,
                                         table_name=self.table)
        self.assertEqual(table_info.tableName, self.table)
        columns = ['state', 'year', 'name', 'gender', 'num']
        self.assertEqual([col.name for col in table_info.sd.cols], columns)

    def test_get_tables(self):
        tables = self.hook.get_tables(db=self.database,
                                      pattern=self.table + "*")
        self.assertIn(self.table, {table.tableName for table in tables})

    def test_get_databases(self):
        databases = self.hook.get_databases(pattern='*')
        self.assertIn(self.database, databases)

    def test_get_partitions(self):
        # exactly one partition was created in setUp
        partitions = self.hook.get_partitions(schema=self.database,
                                              table_name=self.table)
        self.assertEqual(len(partitions), 1)
        self.assertEqual(partitions, [{self.partition_by: DEFAULT_DATE_DS}])

    def test_max_partition(self):
        filter_map = {self.partition_by: DEFAULT_DATE_DS}
        partition = self.hook.max_partition(schema=self.database,
                                            table_name=self.table,
                                            field=self.partition_by,
                                            filter_map=filter_map)
        # max_partition returns the partition value as bytes
        self.assertEqual(partition, DEFAULT_DATE_DS.encode('utf-8'))

    def test_table_exists(self):
        self.assertTrue(self.hook.table_exists(self.table, db=self.database))
        # a random numeric name is assumed not to exist
        self.assertFalse(
            self.hook.table_exists(str(random.randint(1, 10000)))
        )
class TestHiveServer2Hook(unittest.TestCase):
    """Tests for HiveServer2Hook against a small two-column fixture table
    loaded from a temporary CSV file (rows (1, 1) and (2, 2))."""

    def _upload_dataframe(self):
        # write the fixture rows to a local CSV without header or index
        df = pd.DataFrame({'a': [1, 2], 'b': [1, 2]})
        self.local_path = '/tmp/TestHiveServer2Hook.csv'
        df.to_csv(self.local_path, header=False, index=False)

    def setUp(self):
        configuration.load_test_config()
        self._upload_dataframe()
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG('test_dag_id', default_args=args)
        self.database = 'airflow'
        self.table = 'hive_server_hook'

        # templated HQL: (re)create the table and load the CSV fixture
        self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
a int,
b int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ',';
LOAD DATA LOCAL INPATH '{{ params.csv_path }}'
OVERWRITE INTO TABLE {{ params.table }};
"""

        # hive reports fully-qualified column names (<table>.<column>)
        self.columns = ['{}.a'.format(self.table),
                        '{}.b'.format(self.table)]
        self.hook = HiveMetastoreHook()
        # random suffix keeps task ids unique across repeated runs
        t = HiveOperator(
            task_id='HiveHook_' + str(random.randint(1, 10000)),
            params={
                'database': self.database,
                'table': self.table,
                'csv_path': self.local_path
            },
            hive_cli_conn_id='beeline_default',
            hql=self.hql, dag=self.dag)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
              ignore_ti_state=True)

    def tearDown(self):
        # drop the fixture table and remove the temporary CSV
        hook = HiveMetastoreHook()
        with hook.get_conn() as metastore:
            metastore.drop_table(self.database, self.table, deleteData=True)
        os.remove(self.local_path)

    def test_get_conn(self):
        # smoke test: obtaining a connection should not raise
        hook = HiveServer2Hook()
        hook.get_conn()

    def test_get_records(self):
        hook = HiveServer2Hook()
        query = "SELECT * FROM {}".format(self.table)
        results = hook.get_records(query, schema=self.database)
        self.assertListEqual(results, [(1, 1), (2, 2)])

    def test_get_pandas_df(self):
        hook = HiveServer2Hook()
        query = "SELECT * FROM {}".format(self.table)
        df = hook.get_pandas_df(query, schema=self.database)
        self.assertEqual(len(df), 2)
        self.assertListEqual(df.columns.tolist(), self.columns)
        self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])

    def test_get_results_header(self):
        # results['header'] carries (name, type, ...) tuples per column
        hook = HiveServer2Hook()
        query = "SELECT * FROM {}".format(self.table)
        results = hook.get_results(query, schema=self.database)
        self.assertListEqual([col[0] for col in results['header']],
                             self.columns)

    def test_get_results_data(self):
        hook = HiveServer2Hook()
        query = "SELECT * FROM {}".format(self.table)
        results = hook.get_results(query, schema=self.database)
        self.assertListEqual(results['data'], [(1, 1), (2, 2)])

    def test_to_csv(self):
        hook = HiveServer2Hook()
        query = "SELECT * FROM {}".format(self.table)
        csv_filepath = 'query_results.csv'
        hook.to_csv(query, csv_filepath, schema=self.database,
                    delimiter=',', lineterminator='\n', output_header=True)
        # the exported CSV must round-trip through pandas unchanged
        df = pd.read_csv(csv_filepath, sep=',')
        self.assertListEqual(df.columns.tolist(), self.columns)
        self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])
        self.assertEqual(len(df), 2)

    def test_multi_statements(self):
        # when several statements run, the returned rows are those of the
        # SELECT (asserted below); DDL statements contribute no rows
        sqls = [
            "CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)",
            "SELECT * FROM {}".format(self.table),
            "DROP TABLE test_multi_statements",
        ]
        hook = HiveServer2Hook()
        results = hook.get_records(sqls, schema=self.database)
        self.assertListEqual(results, [(1, 1), (2, 2)])

    def test_get_results_with_hive_conf(self):
        # hive_conf entries and the airflow context env vars must be
        # visible via `set` statements in the query output
        hql = ["set key",
               "set airflow.ctx.dag_id",
               "set airflow.ctx.dag_run_id",
               "set airflow.ctx.task_id",
               "set airflow.ctx.execution_date"]

        dag_id_ctx_var_name = \
            AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format']
        task_id_ctx_var_name = \
            AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format']
        execution_date_ctx_var_name = \
            AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][
                'env_var_format']
        dag_run_id_ctx_var_name = \
            AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][
                'env_var_format']
        os.environ[dag_id_ctx_var_name] = 'test_dag_id'
        os.environ[task_id_ctx_var_name] = 'test_task_id'
        os.environ[execution_date_ctx_var_name] = 'test_execution_date'
        os.environ[dag_run_id_ctx_var_name] = 'test_dag_run_id'

        hook = HiveServer2Hook()
        output = '\n'.join(res_tuple[0]
                           for res_tuple
                           in hook.get_results(hql=hql,
                                               hive_conf={'key': 'value'})['data'])
        self.assertIn('value', output)
        self.assertIn('test_dag_id', output)
        self.assertIn('test_task_id', output)
        self.assertIn('test_execution_date', output)
        self.assertIn('test_dag_run_id', output)

        # clean up so later tests do not inherit the fake context
        del os.environ[dag_id_ctx_var_name]
        del os.environ[task_id_ctx_var_name]
        del os.environ[execution_date_ctx_var_name]
        del os.environ[dag_run_id_ctx_var_name]
| apache-2.0 |
richrr/scripts | python/table_sorter.py | 1 | 4467 | import os
import sys
from utils import *
import operator
from time import localtime, strftime
import argparse
import math
import numpy
import matplotlib.pyplot as plt
import brewer2mpl
import pylab
#usage: python ~/scripts/python/table_sorter.py -i Group_otu_table_sorted_L2.txt
def main(args):
    """Convert a tab-delimited taxonomic summary table into a sortable
    DataTables HTML page written next to the input file.

    NOTE(review): the ``args`` parameter is ignored -- argparse re-reads
    sys.argv below.  Presumably the caller passes sys.argv; confirm.
    """
    parser = argparse.ArgumentParser(description='Sort table of taxonomic summary')
    parser.add_argument('-i', '--infile')
    parser.add_argument('-o', '--outfile', default="sortablefile.html") # output filename
    parser.add_argument('-d', '--delimiter', default='\t') # delimiter for file
    parser.add_argument('-f', '--float', action='store_true', default=False) # show numbers as decimal instead of percent (default)

    args = parser.parse_args()

    if len(sys.argv)==1 :
        parser.print_help()
        sys.exit('\natleast one argument required\n')

    infile = args.infile
    # the output name is appended to the input path, not a directory join
    outpfile = infile+args.outfile
    delim = args.delimiter
    showfloat = args.float

    lines = read_file(infile)
    if 'Taxon' not in lines[0]:
        sys.exit('\nheader line is required, tab-delimited, starts with taxon, followed by samples\n')

    content = ''
    content += FIXED_STRING()
    content += BEGIN_STRING(showfloat, infile)
    counter = 0
    TABS = '\t\t\t\t\t'  # indentation prefix for generated table cells
    for l in lines:
        counter += 1
        l = l.strip()
        # skip blank lines and any line containing '#'
        # NOTE(review): counter is incremented before this skip, so if the
        # first line were a comment the real header would miss the
        # counter == 1 branch -- confirm inputs never start with comments.
        if '#' in l or not l:
            continue
        if counter == 1:
            # header row: open the table and emit one <th> per column
            content += """\
<table id="example" class="display" cellspacing="0" width="100%">
<thead>
<tr>
"""
            val = [TABS+'<th>'+i+'</th>' for i in l.split(delim)]
            content = content + '\n'.join(val) + """\
</tr>
</thead>
"""
            content += """\
<tbody>
"""
            continue
        # data row: one <td> per column
        content += """\
<tr>
"""
        val = list()
        for i in l.split(delim):
            r = i
            try:
                # numeric cells: optionally convert fraction -> percent
                # with one decimal place; non-numeric cells (taxon names)
                # fall through unchanged via the except below
                if not showfloat:
                    i = float(i)*100
                    r = "%.1f" % float(i)
            except:
                pass
            val.append(TABS+'<td>'+str(r)+'</td>')
        #val = [TABS+'<td>'+i+'</td>' for i in l.split(delim)]
        content = content + '\n'.join(val) + """\
</tr>
"""
    # NOTE(review): the <tbody>/<table> opened above are never closed;
    # END_STRING only closes the page scaffolding -- confirm intent.
    content += END_STRING()
    #print content
    writeTXT_to_file(content, outpfile)
def END_STRING():
    """Return the closing HTML markup for the generated page."""
    closing_tags = ("</section>", "</div>", "</body>", "</html>")
    return "\n".join(closing_tags) + "\n"
def BEGIN_STRING(showfloat, infile):
    """Return the opening page markup: title, source file name, and an
    optional note that values are shown as percent relative abundance
    (emitted only when ``showfloat`` is false)."""
    parts = [
        '<body class="dt-example">\n',
        '<div class="container">\n',
        '<section>\n',
        '<h1>Taxonomic summary<span> Sortable </span></h1>\n',
        '<h2>File: %s</h2>' % infile,
    ]
    if not showfloat:
        parts.append('<h3>Numbers are in % relative abundance</h3>')
    return ''.join(parts)
def FIXED_STRING():
    """Return the static HTML head: DataTables CSS/JS includes and the
    document-ready hook that turns #example into a sortable table."""
    return """\
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="shortcut icon" type="image/ico" href="http://www.datatables.net/favicon.ico">
<meta name="viewport" content="initial-scale=1.0, maximum-scale=2.0">
<title>DataTables example - Default ordering (sorting)</title>
<link rel="stylesheet" type="text/css" href="http://www.hort.vt.edu/microeco/RR/DataTables-1.10.2/media/css/jquery.dataTables.css">
<link rel="stylesheet" type="text/css" href="http://www.hort.vt.edu/microeco/RR/DataTables-1.10.2/examples/resources/syntax/shCore.css">
<link rel="stylesheet" type="text/css" href="http://www.hort.vt.edu/microeco/RR/DataTables-1.10.2/examples/resources/demo.css">
<style type="text/css" class="init">
</style>
<script type="text/javascript" language="javascript" src="http://www.hort.vt.edu/microeco/RR/DataTables-1.10.2/media/js/jquery.js"></script>
<script type="text/javascript" language="javascript" src="http://www.hort.vt.edu/microeco/RR/DataTables-1.10.2/media/js/jquery.dataTables.js"></script>
<script type="text/javascript" language="javascript" src="http://www.hort.vt.edu/microeco/RR/DataTables-1.10.2/examples/resources/syntax/shCore.js"></script>
<script type="text/javascript" language="javascript" src="http://www.hort.vt.edu/microeco/RR/DataTables-1.10.2/examples/resources/demo.js"></script>
<script type="text/javascript" language="javascript" class="init">
$(document).ready(function() {
$('#example').dataTable( {
"order": [[ 0, "asc" ]]
} );
} );
</script>
</head>
"""
if __name__ == '__main__':
    # Print a start timestamp, then run the converter.
    # Replaced `os.system('echo ' + ...)`: spawning a shell just to echo a
    # string is wasteful and quote-fragile, and the old local name shadowed
    # the stdlib `datetime` module name.
    start_stamp = strftime("%a, %d %b %Y %I:%M:%S %p", localtime())
    print(start_stamp)
    main(sys.argv)
| gpl-3.0 |
DSLituiev/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
OGGM/oggm | oggm/tests/funcs.py | 1 | 16644 | import os
import shutil
from distutils.util import strtobool
import hashlib
import numpy as np
import xarray as xr
import shapely.geometry as shpg
from scipy import optimize as optimization
# Local imports
import oggm
import oggm.cfg as cfg
from oggm.utils import (get_demo_file, mkdir, get_git_ident, get_sys_info,
get_env_info, apply_test_ref_tstars)
from oggm.workflow import execute_entity_task
from oggm.core import flowline
from oggm import tasks
from oggm.core.flowline import RectangularBedFlowline
_TEST_DIR = None
def dummy_constant_bed(hmax=3000., hmin=1000., nx=200, map_dx=100.,
                       widths=3.):
    """One rectangular-bed flowline with a constant slope and width.

    Heights fall linearly from hmax to hmin over nx grid points; the
    ice-free surface equals the bed, and every point gets the same width
    (in grid units).
    """
    grid = np.arange(0, nx - 0.5, 1)
    heights = np.linspace(hmax, hmin, nx)
    width_arr = heights * 0. + widths
    centerline = shpg.LineString(np.vstack([grid, grid * 0.]).T)
    fl = flowline.RectangularBedFlowline(centerline, 1., map_dx,
                                         heights, heights, width_arr)
    return [fl]
def dummy_constant_bed_cliff(hmax=3000., hmin=1000., nx=200, map_dx=100.,
                             cliff_height=250.):
    """Constant-slope rectangular bed with a vertical cliff at grid point 50.

    Such a drop (real, or a DEM error/artifact) is used to test the mass
    conservation of the models.
    """
    heights = np.linspace(hmax, hmin, nx)
    heights[50:] -= cliff_height  # everything downstream of the cliff drops
    width_arr = heights * 0. + 1.
    grid = np.arange(0, nx - 0.5, 1)
    centerline = shpg.LineString(np.vstack([grid, grid * 0.]).T)
    return [flowline.RectangularBedFlowline(centerline, 1., map_dx,
                                            heights, heights, width_arr)]
def dummy_constant_bed_obstacle(hmax=3000., hmin=1000., nx=200):
    """Constant-slope rectangular bed with a 200 m step-up obstacle
    starting at grid point 60."""
    map_dx = 100.
    heights = np.linspace(hmax, hmin, nx)
    heights[60:] += 200.0  # the obstacle: the downstream part is raised
    width_arr = heights * 0. + 1.
    grid = np.arange(0, nx - 0.5, 1)
    centerline = shpg.LineString(np.vstack([grid, grid * 0.]).T)
    return [flowline.RectangularBedFlowline(centerline, 1., map_dx,
                                            heights, heights, width_arr)]
def dummy_bumpy_bed():
    """Linear bed with a Gaussian bump (height 170 m, width ~5 points)
    centered on grid point 30."""
    map_dx = 100.
    nx = 200
    grid = np.arange(0, nx - 0.5, 1)
    heights = np.linspace(3000, 1000, nx)
    heights = heights + 170. * np.exp(-((grid - 30) / 5) ** 2)
    width_arr = heights * 0. + 3.
    centerline = shpg.LineString(np.vstack([grid, grid * 0.]).T)
    return [flowline.RectangularBedFlowline(centerline, 1., map_dx,
                                            heights, heights, width_arr)]
def dummy_noisy_bed(map_dx=100.):
    """Linear bed with reproducible uniform noise in [-50, 50) (seed 42)."""
    nx = 200
    np.random.seed(42)  # keep the profile reproducible across runs
    grid = np.arange(0, nx - 0.5, 1)
    heights = np.linspace(3000, 1000, nx) + (100 * np.random.rand(nx) - 50.)
    width_arr = heights * 0. + 3.
    centerline = shpg.LineString(np.vstack([grid, grid * 0.]).T)
    return [flowline.RectangularBedFlowline(centerline, 1., map_dx,
                                            heights, heights, width_arr)]
def dummy_parabolic_bed(hmax=3000., hmin=1000., nx=200, map_dx=100.,
                        default_shape=5.e-3,
                        from_other_shape=None, from_other_bed=None):
    """Parabolic-bed flowline whose shape/bed can be partially overridden.

    ``from_other_shape`` / ``from_other_bed``, when given, replace the
    first len(...) entries of the shape and bed arrays (e.g. to reuse
    values taken from another glacier).
    """
    heights = np.linspace(hmax, hmin, nx)
    bed = heights * 1  # independent copy: overrides must not touch surface_h
    shapes = heights * 0. + default_shape
    if from_other_shape is not None:
        shapes[:len(from_other_shape)] = from_other_shape
    if from_other_bed is not None:
        bed[:len(from_other_bed)] = from_other_bed
    grid = np.arange(0, nx - 0.5, 1)
    centerline = shpg.LineString(np.vstack([grid, grid * 0.]).T)
    return [flowline.ParabolicBedFlowline(centerline, 1., map_dx, heights,
                                          bed, shapes)]
def dummy_mixed_bed(deflambdas=3.5, map_dx=100., mixslice=None):
    """Mixed-bed flowline: parabolic everywhere except a trapezoidal patch.

    The patch (``mixslice`` if given, else grid points 10:20) gets a NaN
    bed shape, which marks it as trapezoidal with wall angle
    ``deflambdas``.
    """
    dx = 1.
    nx = 200
    surface_h = np.linspace(3000, 1000, nx)
    bed_h = surface_h
    shape = surface_h * 0. + 3.e-03
    # np.nan: the np.NaN alias was removed in NumPy 2.0
    if mixslice:
        shape[mixslice] = np.nan
    else:
        shape[10:20] = np.nan
    is_trapezoid = ~np.isfinite(shape)
    lambdas = shape * 0.
    lambdas[is_trapezoid] = deflambdas
    widths_m = bed_h * 0. + 10
    section = bed_h * 0.
    coords = np.arange(0, nx - 0.5, 1)
    line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
    fls = flowline.MixedBedFlowline(line=line, dx=dx, map_dx=map_dx,
                                    surface_h=surface_h, bed_h=bed_h,
                                    section=section, bed_shape=shape,
                                    is_trapezoid=is_trapezoid,
                                    lambdas=lambdas, widths_m=widths_m)
    return [fls]
def dummy_trapezoidal_bed(hmax=3000., hmin=1000., nx=200, map_dx=100.,
                          def_lambdas=2):
    """Trapezoidal bed with constant width (1.6) and wall angle def_lambdas."""
    heights = np.linspace(hmax, hmin, nx)
    width_arr = heights * 0. + 1.6
    lambda_arr = heights * 0. + def_lambdas
    grid = np.arange(0, nx - 0.5, 1)
    centerline = shpg.LineString(np.vstack([grid, grid * 0.]).T)
    return [flowline.TrapezoidalBedFlowline(centerline, 1., map_dx, heights,
                                            heights, width_arr, lambda_arr)]
def dummy_width_bed():
    """Rectangular bed whose width is 6 over the first 20 points, then 3."""
    map_dx = 100.
    nx = 200
    heights = np.linspace(3000, 1000, nx)
    width_arr = heights * 0. + 3.
    width_arr[:20] = 6.
    grid = np.arange(0, nx - 0.5, 1)
    centerline = shpg.LineString(np.vstack([grid, grid * 0.]).T)
    return [flowline.RectangularBedFlowline(centerline, 1., map_dx,
                                            heights, heights, width_arr)]
def dummy_width_bed_tributary(map_dx=100., n_trib=1):
    """Main flowline plus ``n_trib`` tributaries joining at grid point 20.

    The total width of 6 over the upper 20 points is split evenly among
    the main branch and the tributaries.  Tributaries are returned first,
    the main flowline last.
    """
    dx = 1.
    nx = 200
    heights = np.linspace(3000, 1000, nx)
    width_arr = heights * 0. + 3.
    width_arr[:20] = 6 / (n_trib + 1)  # share the upper width among branches
    grid = np.arange(0, nx - 0.5, 1)
    main_line = shpg.LineString(np.vstack([grid, grid * 0.]).T)
    fl_main = flowline.RectangularBedFlowline(main_line, dx, map_dx,
                                              heights, heights, width_arr)
    trib_coords = np.arange(0, 19.1, 1)
    trib_line = shpg.LineString(np.vstack([trib_coords,
                                           trib_coords * 0. + 1]).T)
    flowlines = [fl_main]
    for _ in range(n_trib):
        trib = flowline.RectangularBedFlowline(trib_line, dx, map_dx,
                                               heights[0:20],
                                               heights[0:20],
                                               width_arr[0:20])
        trib.set_flows_to(fl_main)
        flowlines.append(trib)
    return flowlines[::-1]
def dummy_bed_tributary_tail_to_head(map_dx=100., n_trib=1, small_cliff=False):
    # bed with tributary glacier(s) flowing directly into their top
    # (for splitted flowline experiments)
    """Split one linear bed into ``n_trib`` + 1 consecutive segments.

    Each upstream segment's tail flows into the head of the next one
    (tail-to-head junction).  With ``small_cliff`` all downstream
    segments are lowered by 100 m, creating a step at each junction.
    """
    dx = 1.
    nx = 200
    surface_h = np.linspace(3000, 1000, nx)
    bed_h = surface_h
    widths = surface_h * 0. + 3.
    # split points, evenly spaced between grid points 20 and 180
    pix_id = np.linspace(20, 180, n_trib).round().astype(int)
    # first segment: from the top down to the first split point
    fls = [flowline.RectangularBedFlowline(dx=dx, map_dx=map_dx,
                                           surface_h=surface_h[:pix_id[0]],
                                           bed_h=bed_h[:pix_id[0]],
                                           widths=widths[:pix_id[0]])]
    for i, pid in enumerate(pix_id):
        if i == (len(pix_id) - 1):
            # last segment runs to the end of the grid
            eid = nx + 1
        else:
            eid = pix_id[i + 1]
        # lower the downstream segments when a small cliff is requested
        dh = -100 if small_cliff else 0
        fl = flowline.RectangularBedFlowline(dx=dx, map_dx=map_dx,
                                             surface_h=surface_h[pid:eid] + dh,
                                             bed_h=bed_h[pid:eid] + dh,
                                             widths=widths[pid:eid])
        # the previous segment ends at this segment's head
        fls[-1].set_flows_to(fl, to_head=True, check_tail=False)
        fls.append(fl)
    return fls
def bu_tidewater_bed(gridsize=200, gridlength=6e4, widths_m=600,
                     b_0=260, alpha=0.017, b_1=350, x_0=4e4, sigma=1e4,
                     water_level=0, split_flowline_before_water=None):
    """Idealized tidewater bed profile (Bassis & Ultee style).

    The bed is a linear slope plus a Gaussian bump centered at ``x_0``,
    shifted by ``water_level``.  If ``split_flowline_before_water`` is
    given, the flowline is cut that many grid points upstream of the
    first below-water point and returned as two tail-to-head connected
    segments; otherwise a single flowline is returned.
    """
    # Bassis & Ultee bed profile
    dx_meter = gridlength / gridsize
    x = np.arange(gridsize+1) * dx_meter
    bed_h = b_0 - alpha * x + b_1 * np.exp(-((x - x_0) / sigma)**2)
    bed_h += water_level
    surface_h = bed_h  # ice-free start: surface is the bed (same array object)
    widths = surface_h * 0. + widths_m / dx_meter  # widths in grid units
    if split_flowline_before_water is not None:
        # index of the first below-water grid point, moved upstream
        bs = np.min(np.nonzero(bed_h < 0)[0]) - split_flowline_before_water
        fls = [RectangularBedFlowline(dx=1, map_dx=dx_meter,
                                      surface_h=surface_h[:bs],
                                      bed_h=bed_h[:bs],
                                      widths=widths[:bs]),
               RectangularBedFlowline(dx=1, map_dx=dx_meter,
                                      surface_h=surface_h[bs:],
                                      bed_h=bed_h[bs:],
                                      widths=widths[bs:]),
               ]
        # upstream segment flows into the head of the downstream one
        fls[0].set_flows_to(fls[1], check_tail=False, to_head=True)
        return fls
    else:
        return [
            RectangularBedFlowline(dx=1, map_dx=dx_meter, surface_h=surface_h,
                                   bed_h=bed_h, widths=widths)]
def patch_minimal_download_oggm_files(*args, **kwargs):
    """Monkey-patch target: any download attempt in minimal mode is an error."""
    message = 'We should not be there in minimal mode'
    raise RuntimeError(message)
def use_multiprocessing():
    """Return True if the OGGM_TEST_MULTIPROC env var is set to a truthy value.

    Accepts the same spellings as the old ``distutils.util.strtobool``
    ('y', 'yes', 't', 'true', 'on', '1', case-insensitive).  Anything
    else -- including an unset or malformed variable -- yields False,
    matching the original behaviour where parse errors were swallowed.
    ``distutils`` was removed from the stdlib in Python 3.12 (PEP 632),
    so the value is parsed locally instead.
    """
    value = os.getenv("OGGM_TEST_MULTIPROC", "False")
    return value.lower() in ('y', 'yes', 't', 'true', 'on', '1')
def get_test_dir():
    """Return (and cache in a module global) a per-environment test dir.

    The directory name is the md5 of the git ident plus system and
    environment info, so changing any of those yields a fresh directory.
    Stale hash directories under cfg.PATHS['test_dir'] are deleted.
    """
    global _TEST_DIR
    if _TEST_DIR is None:
        s = get_git_ident()
        s += ''.join([str(k) + str(v) for k, v in get_sys_info()])
        s += ''.join([str(k) + str(v) for k, v in get_env_info()])
        # md5 here is a cache key, not a security feature
        s = hashlib.md5(s.encode()).hexdigest()
        out = os.path.join(cfg.PATHS['test_dir'], s)
        # one subdirectory per pytest-xdist worker to avoid clashes
        if 'PYTEST_XDIST_WORKER' in os.environ:
            out = os.path.join(out, os.environ.get('PYTEST_XDIST_WORKER'))
        mkdir(out)
        _TEST_DIR = out
        # If new ident, remove all other dirs so spare space
        for d in os.listdir(cfg.PATHS['test_dir']):
            if d and d != s:
                shutil.rmtree(os.path.join(cfg.PATHS['test_dir'], d))
    return _TEST_DIR
def init_hef(reset=False, border=40, logging_level='INFO'):
    """Set up (or reuse) a fully processed Hintereisferner test glacier.

    Runs the full OGGM preprocessing chain (GIS, centerlines, climate
    calibration, inversion) on the RGI5 Hintereisferner demo glacier and
    returns the resulting GlacierDirectory.  Results are cached in the
    test directory; ``reset=True`` (or a missing/incomplete cache)
    recomputes everything.
    """
    from oggm.core import gis, inversion, climate, centerlines, flowline
    import geopandas as gpd

    # test directory
    testdir = os.path.join(get_test_dir(), 'tmp_border{}'.format(border))
    if not os.path.exists(testdir):
        os.makedirs(testdir)
        reset = True

    # Init
    cfg.initialize(logging_level=logging_level)
    cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
    cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
    cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
    cfg.PARAMS['baseline_climate'] = ''
    cfg.PATHS['working_dir'] = testdir
    cfg.PARAMS['trapezoid_lambdas'] = 1
    cfg.PARAMS['border'] = border

    hef_file = get_demo_file('Hintereisferner_RGI5.shp')
    entity = gpd.read_file(hef_file).iloc[0]

    gdir = oggm.GlacierDirectory(entity, reset=reset)
    # an incomplete previous run (no inversion diagnostics) forces a redo
    if 'inversion_glen_a' not in gdir.get_diagnostics():
        reset = True
        gdir = oggm.GlacierDirectory(entity, reset=reset)
    if not reset:
        return gdir

    # GIS and flowline preprocessing
    gis.define_glacier_region(gdir)
    execute_entity_task(gis.glacier_masks, [gdir])
    execute_entity_task(centerlines.compute_centerlines, [gdir])
    centerlines.initialize_flowlines(gdir)
    centerlines.compute_downstream_line(gdir)
    centerlines.compute_downstream_bedshape(gdir)
    centerlines.catchment_area(gdir)
    centerlines.catchment_intersections(gdir)
    centerlines.catchment_width_geom(gdir)
    centerlines.catchment_width_correction(gdir)

    # climate processing and mass-balance calibration vs. reference data
    climate.process_custom_climate_data(gdir)
    mbdf = gdir.get_ref_mb_data()['ANNUAL_BALANCE']
    res = climate.t_star_from_refmb(gdir, mbdf=mbdf)
    climate.local_t_star(gdir, tstar=res['t_star'], bias=res['bias'])
    climate.mu_star_calibration(gdir)

    inversion.prepare_for_inversion(gdir)
    ref_v = 0.573 * 1e9  # reference ice volume [m3] the inversion must match
    glen_n = cfg.PARAMS['glen_n']

    def to_optimize(x):
        # For backwards compat
        _fd = 1.9e-24 * x[0]
        glen_a = (glen_n+2) * _fd / 2.
        fs = 5.7e-20 * x[1]
        v = inversion.mass_conservation_inversion(gdir, fs=fs,
                                                  glen_a=glen_a)
        return (v - ref_v)**2

    # tune the glen_a and fs multipliers so the inverted volume hits ref_v
    out = optimization.minimize(to_optimize, [1, 1],
                                bounds=((0.01, 10), (0.01, 10)),
                                tol=1e-4)['x']
    _fd = 1.9e-24 * out[0]
    glen_a = (glen_n+2) * _fd / 2.
    fs = 5.7e-20 * out[1]
    # final inversion with the optimized parameters, written to disk
    v = inversion.mass_conservation_inversion(gdir, fs=fs,
                                              glen_a=glen_a,
                                              write=True)

    inversion.filter_inversion_output(gdir)
    inversion.distribute_thickness_interp(gdir, varname_suffix='_interp')
    inversion.distribute_thickness_per_altitude(gdir, varname_suffix='_alt')
    flowline.init_present_time_glacier(gdir)
    return gdir
def init_columbia(reset=False):
    """Set up a Columbia Glacier test directory (centerline flowlines,
    dummy CRU climate), returning the GlacierDirectory.

    The directory is cached; a present 'climate_historical' file means a
    previous run completed and is reused.
    """
    from oggm.core import gis, centerlines
    import geopandas as gpd

    # test directory
    testdir = os.path.join(get_test_dir(), 'tmp_columbia')
    if not os.path.exists(testdir):
        os.makedirs(testdir)
        reset = True

    # Init
    cfg.initialize()
    cfg.PATHS['working_dir'] = testdir
    cfg.PARAMS['use_intersects'] = False
    cfg.PATHS['dem_file'] = get_demo_file('dem_Columbia.tif')
    cfg.PARAMS['border'] = 10
    # tidewater glacier: frontal ablation (k-calving) switched on
    cfg.PARAMS['use_kcalving_for_inversion'] = True
    cfg.PARAMS['use_kcalving_for_run'] = True

    entity = gpd.read_file(get_demo_file('01_rgi60_Columbia.shp')).iloc[0]
    gdir = oggm.GlacierDirectory(entity, reset=reset)
    if gdir.has_file('climate_historical'):
        # already fully processed: reuse the cached directory
        return gdir

    gis.define_glacier_region(gdir)
    gis.glacier_masks(gdir)
    centerlines.compute_centerlines(gdir)
    centerlines.initialize_flowlines(gdir)
    centerlines.compute_downstream_line(gdir)
    centerlines.catchment_area(gdir)
    centerlines.catchment_intersections(gdir)
    centerlines.catchment_width_geom(gdir)
    centerlines.catchment_width_correction(gdir)
    # seeded dummy climate keeps the test deterministic
    tasks.process_dummy_cru_file(gdir, seed=0)
    apply_test_ref_tstars()
    return gdir
def init_columbia_eb(dir_name, reset=False):
    """Set up a Columbia Glacier test dir using elevation-band flowlines.

    Same configuration as init_columbia, but the working directory name
    is caller-supplied and flowlines come from the elevation-band
    algorithm instead of geometrical centerlines.
    """
    from oggm.core import gis, centerlines
    import geopandas as gpd

    # test directory
    testdir = os.path.join(get_test_dir(), dir_name)
    mkdir(testdir, reset=reset)

    # Init
    cfg.initialize()
    cfg.PATHS['working_dir'] = testdir
    cfg.PARAMS['use_intersects'] = False
    cfg.PATHS['dem_file'] = get_demo_file('dem_Columbia.tif')
    cfg.PARAMS['border'] = 10
    # tidewater glacier: frontal ablation (k-calving) switched on
    cfg.PARAMS['use_kcalving_for_inversion'] = True
    cfg.PARAMS['use_kcalving_for_run'] = True

    entity = gpd.read_file(get_demo_file('01_rgi60_Columbia.shp')).iloc[0]
    gdir = oggm.GlacierDirectory(entity)
    if gdir.has_file('climate_historical'):
        # already processed: reuse the cached directory
        return gdir
    gis.define_glacier_region(gdir)
    gis.simple_glacier_masks(gdir)
    centerlines.elevation_band_flowline(gdir)
    centerlines.fixed_dx_elevation_band_flowline(gdir)
    centerlines.compute_downstream_line(gdir)
    # seeded dummy climate keeps the test deterministic
    tasks.process_dummy_cru_file(gdir, seed=0)
    apply_test_ref_tstars()
    return gdir
def characs_apply_func(gdir, d):
    """Extend the glacier statistics dict ``d`` with the fraction of
    glacier cells that lie on the glacier outline."""
    with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
        n_ext = np.sum(ds['glacier_ext'].values)
        n_mask = np.sum(ds['glacier_mask'].values)
        d['glc_ext_num_perc'] = n_ext / n_mask
class TempEnvironmentVariable:
    """Context manager that temporarily sets/unsets environment variables.

    Pass variables as keyword arguments; a value of None means "unset".
    On exit every variable is restored to its previous state.
    Adapted from https://gist.github.com/devhero/7e015f0ce0abacab3880d33c26f07674
    """

    def __init__(self, **kwargs):
        self.envs = kwargs

    def __enter__(self):
        # remember the previous values (None = was unset), then apply ours
        self.old_envs = {name: os.environ.get(name) for name in self.envs}
        for name, value in self.envs.items():
            if value is None:
                os.environ.pop(name, None)
            else:
                os.environ[name] = value

    def __exit__(self, *exc_info):
        # restore the pre-entry state regardless of exceptions
        for name, value in self.old_envs.items():
            if value is None:
                os.environ.pop(name, None)
            else:
                os.environ[name] = value
| bsd-3-clause |
jjx02230808/project0223 | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets

diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target

X /= X.std(axis=0)  # Standardize data (easier to set the l1_ratio parameter)

# Compute paths

eps = 5e-3  # the smaller it is the longer is the path

print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)

print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
    X, y, eps, positive=True, fit_intercept=False)

print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)

print("Computing regularization path using the positive elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)

# Display results

# ten distinct colors, repeated so the dashed (second) family of lines
# reuses the same cycle
color_cycle = 2 * ['b', 'r', 'g', 'c', 'k']

plt.figure(1)
ax = plt.gca()
# set_prop_cycle replaces set_color_cycle, which was deprecated in
# matplotlib 1.5 and removed in 2.0
ax.set_prop_cycle(color=color_cycle)
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')

plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')

plt.figure(2)
ax = plt.gca()
ax.set_prop_cycle(color=color_cycle)
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
              linestyle='--')

plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')

plt.figure(3)
ax = plt.gca()
ax.set_prop_cycle(color=color_cycle)
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
              linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
           loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
gregcaporaso/qiime | qiime/make_bootstrapped_tree.py | 15 | 1352 | #!/usr/bin/env python
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
"""takes a tree and bootstrap support file and writes a pdf, colored by
bootstrap support
"""
from matplotlib import use
use('Agg', warn=False)
from cogent.draw.dendrogram import SquareDendrogram
import os.path
import sys
def write_pdf_bootstrap_tree(tree, output_f, hits_dict):
    """Draw *tree* as a PDF dendrogram with tips colored by bootstrap support.

    ``hits_dict`` maps a tip id (the part of the node name before the
    first '/') to a support fraction.  Colors: < .25 blue, < .5 green,
    < .75 yellow, <= 1.1 red; unnamed nodes, unknown tips, non-numeric
    values and support > 1.1 are black.
    """
    def color_for_node(node):
        # internal/unnamed nodes stay black
        if not node.Name:
            return 'black'
        tip_id = node.Name.split('/')[0]
        try:
            if hits_dict[tip_id] < .25:
                return 'blue'
            elif hits_dict[tip_id] < .5:
                return 'green'
            elif hits_dict[tip_id] < .75:
                return 'yellow'
            elif hits_dict[tip_id] <= 1.1:
                return 'red'
            return 'black'
        # was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; only missing ids (KeyError) and non-comparable values
        # (TypeError) are expected here
        except (KeyError, TypeError):
            return 'black'

    t = SquareDendrogram(tree)
    # Make output size proportional to the tree size, with a 700 px minimum.
    size = max(8 * len(tree.tips()), 700)
    t.drawToPDF(output_f, size, size, edge_color_callback=color_for_node)
| gpl-2.0 |
shikhardb/scikit-learn | doc/conf.py | 16 | 8442 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
    """Ensure an (empty) ``<name>.examples`` file exists under the doc tree.

    The generated docs include a ``.examples`` file for every documented
    object; touching an empty stub up front avoids Sphinx inclusion errors
    for classes / modules that have no examples.
    """
    target = os.path.join(
        app.srcdir, "modules", "generated", "%s.examples" % name)
    if os.path.exists(target):
        return
    # Touch an empty file.
    with open(target, 'w'):
        pass
def setup(app):
    """Sphinx extension entry point: register static JS and autodoc hooks."""
    # to hide/show the prompt in code examples:
    app.add_javascript('js/copybutton.js')
    # Create empty ``.examples`` stubs as autodoc processes each docstring.
    app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
dsm054/pandas | pandas/tests/indexes/period/test_partial_slicing.py | 1 | 5501 | import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Period, PeriodIndex, Series, period_range)
from pandas.util import testing as tm
class TestPeriodIndex(object):
    """Partial-string and slice indexing tests for Period-indexed data."""

    def setup_method(self, method):
        # No shared fixtures needed; kept to satisfy pytest's setup protocol.
        pass

    def test_slice_with_negative_step(self):
        ts = Series(np.arange(20),
                    period_range('2014-01', periods=20, freq='M'))
        SLC = pd.IndexSlice

        def assert_slices_equivalent(l_slc, i_slc):
            # Label-based slicing must match positional slicing, via both
            # plain __getitem__ and .loc.
            tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc])
            tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
            tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])

        # Period objects and partial strings are interchangeable as bounds.
        assert_slices_equivalent(SLC[Period('2014-10')::-1], SLC[9::-1])
        assert_slices_equivalent(SLC['2014-10'::-1], SLC[9::-1])
        assert_slices_equivalent(SLC[:Period('2014-10'):-1], SLC[:8:-1])
        assert_slices_equivalent(SLC[:'2014-10':-1], SLC[:8:-1])
        assert_slices_equivalent(SLC['2015-02':'2014-10':-1], SLC[13:8:-1])
        assert_slices_equivalent(SLC[Period('2015-02'):Period('2014-10'):-1],
                                 SLC[13:8:-1])
        assert_slices_equivalent(SLC['2015-02':Period('2014-10'):-1],
                                 SLC[13:8:-1])
        assert_slices_equivalent(SLC[Period('2015-02'):'2014-10':-1],
                                 SLC[13:8:-1])
        # Forward bounds combined with a negative step yield an empty slice.
        assert_slices_equivalent(SLC['2014-10':'2015-02':-1], SLC[:0])

    def test_slice_with_zero_step_raises(self):
        ts = Series(np.arange(20),
                    period_range('2014-01', periods=20, freq='M'))
        with pytest.raises(ValueError, match='slice step cannot be zero'):
            ts[::0]
        with pytest.raises(ValueError, match='slice step cannot be zero'):
            ts.loc[::0]
        with pytest.raises(ValueError, match='slice step cannot be zero'):
            ts.loc[::0]

    def test_slice_keep_name(self):
        # Slicing must preserve the index's ``name`` attribute.
        idx = period_range('20010101', periods=10, freq='D', name='bob')
        assert idx.name == idx[1:].name

    def test_pindex_slice_index(self):
        pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
        s = Series(np.random.rand(len(pi)), index=pi)
        # Partial-string indexing by year selects that whole year.
        res = s['2010']
        exp = s[0:12]
        tm.assert_series_equal(res, exp)
        res = s['2011']
        exp = s[12:24]
        tm.assert_series_equal(res, exp)

    def test_range_slice_day(self):
        # GH 6716
        didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
        pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
        for idx in [didx, pidx]:
            # slices against index should raise IndexError
            values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
                      '2013/02/01 09:00']
            for v in values:
                with pytest.raises(TypeError):
                    idx[v:]
            s = Series(np.random.rand(len(idx)), index=idx)
            tm.assert_series_equal(s['2013/01/02':], s[1:])
            tm.assert_series_equal(s['2013/01/02':'2013/01/05'], s[1:5])
            tm.assert_series_equal(s['2013/02':], s[31:])
            tm.assert_series_equal(s['2014':], s[365:])
            # Resolutions finer than the index frequency are rejected.
            invalid = ['2013/02/01 9H', '2013/02/01 09:00']
            for v in invalid:
                with pytest.raises(TypeError):
                    idx[v:]

    def test_range_slice_seconds(self):
        # GH 6716
        didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S',
                             periods=4000)
        pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
        for idx in [didx, pidx]:
            # slices against index should raise IndexError
            values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
                      '2013/02/01 09:00']
            for v in values:
                with pytest.raises(TypeError):
                    idx[v:]
            s = Series(np.random.rand(len(idx)), index=idx)
            tm.assert_series_equal(s['2013/01/01 09:05':'2013/01/01 09:10'],
                                   s[300:660])
            tm.assert_series_equal(s['2013/01/01 10:00':'2013/01/01 10:05'],
                                   s[3600:3960])
            tm.assert_series_equal(s['2013/01/01 10H':], s[3600:])
            tm.assert_series_equal(s[:'2013/01/01 09:30'], s[:1860])
            # Coarser resolutions than the index select the entire series.
            for d in ['2013/01/01', '2013/01', '2013']:
                tm.assert_series_equal(s[d:], s)

    def test_range_slice_outofbounds(self):
        # GH 5407
        didx = DatetimeIndex(start='2013/10/01', freq='D', periods=10)
        pidx = PeriodIndex(start='2013/10/01', freq='D', periods=10)
        for idx in [didx, pidx]:
            df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
            # Empty frame with the same index type/dtype for comparisons.
            empty = DataFrame(index=idx.__class__([], freq='D'),
                              columns=['units'])
            empty['units'] = empty['units'].astype('int64')
            # Slices entirely outside the index bounds yield an empty frame;
            # partially-overlapping slices are clipped to the index.
            tm.assert_frame_equal(df['2013/09/01':'2013/09/30'], empty)
            tm.assert_frame_equal(df['2013/09/30':'2013/10/02'], df.iloc[:2])
            tm.assert_frame_equal(df['2013/10/01':'2013/10/02'], df.iloc[:2])
            tm.assert_frame_equal(df['2013/10/02':'2013/09/30'], empty)
            tm.assert_frame_equal(df['2013/10/15':'2013/10/17'], empty)
            tm.assert_frame_equal(df['2013-06':'2013-09'], empty)
            tm.assert_frame_equal(df['2013-11':'2013-12'], empty)
| bsd-3-clause |
petercable/xray | xray/core/dataarray.py | 1 | 41517 | import contextlib
import functools
import warnings
import numpy as np
import pandas as pd
from ..plot.plot import _PlotMethods
from . import indexing
from . import groupby
from . import ops
from . import utils
from . import variable
from .alignment import align
from .common import AbstractArray, BaseDataObject
from .coordinates import DataArrayCoordinates, Indexes
from .dataset import Dataset
from .pycompat import iteritems, basestring, OrderedDict, zip
from .variable import as_variable, _as_compatible_data, Coordinate
from .formatting import format_item
def _infer_coords_and_dims(shape, coords, dims):
    """All the logic for creating a new DataArray.

    Normalizes user-supplied ``coords`` and ``dims`` against the data's
    ``shape`` and returns a ``(coords, dims)`` pair where ``coords`` is
    dict-like and ``dims`` is a list with one name per axis.

    Raises ValueError if a non-dict ``coords`` does not have one entry per
    dimension, and TypeError if a dimension name is not a string.
    """
    if (coords is not None and not utils.is_dict_like(coords) and
            len(coords) != len(shape)):
        raise ValueError('coords is not dict-like, but it has %s items, '
                         'which does not match the %s dimensions of the '
                         'data' % (len(coords), len(shape)))
    # Normalize ``dims`` to a list of names, one per axis.
    if isinstance(dims, basestring):
        dims = [dims]
    if dims is None:
        dims = ['dim_%s' % n for n in range(len(shape))]
    if coords is not None and len(coords) == len(shape):
        # try to infer dimensions from coords
        if utils.is_dict_like(coords):
            dims = list(coords.keys())
        else:
            # Named coordinate objects override the default/placeholder names.
            for n, (dim, coord) in enumerate(zip(dims, coords)):
                if getattr(coord, 'name', None) is None:
                    coord = as_variable(coord, key=dim).to_coord()
                dims[n] = coord.name
    else:
        for d in dims:
            if not isinstance(d, basestring):
                raise TypeError('dimension %s is not a string' % d)
    if coords is not None and not utils.is_dict_like(coords):
        # ensure coordinates have the right dimensions
        coords = [Coordinate(dim, coord, getattr(coord, 'attrs', {}))
                  for dim, coord in zip(dims, coords)]
    # Finally coerce ``coords`` to dict-like, keyed by dimension name.
    if coords is None:
        coords = {}
    elif not utils.is_dict_like(coords):
        coords = OrderedDict(zip(dims, coords))
    return coords, dims
class _LocIndexer(object):
    """Label-based indexing helper backing ``DataArray.loc``."""

    def __init__(self, data_array):
        # DataArray whose indexes are used to translate labels to positions.
        self.data_array = data_array

    def _remap_key(self, key):
        """Translate a label-based key into a positional indexer."""
        def lookup_positions(dim, labels):
            index = self.data_array.indexes[dim]
            return indexing.convert_label_indexer(index, labels)

        if utils.is_dict_like(key):
            # Dict keys name the dimensions explicitly.
            return dict((dim, lookup_positions(dim, labels))
                        for dim, labels in iteritems(key))
        else:
            # expand the indexer so we can handle Ellipsis
            key = indexing.expanded_indexer(key, self.data_array.ndim)
            return tuple(lookup_positions(dim, labels) for dim, labels
                         in zip(self.data_array.dims, key))

    def __getitem__(self, key):
        return self.data_array[self._remap_key(key)]

    def __setitem__(self, key, value):
        self.data_array[self._remap_key(key)] = value
class DataArray(AbstractArray, BaseDataObject):
"""N-dimensional array with labeled coordinates and dimensions.
DataArray provides a wrapper around numpy ndarrays that uses labeled
dimensions and coordinates to support metadata aware operations. The API is
similar to that for the pandas Series or DataFrame, but DataArray objects
can have any number of dimensions, and their contents have fixed data
types.
Additional features over raw numpy arrays:
- Apply operations over dimensions by name: ``x.sum('time')``.
- Select or assign values by integer location (like numpy): ``x[:10]``
or by label (like pandas): ``x.loc['2014-01-01']`` or
``x.sel(time='2014-01-01')``.
- Mathematical operations (e.g., ``x - y``) vectorize across multiple
dimensions (known in numpy as "broadcasting") based on dimension names,
regardless of their original order.
- Keep track of arbitrary metadata in the form of a Python dictionary:
``x.attrs``
- Convert to a pandas Series: ``x.to_series()``.
Getting items from or doing mathematical operations with a DataArray
always returns another DataArray.
Attributes
----------
dims : tuple
Dimension names associated with this array.
values : np.ndarray
Access or modify DataArray values as a numpy array.
coords : dict-like
Dictionary of Coordinate objects that label values along each dimension.
name : str or None
Name of this array.
attrs : OrderedDict
Dictionary for holding arbitrary metadata.
"""
groupby_cls = groupby.DataArrayGroupBy
def __init__(self, data, coords=None, dims=None, name=None,
attrs=None, encoding=None):
"""
Parameters
----------
data : array_like
Values for this array. Must be an ``numpy.ndarray``, ndarray like,
or castable to an ``ndarray``. If a self-described xray or pandas
object, attempts are made to use this array's metadata to fill in
other unspecified arguments. A view of the array's data is used
instead of a copy if possible.
coords : sequence or dict of array_like objects, optional
Coordinates (tick labels) to use for indexing along each dimension.
If dict-like, should be a mapping from dimension names to the
corresponding coordinates.
dims : str or sequence of str, optional
Name(s) of the the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions. If this argument is omited, dimension names
are taken from ``coords`` (if possible) and otherwise default to
``['dim_0', ... 'dim_n']``.
name : str or None, optional
Name of this array.
attrs : dict_like or None, optional
Attributes to assign to the new variable. By default, an empty
attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset', 'dtype',
'units' and 'calendar' (the later two only for datetime arrays).
Unrecognized keys are ignored.
"""
# try to fill in arguments from data if they weren't supplied
if coords is None:
coords = getattr(data, 'coords', None)
if isinstance(data, pd.Series):
coords = [data.index]
elif isinstance(data, pd.DataFrame):
coords = [data.index, data.columns]
elif isinstance(data, (pd.Index, variable.Coordinate)):
coords = [data]
elif isinstance(data, pd.Panel):
coords = [data.items, data.major_axis, data.minor_axis]
if dims is None:
dims = getattr(data, 'dims', getattr(coords, 'dims', None))
if name is None:
name = getattr(data, 'name', None)
if attrs is None:
attrs = getattr(data, 'attrs', None)
if encoding is None:
encoding = getattr(data, 'encoding', None)
data = _as_compatible_data(data)
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
dataset = Dataset(coords=coords)
# insert data afterwards in case of redundant coords/data
dataset[name] = (dims, data, attrs, encoding)
for k, v in iteritems(dataset.coords):
if any(d not in dims for d in v.dims):
raise ValueError('coordinate %s has dimensions %s, but these '
'are not a subset of the DataArray '
'dimensions %s' % (k, v.dims, dims))
# these fully describe a DataArray
self._dataset = dataset
self._name = name
@classmethod
def _new_from_dataset(cls, original_dataset, name):
"""Private constructor for the benefit of Dataset.__getitem__ (skips
all validation)
"""
dataset = original_dataset._copy_listed([name], keep_attrs=False)
if name not in dataset:
# handle virtual variables
try:
_, name = name.split('.', 1)
except Exception:
raise KeyError(name)
if name not in dataset._dims:
dataset._coord_names.discard(name)
return cls._new_from_dataset_no_copy(dataset, name)
@classmethod
def _new_from_dataset_no_copy(cls, dataset, name):
obj = object.__new__(cls)
obj._dataset = dataset
obj._name = name
return obj
def _with_replaced_dataset(self, dataset):
return self._new_from_dataset_no_copy(dataset, self.name)
    def _to_dataset_split(self, dim):
        """Split this array along ``dim`` into a Dataset with one variable
        per index label (variable names are ``str(label)``)."""
        def subset(dim, label):
            array = self.loc[{dim: label}].drop(dim)
            # Per-variable attrs are cleared; the array-level attrs are kept
            # as the resulting Dataset's attrs instead.
            array.attrs = {}
            return array

        variables = OrderedDict([(str(label), subset(dim, label))
                                 for label in self.indexes[dim]])
        coords = self.coords.to_dataset()
        # The split dimension is no longer a coordinate of the result.
        del coords[dim]
        return Dataset(variables, coords, self.attrs)
def _to_dataset_whole(self, name):
if name is None:
return self._dataset.copy()
else:
return self.rename(name)._dataset
def to_dataset(self, dim=None, name=None):
"""Convert a DataArray to a Dataset.
Parameters
----------
dim : str, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : str, optional
Name to substitute for this array's name. Only valid is ``dim`` is
not provided.
Returns
-------
dataset : Dataset
"""
if dim is not None and dim not in self.dims:
warnings.warn('the order of the arguments on DataArray.to_dataset '
'has changed; you now need to supply ``name`` as '
'a keyword argument',
FutureWarning, stacklevel=2)
name = dim
dim = None
if dim is not None:
if name is not None:
raise TypeError('cannot supply both dim and name arguments')
return self._to_dataset_split(dim)
else:
return self._to_dataset_whole(name)
@property
def name(self):
"""The name of this array.
"""
return self._name
@contextlib.contextmanager
def _set_new_dataset(self):
"""Context manager to use for modifying _dataset, in a manner that
can be safely rolled back if an error is encountered.
"""
ds = self._dataset.copy(deep=False)
yield ds
self._dataset = ds
@name.setter
def name(self, value):
with self._set_new_dataset() as ds:
ds.rename({self.name: value}, inplace=True)
self._name = value
@property
def variable(self):
return self._dataset._variables[self.name]
@property
def dtype(self):
return self.variable.dtype
@property
def shape(self):
return self.variable.shape
@property
def size(self):
return self.variable.size
@property
def nbytes(self):
return self.variable.nbytes
@property
def ndim(self):
return self.variable.ndim
def __len__(self):
return len(self.variable)
@property
def data(self):
"""The array's data as a dask or numpy array"""
return self.variable.data
@data.setter
def data(self, value):
self.variable.data = value
@property
def values(self):
"""The array's data as a numpy.ndarray"""
return self.variable.values
@values.setter
def values(self, value):
self.variable.values = value
@property
def _in_memory(self):
return self.variable._in_memory
def to_index(self):
"""Convert this variable to a pandas.Index. Only possible for 1D
arrays.
"""
return self.variable.to_index()
@property
def dims(self):
"""Dimension names associated with this array."""
return self.variable.dims
@dims.setter
def dims(self, value):
raise AttributeError('you cannot assign dims on a DataArray. Use '
'.rename() or .swap_dims() instead.')
def _item_key_to_dict(self, key):
if utils.is_dict_like(key):
return key
else:
key = indexing.expanded_indexer(key, self.ndim)
return dict(zip(self.dims, key))
def __getitem__(self, key):
if isinstance(key, basestring):
return self.coords[key]
else:
# orthogonal array indexing
return self.isel(**self._item_key_to_dict(key))
def __setitem__(self, key, value):
if isinstance(key, basestring):
self.coords[key] = value
else:
# orthogonal array indexing
self.variable[key] = value
def __delitem__(self, key):
del self._dataset[key]
@property
def _attr_sources(self):
"""List of places to look-up items for attribute-style access"""
return [self.coords, self.attrs]
def __contains__(self, key):
return key in self._dataset
@property
def loc(self):
"""Attribute for location based indexing like pandas.
"""
return _LocIndexer(self)
@property
def attrs(self):
"""Dictionary storing arbitrary metadata with this array."""
return self.variable.attrs
@attrs.setter
def attrs(self, value):
self.variable.attrs = value
@property
def encoding(self):
"""Dictionary of format-specific settings for how this array should be
serialized."""
return self.variable.encoding
@encoding.setter
def encoding(self, value):
self.variable.encoding = value
@property
def indexes(self):
"""OrderedDict of pandas.Index objects used for label based indexing
"""
return Indexes(self)
@property
def coords(self):
"""Dictionary-like container of coordinate arrays.
"""
return DataArrayCoordinates(self)
def reset_coords(self, names=None, drop=False, inplace=False):
"""Given names of coordinates, reset them to become variables.
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset, or DataArray if ``drop == True``
"""
if inplace and not drop:
raise ValueError('cannot reset coordinates in-place on a '
'DataArray without ``drop == True``')
if names is None:
names = (self._dataset._coord_names - set(self.dims) -
set([self.name]))
ds = self._dataset.reset_coords(names, drop, inplace)
return ds[self.name] if drop else ds
def load(self):
"""Manually trigger loading of this array's data from disk or a
remote source into memory and return this array.
Normally, it should not be necessary to call this method in user code,
because all xray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
"""
self._dataset.load()
return self
def load_data(self): # pragma: no cover
warnings.warn('the DataArray method `load_data` has been deprecated; '
'use `load` instead',
FutureWarning, stacklevel=2)
return self.load()
def copy(self, deep=True):
"""Returns a copy of this array.
If `deep=True`, a deep copy is made of all variables in the underlying
dataset. Otherwise, a shallow copy is made, so each variable in the new
array's dataset is also a variable in this array's dataset.
"""
ds = self._dataset.copy(deep=deep)
return self._with_replaced_dataset(ds)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatability with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
__hash__ = None
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return self.variable.chunks
def chunk(self, chunks=None):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If neither chunks is not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
Returns
-------
chunked : xray.DataArray
"""
if isinstance(chunks, (list, tuple)):
chunks = dict(zip(self.dims, chunks))
ds = self._dataset.chunk(chunks)
return self._with_replaced_dataset(ds)
def isel(self, **indexers):
"""Return a new DataArray whose dataset is given by integer indexing
along the specified dimension(s).
See Also
--------
Dataset.isel
DataArray.sel
"""
ds = self._dataset.isel(**indexers)
return self._with_replaced_dataset(ds)
def sel(self, method=None, **indexers):
"""Return a new DataArray whose dataset is given by selecting
index labels along the specified dimension(s).
See Also
--------
Dataset.sel
DataArray.isel
"""
return self.isel(**indexing.remap_label_indexers(self, indexers,
method=method))
def isel_points(self, dim='points', **indexers):
"""Return a new DataArray whose dataset is given by pointwise integer
indexing along the specified dimension(s).
See Also
--------
Dataset.isel_points
"""
ds = self._dataset.isel_points(dim=dim, **indexers)
return self._with_replaced_dataset(ds)
def sel_points(self, dim='points', method=None, **indexers):
"""Return a new DataArray whose dataset is given by pointwise selection
of index labels along the specified dimension(s).
See Also
--------
Dataset.sel_points
"""
ds = self._dataset.sel_points(dim=dim, method=method, **indexers)
return self._with_replaced_dataset(ds)
def reindex_like(self, other, method=None, copy=True):
"""Conform this object onto the indexes of another object, filling
in missing values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mis-matched index values will be filled in with
NaN, and any mis-matched dimension names will simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values from other not found on this
data array:
* None (default): don't fill gaps
* pad / ffill: propgate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
copy : bool, optional
If `copy=True`, the returned array's dataset contains only copied
variables. If `copy=False` and no reindexing is required then
original variables from this array's dataset are returned.
Returns
-------
reindexed : DataArray
Another dataset array, with this array's data but coordinates from
the other object.
See Also
--------
DataArray.reindex
align
"""
return self.reindex(method=method, copy=copy, **other.indexes)
def reindex(self, method=None, copy=True, **indexers):
"""Conform this object onto a new set of indexes, filling in
missing values with NaN.
Parameters
----------
copy : bool, optional
If `copy=True`, the returned array's dataset contains only copied
variables. If `copy=False` and no reindexing is required then
original variables from this array's dataset are returned.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found on
this data array:
* None (default): don't fill gaps
* pad / ffill: propgate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
**indexers : dict
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mis-matched coordinate values
will be filled in with NaN, and any mis-matched dimension names will
simply be ignored.
Returns
-------
reindexed : DataArray
Another dataset array, with this array's data but replaced
coordinates.
See Also
--------
DataArray.reindex_like
align
"""
ds = self._dataset.reindex(method=method, copy=copy, **indexers)
return self._with_replaced_dataset(ds)
def rename(self, new_name_or_name_dict):
"""Returns a new DataArray with renamed coordinates and/or a new name.
Parameters
----------
new_name_or_name_dict : str or dict-like
If the argument is dict-like, it it used as a mapping from old
names to new names for coordinates (and/or this array itself).
Otherwise, use the argument as the new name for this array.
Returns
-------
renamed : DataArray
Renamed array or array with renamed coordinates.
See Also
--------
Dataset.rename
DataArray.swap_dims
"""
if utils.is_dict_like(new_name_or_name_dict):
name_dict = new_name_or_name_dict
new_name = name_dict.get(self.name, self.name)
else:
new_name = new_name_or_name_dict
name_dict = {self.name: new_name}
renamed_dataset = self._dataset.rename(name_dict)
return renamed_dataset[new_name]
    def swap_dims(self, dims_dict):
        """Returns a new DataArray with swapped dimensions.

        Parameters
        ----------
        dims_dict : dict-like
            Dictionary whose keys are current dimension names and whose values
            are new names. Each value must already be a coordinate on this
            array.

        Returns
        -------
        renamed : DataArray
            DataArray with swapped dimensions.

        See Also
        --------
        DataArray.rename
        Dataset.swap_dims
        """
        # NOTE(review): the old docstring documented an ``inplace`` parameter
        # that this method does not accept, and described the return value as
        # a Dataset; both corrected above. Delegates to Dataset.swap_dims.
        ds = self._dataset.swap_dims(dims_dict)
        return self._with_replaced_dataset(ds)
def transpose(self, *dims):
"""Return a new DataArray object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : DataArray
The returned DataArray's array is transposed.
Notes
-----
Although this operation returns a view of this array's data, it is
not lazy -- the data will be fully loaded.
See Also
--------
numpy.transpose
Dataset.transpose
"""
ds = self._dataset.copy()
ds[self.name] = self.variable.transpose(*dims)
return self._with_replaced_dataset(ds)
def squeeze(self, dim=None):
"""Return a new DataArray object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : DataArray
This array, but with with all or a subset of the dimensions of
length 1 removed.
Notes
-----
Although this operation returns a view of this array's data, it is
not lazy -- the data will be fully loaded.
See Also
--------
numpy.squeeze
"""
ds = self._dataset.squeeze(dim)
return self._with_replaced_dataset(ds)
def drop(self, labels, dim=None):
"""Drop coordinates or index labels from this DataArray.
Parameters
----------
labels : str
Names of coordinate variables or index labels to drop.
dim : str, optional
Dimension along which to drop index labels. By default (if
``dim is None``), drops coordinates rather than index labels.
Returns
-------
dropped : DataArray
"""
if utils.is_scalar(labels):
labels = [labels]
if dim is None and self.name in labels:
raise ValueError('cannot drop this DataArray from itself')
ds = self._dataset.drop(labels, dim)
return self._with_replaced_dataset(ds)
def dropna(self, dim, how='any', thresh=None):
"""Returns a new array with dropped labels for missing values along
the provided dimension.
Parameters
----------
dim : str
Dimension along which to drop missing values. Dropping along
multiple dimensions simultaneously is not yet supported.
how : {'any', 'all'}, optional
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
If supplied, require this many non-NA values.
Returns
-------
DataArray
"""
ds = self._dataset.dropna(dim, how=how, thresh=thresh)
return self._with_replaced_dataset(ds)
def fillna(self, value):
"""Fill missing values in this object.
This operation follows the normal broadcasting and alignment rules that
xray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : scalar, ndarray or DataArray
Used to fill all matching missing values in this array. If the
argument is a DataArray, it is first aligned with (reindexed to)
this array.
Returns
-------
DataArray
"""
if utils.is_dict_like(value):
raise TypeError('cannot provide fill value as a dictionary with '
'fillna on a DataArray')
return self._fillna(value)
def reduce(self, func, dim=None, axis=None, keep_attrs=False, **kwargs):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to repeatedly apply `func`. Only one of the
'dim' and 'axis' arguments can be supplied. If neither are
supplied, then the reduction is calculated over the flattened array
(by calling `f(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : DataArray
DataArray with this object's array replaced with an array with
summarized data and the indicated dimension(s) removed.
"""
var = self.variable.reduce(func, dim, axis, keep_attrs, **kwargs)
ds = self._dataset.drop(set(self.dims) - set(var.dims))
ds[self.name] = var
return self._with_replaced_dataset(ds)
def to_pandas(self):
"""Convert this array into a pandas object with the same shape.
The type of the returned object depends on the number of DataArray
dimensions:
* 1D -> `pandas.Series`
* 2D -> `pandas.DataFrame`
* 3D -> `pandas.Panel`
Only works for arrays with 3 or fewer dimensions.
The DataArray constructor performs the inverse transformation.
"""
# TODO: consolidate the info about pandas constructors and the
# attributes that correspond to their indexes into a separate module?
constructors = {0: lambda x: x,
1: pd.Series,
2: pd.DataFrame,
3: pd.Panel}
try:
constructor = constructors[self.ndim]
except KeyError:
raise ValueError('cannot convert arrays with %s dimensions into '
'pandas objects' % self.ndim)
return constructor(self.values, *self.indexes.values())
def to_dataframe(self):
"""Convert this array and its coordinates into a tidy pandas.DataFrame.
The DataFrame is indexed by the Cartesian product of index coordinates
(in the form of a :py:class:`pandas.MultiIndex`).
Other coordinates are included as columns in the DataFrame.
"""
# TODO: add a 'name' parameter
dims = OrderedDict(zip(self.dims, self.shape))
return self._dataset._to_dataframe(dims)
def to_series(self):
"""Convert this array into a pandas.Series.
The Series is indexed by the Cartesian product of index coordinates
(in the form of a :py:class:`pandas.MultiIndex`).
"""
index = self.coords.to_index()
return pd.Series(self.values.reshape(-1), index=index, name=self.name)
def to_masked_array(self, copy=True):
"""Convert this array into a numpy.ma.MaskedArray
Parameters
----------
copy : bool
If True (default) make a copy of the array in the result. If False,
a MaskedArray view of DataArray.values is returned.
Returns
-------
result : MaskedArray
Masked where invalid values (nan or inf) occur.
"""
isnull = pd.isnull(self.values)
return np.ma.masked_where(isnull, self.values, copy=copy)
@classmethod
def from_series(cls, series):
"""Convert a pandas.Series into an xray.DataArray.
If the series's index is a MultiIndex, it will be expanded into a
tensor product of one-dimensional coordinates (filling in missing values
with NaN). Thus this operation should be the inverse of the `to_series`
method.
"""
# TODO: add a 'name' parameter
df = pd.DataFrame({series.name: series})
ds = Dataset.from_dataframe(df)
return cls._new_from_dataset_no_copy(ds, series.name)
def to_cdms2(self):
"""Convert this array into a cdms2.Variable
"""
from ..convert import to_cdms2
return to_cdms2(self)
@classmethod
def from_cdms2(cls, variable):
"""Convert a cdms2.Variable into an xray.DataArray
"""
from ..convert import from_cdms2
return from_cdms2(variable)
def _all_compat(self, other, compat_str):
"""Helper function for equals and identical"""
def compat(x, y):
return getattr(x.variable, compat_str)(y.variable)
return (utils.dict_equiv(self.coords, other.coords, compat=compat) and
compat(self, other))
def broadcast_equals(self, other):
"""Two DataArrays are broadcast equal if they are equal after
broadcasting them against each other such that they have the same
dimensions.
See Also
--------
DataArray.equals
DataArray.identical
"""
try:
return self._all_compat(other, 'broadcast_equals')
except (TypeError, AttributeError):
return False
def equals(self, other):
"""True if two DataArrays have the same dimensions, coordinates and
values; otherwise False.
DataArrays can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for ``DataArray``
does element-wise comparisions (like numpy.ndarrays).
See Also
--------
DataArray.broadcast_equals
DataArray.identical
"""
try:
return self._all_compat(other, 'equals')
except (TypeError, AttributeError):
return False
def identical(self, other):
"""Like equals, but also checks the array name and attributes, and
attributes on all coordinates.
See Also
--------
DataArray.broadcast_equals
DataArray.equal
"""
try:
return (self.name == other.name and
self._all_compat(other, 'identical'))
except (TypeError, AttributeError):
return False
    # sentinel distinguishing "other has no .name attribute" from name=None
    __default_name = object()

    def _result_name(self, other=None):
        # Pick the name for the result of an operation involving this array
        # (and optionally ``other``), or None if no safe name exists.
        if self.name in self.dims:
            # these names match dimension, so if we preserve them we will also
            # rename indexes
            return None
        if other is None:
            # shortcut
            return self.name
        # note: self.__default_name is name-mangled to the enclosing class
        other_name = getattr(other, 'name', self.__default_name)
        other_dims = getattr(other, 'dims', ())
        if other_name in other_dims:
            # same trouble as above
            return None
        # use the same naming heuristics as pandas:
        # https://github.com/ContinuumIO/blaze/issues/458#issuecomment-51936356
        if other_name is self.__default_name or other_name == self.name:
            return self.name
        return None
def __array_wrap__(self, obj, context=None):
new_var = self.variable.__array_wrap__(obj, context)
ds = self.coords.to_dataset()
name = self._result_name()
ds[name] = new_var
return self._new_from_dataset_no_copy(ds, name)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
return self.__array_wrap__(f(self.variable.data, *args, **kwargs))
return func
    @staticmethod
    def _binary_op(f, reflexive=False, join='inner', **ignored_kwargs):
        # Build a DataArray binary method from the array-level function ``f``.
        # ``reflexive=True`` swaps operand order (for __radd__ and friends).
        @functools.wraps(f)
        def func(self, other):
            if isinstance(other, (Dataset, groupby.GroupBy)):
                # defer to the other object's implementation
                return NotImplemented
            if hasattr(other, 'indexes'):
                # align labelled operands before computing
                self, other = align(self, other, join=join, copy=False)
                empty_indexes = [d for d, s in zip(self.dims, self.shape)
                                 if s == 0]
                if empty_indexes:
                    raise ValueError('no overlapping labels for some '
                                     'dimensions: %s' % empty_indexes)
            # plain arrays/scalars have no coords/variable attributes
            other_coords = getattr(other, 'coords', None)
            other_variable = getattr(other, 'variable', other)
            ds = self.coords.merge(other_coords)
            name = self._result_name(other)
            ds[name] = (f(self.variable, other_variable)
                        if not reflexive
                        else f(other_variable, self.variable))
            result = self._new_from_dataset_no_copy(ds, name)
            return result
        return func
    @staticmethod
    def _inplace_binary_op(f):
        # Build an in-place DataArray method (e.g. __iadd__) from ``f``.
        @functools.wraps(f)
        def func(self, other):
            if isinstance(other, groupby.GroupBy):
                raise TypeError('in-place operations between a DataArray and '
                                'a grouped object are not permitted')
            # plain arrays/scalars have no coords/variable attributes
            other_coords = getattr(other, 'coords', None)
            other_variable = getattr(other, 'variable', other)
            # merge coordinates in-place, then mutate the variable via f
            with self.coords._merge_inplace(other_coords):
                f(self.variable, other_variable)
            return self
        return func
    @property
    def plot(self):
        '''
        Access plotting functions.

        >>> d = DataArray([[1, 2], [3, 4]])

        For convenience just call this directly

        >>> d.plot()

        Or use it as a namespace to use xray.plot functions as
        DataArray methods

        >>> d.plot.imshow()  # equivalent to xray.plot.imshow(d)
        '''
        # _PlotMethods is both callable and a namespace of plot functions
        return _PlotMethods(self)
def _title_for_slice(self, truncate=50):
'''
If the dataarray has 1 dimensional coordinates or comes from a slice
we can show that info in the title
Parameters
----------
truncate : integer
maximum number of characters for title
Returns
-------
title : string
Can be used for plot titles
'''
one_dims = []
for dim, coord in iteritems(self.coords):
if coord.size == 1:
one_dims.append('{dim} = {v}'.format(
dim=dim, v=format_item(coord.values)))
title = ', '.join(one_dims)
if len(title) > truncate:
title = title[:(truncate - 3)] + '...'
return title
def diff(self, dim, n=1, label='upper'):
"""Calculate the n-th order discrete difference along given axis.
Parameters
----------
dim : str, optional
Dimension over which to calculate the finite difference.
n : int, optional
The number of times values are differenced.
label : str, optional
The new coordinate in dimension ``dim`` will have the
values of either the minuend's or subtrahend's coordinate
for values 'upper' and 'lower', respectively. Other
values are not supported.
Returns
-------
difference : same type as caller
The n-th order finite differnce of this object.
Examples
--------
>>> arr = xray.DataArray([5, 5, 6, 6], [[1, 2, 3, 4]], ['x'])
>>> arr.diff('x')
<xray.DataArray (x: 3)>
array([0, 1, 0])
Coordinates:
* x (x) int64 2 3 4
>>> arr.diff('x', 2)
<xray.DataArray (x: 2)>
array([ 1, -1])
Coordinates:
* x (x) int64 3 4
"""
ds = self._dataset.diff(n=n, dim=dim, label=label)
return self._with_replaced_dataset(ds)
    @property
    def real(self):
        """The real part of this array's values, as a new DataArray."""
        return self._with_replaced_dataset(self._dataset.real)
    @property
    def imag(self):
        """The imaginary part of this array's values, as a new DataArray."""
        return self._with_replaced_dataset(self._dataset.imag)
# priority must be higher than Variable to properly work with binary ufuncs
ops.inject_all_ops_and_reduce_methods(DataArray, priority=60)
| apache-2.0 |
ryfeus/lambda-packs | HDF4_H5_NETCDF/source2.7/numpy/lib/polynomial.py | 6 | 38467 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
    """Warning issued by `polyfit` when the Vandermonde matrix is rank
    deficient.

    See `polyfit` for more information, for a way to suppress the warning,
    and for an example of `RankWarning` being issued.
    """
    pass
def poly(seq_of_zeros):
    """
    Find the coefficients of a polynomial with the given sequence of roots.

    Returns the coefficients of the monic polynomial whose zeros are the
    given sequence (multiple roots must be repeated according to their
    multiplicity). A square matrix may also be given, in which case the
    coefficients of its characteristic polynomial are returned.

    Parameters
    ----------
    seq_of_zeros : array_like, shape (N,) or (N, N)
        A sequence of polynomial roots, or a square array or matrix object.

    Returns
    -------
    c : ndarray
        1D array of polynomial coefficients from highest to lowest degree:
        ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
        where c[0] always equals 1.

    Raises
    ------
    ValueError
        If input is the wrong shape (the input must be a 1-D or square
        2-D array).

    See Also
    --------
    polyval, roots, polyfit, poly1d

    Examples
    --------
    >>> np.poly((0, 0, 0))  # Multiple root example
    array([1, 0, 0, 0])
    >>> np.poly((-1./2, 0, 1./2))
    array([ 1.  ,  0.  , -0.25,  0.  ])
    """
    zeros = atleast_1d(seq_of_zeros)
    shape = zeros.shape

    if len(shape) == 2 and shape[0] == shape[1] and shape[0] != 0:
        # square matrix: its eigenvalues are the roots
        zeros = eigvals(zeros)
    elif len(shape) == 1:
        dt = zeros.dtype
        # Let object arrays slip through, e.g. for arbitrary precision
        if dt != object:
            zeros = zeros.astype(mintypecode(dt.char))
    else:
        raise ValueError("input must be 1d or non-empty square 2d array.")

    if len(zeros) == 0:
        # empty root list -> the constant polynomial 1
        return 1.0

    dt = zeros.dtype
    coeffs = ones((1,), dtype=dt)
    # multiply out (x - r) factors one at a time via convolution
    for root in zeros:
        coeffs = NX.convolve(coeffs, array([1, -root], dtype=dt),
                             mode='full')

    if issubclass(coeffs.dtype.type, NX.complexfloating):
        # if complex roots are all complex conjugates, the polynomial is real
        candidates = NX.asarray(zeros, complex)
        if NX.all(NX.sort(candidates) == NX.sort(candidates.conjugate())):
            coeffs = coeffs.real.copy()

    return coeffs
def roots(p):
    """
    Return the roots of a polynomial with coefficients given in p.

    The values in the rank-1 array `p` are coefficients of a polynomial.
    If the length of `p` is n+1 then the polynomial is described by::

      p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]

    Parameters
    ----------
    p : array_like
        Rank-1 array of polynomial coefficients.

    Returns
    -------
    out : ndarray
        An array containing the roots of the polynomial.

    Raises
    ------
    ValueError
        When `p` cannot be converted to a rank-1 array.

    See also
    --------
    poly, polyval, polyfit, poly1d

    Notes
    -----
    The algorithm relies on computing the eigenvalues of the
    companion matrix.

    Examples
    --------
    >>> coeff = [3.2, 2, 1]
    >>> np.roots(coeff)
    array([-0.3125+0.46351241j, -0.3125-0.46351241j])
    """
    # If input is scalar, this makes it an array
    p = atleast_1d(p)
    if p.ndim != 1:
        raise ValueError("Input must be a rank-1 array.")

    nonzero_idx = NX.nonzero(NX.ravel(p))[0]
    if len(nonzero_idx) == 0:
        # all-zero polynomial: no roots
        return NX.array([])

    # trailing zero coefficients correspond to roots at zero
    num_zero_roots = len(p) - nonzero_idx[-1] - 1
    # strip leading and trailing zeros
    p = p[int(nonzero_idx[0]):int(nonzero_idx[-1]) + 1]

    # promote integer coefficients to floating point
    if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
        p = p.astype(float)

    if len(p) > 1:
        # build companion matrix and find its eigenvalues (the roots)
        companion = diag(NX.ones((len(p) - 2,), p.dtype), -1)
        companion[0, :] = -p[1:] / p[0]
        found = eigvals(companion)
    else:
        found = NX.array([])

    # tack the zero roots onto the back of the array
    return hstack((found, NX.zeros(num_zero_roots, found.dtype)))
def polyint(p, m=1, k=None):
    """
    Return an antiderivative (indefinite integral) of a polynomial.

    The returned order `m` antiderivative `P` of polynomial `p` satisfies
    :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
    integration constants `k`. The constants determine the low-order
    polynomial part

    .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}

    of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.

    Parameters
    ----------
    p : array_like or poly1d
        Polynomial to integrate.
        A sequence is interpreted as polynomial coefficients, see `poly1d`.
    m : int, optional
        Order of the antiderivative. (Default: 1)
    k : list of `m` scalars or scalar, optional
        Integration constants. They are given in the order of integration:
        those corresponding to highest-order terms come first.
        If ``None`` (default), all constants are assumed to be zero.
        If `m = 1`, a single scalar can be given instead of a list.

    Returns
    -------
    antiderivative : ndarray or poly1d
        The order-`m` antiderivative; a poly1d object if `p` was one.

    Raises
    ------
    ValueError
        If `m` is negative, or if `k` has fewer than `m` elements.

    See Also
    --------
    polyder : derivative of a polynomial
    poly1d.integ : equivalent method

    Examples
    --------
    The defining property of the antiderivative:

    >>> p = np.poly1d([1,1,1])
    >>> P = np.polyint(p)
    >>> P
    poly1d([ 0.33333333,  0.5       ,  1.        ,  0.        ])
    >>> np.polyder(P) == p
    True

    The integration constants default to zero, but can be specified:

    >>> P = np.polyint(p, 3, k=[6,5,3])
    >>> P(0)
    3.0
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of integral must be positive (see polyder)")
    if k is None:
        k = NX.zeros(m, float)
    k = atleast_1d(k)
    # broadcast a single constant across all m integrations
    if len(k) == 1 and m > 1:
        k = k[0]*NX.ones(m, float)
    if len(k) < m:
        raise ValueError(
            "k must be a scalar or a rank-1 array of length 1 or >m.")
    truepoly = isinstance(p, poly1d)
    p = NX.asarray(p)
    if m == 0:
        if truepoly:
            return poly1d(p)
        return p
    else:
        # Note: this must work also with object and integer arrays
        y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
        val = polyint(y, m - 1, k=k[1:])
        if truepoly:
            return poly1d(val)
        return val
def polyder(p, m=1):
    """
    Return the derivative of the specified order of a polynomial.

    Parameters
    ----------
    p : poly1d or sequence
        Polynomial to differentiate.
        A sequence is interpreted as polynomial coefficients, see `poly1d`.
    m : int, optional
        Order of differentiation (default: 1)

    Returns
    -------
    der : ndarray or poly1d
        A new polynomial representing the derivative; a poly1d object if
        `p` was one.

    Raises
    ------
    ValueError
        If `m` is negative.

    See Also
    --------
    polyint : Anti-derivative of a polynomial.
    poly1d : Class for one-dimensional polynomials.

    Examples
    --------
    >>> p = np.poly1d([1,1,1,1])
    >>> np.polyder(p)
    poly1d([3, 2, 1])
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of derivative must be positive (see polyint)")

    was_poly1d = isinstance(p, poly1d)
    coeffs = NX.asarray(p)
    if m == 0:
        result = coeffs
    else:
        # differentiate once, then recurse for the remaining m-1 orders
        degree = len(coeffs) - 1
        once = coeffs[:-1] * NX.arange(degree, 0, -1)
        result = polyder(once, m - 1)
    return poly1d(result) if was_poly1d else result
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.

    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
    to points `(x, y)`. Returns a vector of coefficients `p` that minimises
    the squared error.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting polynomial
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps.
    full : bool, optional
        When False (the default) just the coefficients are returned; when
        True diagnostic information from the singular value decomposition
        is also returned.
    w : array_like, shape (M,), optional
        Weights to apply to the y-coordinates of the sample points. For
        gaussian uncertainties, use 1/sigma (not 1/sigma**2).
    cov : bool, optional
        Return the estimate and the covariance matrix of the estimate.
        If full is True, then cov is not returned.

    Returns
    -------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
        Polynomial coefficients, highest power first. If `y` was 2-D, the
        coefficients for `k`-th data set are in ``p[:,k]``.
    residuals, rank, singular_values, rcond
        Present only if `full` = True; see `linalg.lstsq`.
    V : ndarray, shape (M,M) or (M,M,K)
        Present only if `full` = False and `cov` = True. The covariance
        matrix of the polynomial coefficient estimates.

    Raises
    ------
    ValueError
        If `deg` is negative, or (with ``cov=True``) too few points.
    TypeError
        If `x`, `y` or `w` have invalid shape or length.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. Only raised if `full` = False.

    See Also
    --------
    polyval : Compute polynomial values.
    linalg.lstsq : Computes a least-squares fit.
    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # validate arguments
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    if rcond is None:
        # default cutoff scales with the float type's precision
        rcond = len(x)*finfo(x.dtype).eps

    # least squares system: vander(x) @ p ~= y
    lhs = vander(x, order)
    rhs = y

    # apply weighting to both sides
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        lhs *= w[:, NX.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, NX.newaxis]
        else:
            rhs *= w

    # scale lhs columns to improve condition number, then solve
    scale = NX.sqrt((lhs*lhs).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = lstsq(lhs, rhs, rcond)
    c = (c.T/scale).T  # undo the column scaling on the coefficients

    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        warnings.warn("Polyfit may be poorly conditioned", RankWarning,
                      stacklevel=2)

    if full:
        return c, resids, rank, s, rcond
    if cov:
        Vbase = inv(dot(lhs.T, lhs))
        Vbase /= NX.outer(scale, scale)
        # Some literature ignores the extra -2.0 factor in the denominator,
        # but it is included here because the covariance of Multivariate
        # Student-T (which is implied by a Bayesian uncertainty analysis)
        # includes it. Plus, it gives a slightly more conservative estimate.
        if len(x) <= order + 2:
            raise ValueError("the number of data points must exceed order + 2 "
                             "for Bayesian estimate the covariance matrix")
        fac = resids / (len(x) - order - 2.0)
        if y.ndim == 1:
            return c, Vbase * fac
        return c, Vbase[:, :, NX.newaxis] * fac
    return c
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    If `p` is of length N, this function returns the value:

        ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``

    If `x` is a sequence, then `p(x)` is returned for each element of `x`.
    If `x` is another polynomial then the composite polynomial `p(x(t))`
    is returned.

    Parameters
    ----------
    p : array_like or poly1d object
        1D array of polynomial coefficients (including coefficients equal
        to zero) from highest degree to the constant term, or an
        instance of poly1d.
    x : array_like or poly1d object
        A number, an array of numbers, or an instance of poly1d, at
        which to evaluate `p`.

    Returns
    -------
    values : ndarray or poly1d
        The evaluated polynomial; a poly1d if `x` is one.

    See Also
    --------
    poly1d: A polynomial class.

    Notes
    -----
    Horner's scheme is used to evaluate the polynomial. Even so, for
    polynomials of high degree the values may be inaccurate due to
    rounding errors. Use carefully.

    Examples
    --------
    >>> np.polyval([3,0,1], 5)  # 3 * 5**2 + 0 * 5**1 + 1
    76
    """
    p = NX.asarray(p)
    if isinstance(x, poly1d):
        # composing polynomials: start from the zero polynomial
        acc = 0
    else:
        x = NX.asarray(x)
        acc = NX.zeros_like(x)
    # Horner's scheme: fold in one coefficient per step
    for coefficient in p:
        acc = acc * x + coefficient
    return acc
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    Returns the polynomial resulting from the sum of two input polynomials.
    Each input must be either a poly1d object or a 1D sequence of polynomial
    coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The sum of the inputs. If either input is a poly1d object, then the
        output is also a poly1d object. Otherwise, it is a 1D array of
        polynomial coefficients from highest to lowest degree.

    See Also
    --------
    poly1d, poly, polyder, polydiv, polyfit, polyint, polysub, polyval

    Examples
    --------
    >>> np.polyadd([1, 2], [9, 5, 4])
    array([9, 6, 6])
    """
    wants_poly1d = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    # zero-pad the shorter coefficient array on the left before adding
    pad = len(a2) - len(a1)
    if pad > 0:
        a1 = NX.concatenate((NX.zeros(pad, a1.dtype), a1))
    elif pad < 0:
        a2 = NX.concatenate((NX.zeros(-pad, a2.dtype), a2))
    total = a1 + a2
    return poly1d(total) if wants_poly1d else total
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``. Each input
    is either a poly1d object or a 1-D sequence of coefficients (highest
    degree first, zeros included).

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend polynomials, respectively.

    Returns
    -------
    out : ndarray or poly1d
        Coefficients of the difference polynomial. A poly1d is returned
        whenever either input was a poly1d.
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    c1 = atleast_1d(a1)
    c2 = atleast_1d(a2)
    # Align both coefficient vectors on the constant term by left-padding
    # the shorter one with zeros, then subtract element-wise.
    width = max(len(c1), len(c2))
    if len(c1) < width:
        c1 = NX.concatenate((NX.zeros(width - len(c1), c1.dtype), c1))
    if len(c2) < width:
        c2 = NX.concatenate((NX.zeros(width - len(c2), c2.dtype), c2))
    result = c1 - c2
    return poly1d(result) if wrap else result
def polymul(a1, a2):
    """
    Find the product of two polynomials.

    Each input is either a poly1d object or a 1-D sequence of polynomial
    coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        Coefficients of the product, highest degree first. A poly1d is
        returned whenever either input was a poly1d.

    See Also
    --------
    convolve : Array convolution; polynomial multiplication is a
        convolution of coefficient sequences.
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    # poly1d() normalises either operand to a trimmed 1-D coefficient
    # array; convolving those arrays multiplies the polynomials.
    product = NX.convolve(poly1d(a1), poly1d(a2))
    return poly1d(product) if wrap else product
def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.

    The input arrays are the coefficients (including any coefficients
    equal to zero) of the "numerator" (dividend) and "denominator"
    (divisor) polynomials, respectively.

    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients.
    v : array_like or poly1d
        Divisor polynomial's coefficients.

    Returns
    -------
    q : ndarray
        Coefficients, including those equal to zero, of the quotient.
    r : ndarray
        Coefficients, including those equal to zero, of the remainder.
        Both are returned as poly1d objects if either input was a poly1d.

    Notes
    -----
    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
    not equal `v.ndim`. In other words, all four possible combinations -
    ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
    ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.

    Examples
    --------
    .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25

    >>> x = np.array([3.0, 5.0, 2.0])
    >>> y = np.array([2.0, 1.0])
    >>> np.polydiv(x, y)
    (array([ 1.5 ,  1.75]), array([ 0.25]))
    """
    # Bug fix: the original tested `isinstance(u, poly1d)` twice, so a
    # poly1d *divisor* did not cause the results to be wrapped in poly1d.
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    u = atleast_1d(u) + 0.0
    v = atleast_1d(v) + 0.0
    # w has the common type
    w = u[0] + v[0]
    m = len(u) - 1
    n = len(v) - 1
    scale = 1. / v[0]
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.astype(w.dtype)
    # Classic long division: eliminate the leading remainder coefficient
    # at each step and record the multiplier as a quotient coefficient.
    for k in range(0, m-n+1):
        d = scale * r[k]
        q[k] = d
        r[k:k+n+1] -= d*v
    # Strip (numerically) zero leading coefficients from the remainder,
    # always keeping at least one coefficient.
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r
# Matches an exponent written inline as "**N" and captures the digits N.
_poly_mat = re.compile(r"[*][*]([0-9]*)")

def _raise_power(astr, wrap=70):
    # Re-typeset a single-line polynomial string, converting inline
    # exponents ("x**2") into a two-line layout where the exponent digits
    # sit on the line above the term. `line1` accumulates the exponent
    # row, `line2` the term row; `wrap` caps the width of each row before
    # a new pair of rows is started.
    n = 0
    line1 = ''
    line2 = ''
    output = ' '
    while True:
        mat = _poly_mat.search(astr, n)
        if mat is None:
            break
        span = mat.span()
        power = mat.groups()[0]
        partstr = astr[n:span[0]]
        n = span[1]
        # Pad both rows so the exponent lands directly above the end of
        # its term and subsequent columns stay aligned.
        toadd2 = partstr + ' '*(len(power)-1)
        toadd1 = ' '*(len(partstr)-1) + power
        if ((len(line2) + len(toadd2) > wrap) or
                (len(line1) + len(toadd1) > wrap)):
            # Current rows are full: flush them and start fresh ones.
            output += line1 + "\n" + line2 + "\n "
            line1 = toadd1
            line2 = toadd2
        else:
            line2 += partstr + ' '*(len(power)-1)
            line1 += ' '*(len(partstr)-1) + power
    output += line1 + "\n" + line2
    # Append whatever trails the last exponent (e.g. the constant term).
    return output + astr[n:]
class poly1d(object):
    """
    A one-dimensional polynomial class.

    A convenience class, used to encapsulate "natural" operations on
    polynomials so that said operations may take on their customary
    form in code (see Examples).

    Parameters
    ----------
    c_or_r : array_like
        The polynomial's coefficients, in decreasing powers, or if
        the value of the second parameter is True, the polynomial's
        roots (values where the polynomial evaluates to 0). For example,
        ``poly1d([1, 2, 3])`` returns an object that represents
        :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
        one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
    r : bool, optional
        If True, `c_or_r` specifies the polynomial's roots; the default
        is False.
    variable : str, optional
        Changes the variable used when printing `p` from `x` to `variable`
        (see Examples).

    Examples
    --------
    Construct the polynomial :math:`x^2 + 2x + 3`:

    >>> p = np.poly1d([1, 2, 3])
    >>> print(np.poly1d(p))
       2
    1 x + 2 x + 3

    Evaluate the polynomial at :math:`x = 0.5`:

    >>> p(0.5)
    4.25

    Find the roots:

    >>> p.r
    array([-1.+1.41421356j, -1.-1.41421356j])
    >>> p(p.r)
    array([ -4.44089210e-16+0.j,  -4.44089210e-16+0.j])

    These numbers in the previous line represent (0, 0) to machine precision

    Show the coefficients:

    >>> p.c
    array([1, 2, 3])

    Display the order (the leading zero-coefficients are removed):

    >>> p.order
    2

    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(i+1)]``):

    >>> p[1]
    2

    Polynomials can be added, subtracted, multiplied, and divided
    (returns quotient and remainder):

    >>> p * p
    poly1d([ 1,  4, 10, 12,  9])
    >>> (p**3 + 4) / p
    (poly1d([  1.,   4.,  10.,  12.,   9.]), poly1d([ 4.]))

    ``asarray(p)`` gives the coefficient array, so polynomials can be
    used in all functions that accept arrays:

    >>> p**2  # square of polynomial
    poly1d([ 1,  4, 10, 12,  9])
    >>> np.square(p)  # square of individual coefficients
    array([1, 4, 9])

    The variable used in the string representation of `p` can be modified,
    using the `variable` parameter:

    >>> p = np.poly1d([1,2,3], variable='z')
    >>> print(p)
       2
    1 z + 2 z + 3

    Construct a polynomial from its roots:

    >>> np.poly1d([1, 2], True)
    poly1d([ 1, -3,  2])

    This is the same polynomial as obtained by:

    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
    poly1d([ 1, -3,  2])
    """
    # Coefficients are mutable (see __setitem__), so instances are
    # explicitly unhashable.
    __hash__ = None

    @property
    def coeffs(self):
        """ A copy of the polynomial coefficients """
        return self._coeffs.copy()

    @property
    def variable(self):
        """ The name of the polynomial variable """
        return self._variable

    # calculated attributes
    @property
    def order(self):
        """ The order or degree of the polynomial """
        return len(self._coeffs) - 1

    @property
    def roots(self):
        """ The roots of the polynomial, where self(x) == 0 """
        return roots(self._coeffs)

    # our internal _coeffs property need to be backed by __dict__['coeffs'] for
    # scipy to work correctly.
    @property
    def _coeffs(self):
        return self.__dict__['coeffs']

    @_coeffs.setter
    def _coeffs(self, coeffs):
        self.__dict__['coeffs'] = coeffs

    # alias attributes
    r = roots
    c = coef = coefficients = coeffs
    o = order

    def __init__(self, c_or_r, r=False, variable=None):
        # Copy constructor: clone coefficients/variable from another poly1d.
        if isinstance(c_or_r, poly1d):
            self._variable = c_or_r._variable
            self._coeffs = c_or_r._coeffs

            # Copying arbitrary extra attributes is deprecated behaviour.
            if set(c_or_r.__dict__) - set(self.__dict__):
                msg = ("In the future extra properties will not be copied "
                       "across when constructing one poly1d from another")
                warnings.warn(msg, FutureWarning, stacklevel=2)
                self.__dict__.update(c_or_r.__dict__)

            if variable is not None:
                self._variable = variable
            return
        # When r is True, c_or_r lists roots; convert to coefficients.
        if r:
            c_or_r = poly(c_or_r)
        c_or_r = atleast_1d(c_or_r)
        if c_or_r.ndim > 1:
            raise ValueError("Polynomial must be 1d only.")
        # Drop leading zero coefficients; an all-zero input becomes [0.].
        c_or_r = trim_zeros(c_or_r, trim='f')
        if len(c_or_r) == 0:
            c_or_r = NX.array([0.])
        self._coeffs = c_or_r
        if variable is None:
            variable = 'x'
        self._variable = variable

    def __array__(self, t=None):
        # Expose the coefficient array so numpy functions accept poly1d.
        if t:
            return NX.asarray(self.coeffs, t)
        else:
            return NX.asarray(self.coeffs)

    def __repr__(self):
        vals = repr(self.coeffs)
        # Strip the leading "array(" and trailing ")" of the ndarray repr.
        vals = vals[6:-1]
        return "poly1d(%s)" % vals

    def __len__(self):
        return self.order

    def __str__(self):
        # Build a one-line rendering term by term, then let _raise_power
        # lift the exponents onto a separate line.
        thestr = "0"
        var = self.variable

        # Remove leading zeros
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs)-1

        def fmt_float(q):
            s = '%.4g' % q
            if s.endswith('.0000'):
                s = s[:-5]
            return s

        for k in range(len(coeffs)):
            if not iscomplex(coeffs[k]):
                coefstr = fmt_float(real(coeffs[k]))
            elif real(coeffs[k]) == 0:
                coefstr = '%sj' % fmt_float(imag(coeffs[k]))
            else:
                coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
                                          fmt_float(imag(coeffs[k])))

            power = (N-k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)

            # Join terms with " + " / " - " depending on the sign.
            if k > 0:
                if newstr != '':
                    if newstr.startswith('-'):
                        thestr = "%s - %s" % (thestr, newstr[1:])
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            else:
                thestr = newstr
        return _raise_power(thestr)

    def __call__(self, val):
        # Evaluate the polynomial at val (scalar, array, or poly1d).
        return polyval(self.coeffs, val)

    def __neg__(self):
        return poly1d(-self.coeffs)

    def __pos__(self):
        return self

    def __mul__(self, other):
        # Scalar multiply scales coefficients; otherwise polynomial product.
        if isscalar(other):
            return poly1d(self.coeffs * other)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))

    def __rmul__(self, other):
        if isscalar(other):
            return poly1d(other * self.coeffs)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))

    def __add__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))

    def __radd__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))

    def __pow__(self, val):
        # Only non-negative integer exponents are meaningful here.
        if not isscalar(val) or int(val) != val or val < 0:
            raise ValueError("Power to non-negative integers only.")
        res = [1]
        for _ in range(val):
            res = polymul(self.coeffs, res)
        return poly1d(res)

    def __sub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(self.coeffs, other.coeffs))

    def __rsub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(other.coeffs, self.coeffs))

    def __div__(self, other):
        # Scalar divide scales coefficients; otherwise polynomial long
        # division returning (quotient, remainder).
        if isscalar(other):
            return poly1d(self.coeffs/other)
        else:
            other = poly1d(other)
            return polydiv(self, other)

    __truediv__ = __div__

    def __rdiv__(self, other):
        if isscalar(other):
            return poly1d(other/self.coeffs)
        else:
            other = poly1d(other)
            return polydiv(other, self)

    __rtruediv__ = __rdiv__

    def __eq__(self, other):
        if not isinstance(other, poly1d):
            return NotImplemented
        if self.coeffs.shape != other.coeffs.shape:
            return False
        return (self.coeffs == other.coeffs).all()

    def __ne__(self, other):
        if not isinstance(other, poly1d):
            return NotImplemented
        return not self.__eq__(other)

    def __getitem__(self, val):
        # Index by power: p[k] is the coefficient of x**k; out-of-range
        # powers read as 0.
        ind = self.order - val
        if val > self.order:
            return 0
        if val < 0:
            return 0
        return self.coeffs[ind]

    def __setitem__(self, key, val):
        # Set the coefficient of x**key, growing the coefficient array
        # with leading zeros if key exceeds the current order.
        ind = self.order - key
        if key < 0:
            raise ValueError("Does not support negative powers.")
        if key > self.order:
            zr = NX.zeros(key-self.order, self.coeffs.dtype)
            self._coeffs = NX.concatenate((zr, self.coeffs))
            ind = 0
        self._coeffs[ind] = val
        return

    def __iter__(self):
        return iter(self.coeffs)

    def integ(self, m=1, k=0):
        """
        Return an antiderivative (indefinite integral) of this polynomial.

        Refer to `polyint` for full documentation.

        See Also
        --------
        polyint : equivalent function
        """
        return poly1d(polyint(self.coeffs, m=m, k=k))

    def deriv(self, m=1):
        """
        Return a derivative of this polynomial.

        Refer to `polyder` for full documentation.

        See Also
        --------
        polyder : equivalent function
        """
        return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
# Always show RankWarning messages rather than only the first occurrence
# (the default "once"-style filtering would hide repeats).
warnings.simplefilter('always', RankWarning)
| mit |
paveenju/mlat-sim | main/figure2_2a.py | 1 | 1441 | '''
Created on Dec 5, 2016
@author: paveenju
'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib2tikz.save as tikz_save
import utils.functions as fn
# No-op main guard: the plotting statements below run at import time
# regardless, so this guard has no effect.
if __name__ == '__main__':
    pass
def axes():
    """Draw faint horizontal and vertical reference lines through the origin."""
    plt.axvline(0, alpha=.1)
    plt.axhline(0, alpha=.1)
# input variables
# dL: range differences between the two stations; the hyperbolas below
# are the loci of constant range difference.
dL = np.array([0.4, 0.6, 0.9])
P1, P2 = [1.0, 0.0, 0], [-1.0, 0.0, 0]
d = np.linalg.norm(np.mat(P1)-np.mat(P2))
c = d/2            # half the station separation (focal distance)
A = dL/2           # semi-major axes of the hyperbolas

# data generation
x_p = np.linspace(-3, 3, 100)
y_p = np.linspace(-3, 3, 100)
x_p, y_p = np.meshgrid(x_p, y_p)
x, y, h, k = fn.linear_transformation(P1, P2, x_p, y_p)

# matplotlib
mpl.rcParams['lines.color'] = 'k'
mpl.rcParams['axes.prop_cycle'] = mpl.cycler('color', ['k'])

# Draw the zero level-set of the hyperbola equation for each semi-axis.
for a in A:
    plt.contour(x_p, y_p,
                ((x**2/a**2) - (y**2/(c**2-a**2)) - 1),
                [0], colors='b')

axes()

plt.annotate(r'$\tau_1$', xy=(0, 0), xytext=(0.67, 2.8), fontsize=20)
plt.annotate(r'$\tau_2$', xy=(0, 0), xytext=(0.9, 2.5), fontsize=20)
plt.annotate(r'$\tau_3$', xy=(0, 0), xytext=(1.2, 2.1), fontsize=20)
# Bug fix: the legend box previously labelled the third curve
# "\tau_2=0.9"; it is the tau_3 curve.
plt.text(1.75, 0.5, r'$\tau_1=0.4$' + '\n' + r'$\tau_2=0.6$' + '\n' + r'$\tau_3=0.9$',
         bbox={'facecolor':'white', 'alpha':0.5, 'pad':10},
         fontsize=20)

# Mark the two station positions.
plt.plot(P1[0], P1[1], 'xr', mew=5, ms=15)
plt.plot(P2[0], P2[1], 'xr', mew=5, ms=15)

#plt.show()
tikz_save('../output/figure2_2a.tex')
madjelan/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <matt.terry@gmail.com>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
    """Select one entry from data that is grouped by feature.

    The input is expected to be indexable by feature first and sample
    second, i.e. ``len(data[key]) == n_samples`` — the transpose of the
    usual sklearn feature-matrix convention. Anything implementing
    ``__getitem__`` works: a dict of lists, a 2D numpy array, a pandas
    DataFrame, a numpy record array, etc.

    This is not designed for data grouped by sample (e.g. a list of
    dicts); for that, see `sklearn.feature_extraction.DictVectorizer`.

    Parameters
    ----------
    key : hashable, required
        The key corresponding to the desired value in a mappable.
    """

    def __init__(self, key):
        self.key = key

    def fit(self, x, y=None):
        # Stateless: there is nothing to learn from the data.
        return self

    def transform(self, data_dict):
        return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
    """Compute simple per-document statistics for DictVectorizer."""

    def fit(self, x, y=None):
        # Stateless transformer.
        return self

    def transform(self, posts):
        stats = []
        for text in posts:
            stats.append({'length': len(text),
                          'num_sentences': text.count('.')})
        return stats
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
    """Extract the subject & body from a usenet post in a single pass.

    Takes a sequence of raw post strings and produces a record array
    with one row per post and two object fields: ``subject`` and
    ``body``.
    """

    def fit(self, x, y=None):
        return self

    def transform(self, posts):
        features = np.recarray(shape=(len(posts),),
                               dtype=[('subject', object), ('body', object)])
        prefix = 'Subject:'
        for i, text in enumerate(posts):
            # Headers end at the first blank line.
            headers, _, bod = text.partition('\n\n')
            # Strip quoted replies and signature footers from the body.
            features['body'][i] = strip_newsgroup_quoting(
                strip_newsgroup_footer(bod))
            # First "Subject:" header wins; default to the empty string.
            features['subject'][i] = next(
                (line[len(prefix):] for line in headers.split('\n')
                 if line.startswith(prefix)),
                '')
        return features
# Full model: extract subject/body, featurize each in its own sub-pipeline,
# combine with weights, and classify with a linear SVM.
pipeline = Pipeline([
    # Extract the subject & body
    ('subjectbody', SubjectBodyExtractor()),

    # Use FeatureUnion to combine the features from subject and body
    ('union', FeatureUnion(
        transformer_list=[

            # Pipeline for pulling features from the post's subject line
            ('subject', Pipeline([
                ('selector', ItemSelector(key='subject')),
                ('tfidf', TfidfVectorizer(min_df=50)),
            ])),

            # Pipeline for standard bag-of-words model for body
            ('body_bow', Pipeline([
                ('selector', ItemSelector(key='body')),
                ('tfidf', TfidfVectorizer()),
                ('best', TruncatedSVD(n_components=50)),
            ])),

            # Pipeline for pulling ad hoc features from post's body
            ('body_stats', Pipeline([
                ('selector', ItemSelector(key='body')),
                ('stats', TextStats()),  # returns a list of dicts
                ('vect', DictVectorizer()),  # list of dicts -> feature matrix
            ])),
        ],

        # weight components in FeatureUnion
        transformer_weights={
            'subject': 0.8,
            'body_bow': 0.5,
            'body_stats': 1.0,
        },
    )),

    # Use a SVC classifier on the combined features
    ('svc', SVC(kernel='linear')),
])

# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
                           subset='train',
                           categories=categories,
                           )
test = fetch_20newsgroups(random_state=1,
                          subset='test',
                          categories=categories,
                          )

pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
hlin117/statsmodels | statsmodels/datasets/ccard/data.py | 25 | 1635 | """Bill Greene's credit scoring data."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission of the original author, who
retains all rights."""
TITLE = __doc__
SOURCE = """
William Greene's `Econometric Analysis`
More information can be found at the web site of the text:
http://pages.stern.nyu.edu/~wgreene/Text/econometricanalysis.htm
"""
DESCRSHORT = """William Greene's credit scoring data"""
DESCRLONG = """More information on this data can be found on the
homepage for Greene's `Econometric Analysis`. See source.
"""
NOTE = """::
Number of observations - 72
Number of variables - 5
Variable name definitions - See Source for more information on the
variables.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """Load the credit card data and returns a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # Read the raw recarray and wrap it; column 0 is the endogenous var.
    return du.process_recarray(_get_data(), endog_idx=0, dtype=float)
def load_pandas():
    """Load the credit card data and returns a pandas-based Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # Same as load(), but the Dataset wraps pandas objects.
    return du.process_recarray_pandas(_get_data(), endog_idx=0)
def _get_data():
    """Read ccard.csv (shipped alongside this module) into a float recarray."""
    filepath = dirname(abspath(__file__))
    # Bug fix: the file handle was previously opened and never closed;
    # a context manager guarantees it is released.
    with open(filepath + '/ccard.csv', 'rb') as f:
        data = recfromtxt(f, delimiter=",", names=True, dtype=float)
    return data
| bsd-3-clause |
herruzojm/udacity-deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
    """
    Display a histogram of the values sampled from a TF distribution tensor.
    """
    with tf.Session() as sess:
        values = sess.run(distribution_tensor)

    plt.title(title)
    # Bug fix: np.linspace's `num` must be an integer; len(values)/2 is a
    # float under Python 3, so use floor division.
    plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
    plt.show()
def _get_loss_acc(dataset, weights):
    """
    Get losses and validation accuracy of example neural network

    `weights` supplies the three weight tensors of a 2-hidden-layer
    (256, 128 units) ReLU network; only the biases are created here, so
    the comparison between weight initialisations is apples-to-apples.
    """
    batch_size = 128
    epochs = 2
    learning_rate = 0.001

    features = tf.placeholder(tf.float32)
    labels = tf.placeholder(tf.float32)
    learn_rate = tf.placeholder(tf.float32)

    # Biases always start at zero; the experiment varies only `weights`.
    biases = [
        tf.Variable(tf.zeros([256])),
        tf.Variable(tf.zeros([128])),
        tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
    ]

    # Layers
    layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
    layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
    logits = tf.matmul(layer_2, weights[2]) + biases[2]

    # Training loss
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

    # Optimizer
    optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    # Accuracy
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Measurements use for graphing loss
    loss_batch = []

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        batch_count = int((dataset.train.num_examples / batch_size))

        # The training cycle
        for epoch_i in range(epochs):
            for batch_i in range(batch_count):
                batch_features, batch_labels = dataset.train.next_batch(batch_size)

                # Run optimizer and get loss
                session.run(
                    optimizer,
                    feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
                l = session.run(
                    loss,
                    feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
                loss_batch.append(l)

        valid_acc = session.run(
            accuracy,
            feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})

    # Hack to Reset batches so the next call re-trains from the same data order.
    dataset.train._index_in_epoch = 0
    dataset.train._epochs_completed = 0

    return loss_batch, valid_acc
def compare_init_weights(
        dataset,
        title,
        weight_init_list,
        plot_n_batches=100):
    """
    Plot loss and print stats of weights using an example neural network

    `weight_init_list` is a sequence of (weights, label) pairs; each entry
    is trained via _get_loss_acc and its loss curve is plotted. Only the
    first `plot_n_batches` batches are drawn.
    """
    colors = ['r', 'b', 'g', 'c', 'y', 'k']
    label_accs = []
    label_loss = []

    # One distinct plot colour per initialisation scheme.
    assert len(weight_init_list) <= len(colors), 'Too many inital weights to plot'

    for i, (weights, label) in enumerate(weight_init_list):
        loss, val_acc = _get_loss_acc(dataset, weights)
        plt.plot(loss[:plot_n_batches], colors[i], label=label)
        label_accs.append((label, val_acc))
        label_loss.append((label, loss[-1]))

    plt.title(title)
    plt.xlabel('Batches')
    plt.ylabel('Loss')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.show()

    # Summarise final validation accuracy and final-batch loss per scheme.
    print('After 858 Batches (2 Epochs):')
    print('Validation Accuracy')
    for label, val_acc in label_accs:
        print(' {:7.3f}% -- {}'.format(val_acc*100, label))
    print('Loss')
    for label, loss in label_loss:
        print(' {:7.3f} -- {}'.format(loss, label))
| mit |
bokeh/bokeh | examples/app/dash/main.py | 1 | 4397 | from collections import Counter
from math import pi
import numpy as np
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import (ColumnDataSource, DataTable, NumberFormatter,
RangeTool, StringFormatter, TableColumn)
from bokeh.palettes import Spectral11
from bokeh.plotting import figure
from bokeh.sampledata.autompg2 import autompg2 as mpg
from bokeh.sampledata.stocks import AAPL
from bokeh.transform import cumsum
# Timeseries panel: AAPL adjusted close with a range-selector strip below.
dates = np.array(AAPL['date'], dtype=np.datetime64)
source = ColumnDataSource(data=dict(date=dates, close=AAPL['adj_close']))

p = figure(height=110, tools="", toolbar_location=None, #name="line",
           x_axis_type="datetime", x_range=(dates[1500], dates[2500]), sizing_mode="scale_width")
p.line('date', 'close', source=source, line_width=2, alpha=0.7)
p.yaxis.axis_label = 'Traffic'
p.background_fill_color = "#f5f5f5"
p.grid.grid_line_color = "white"

select = figure(height=50, width=800, y_range=p.y_range,
                x_axis_type="datetime", y_axis_type=None,
                tools="", toolbar_location=None, sizing_mode="scale_width")

# Fixed misspelling: this variable was previously named "range_rool".
range_tool = RangeTool(x_range=p.x_range)
range_tool.overlay.fill_color = "navy"
range_tool.overlay.fill_alpha = 0.2

select.line('date', 'close', source=source)
select.ygrid.grid_line_color = None
select.add_tools(range_tool)
select.toolbar.active_multi = range_tool
select.background_fill_color = "#f5f5f5"
select.grid.grid_line_color = "white"
select.x_range.range_padding = 0.01

layout = column(p, select, sizing_mode="scale_width", name="line")
curdoc().add_root(layout)
# Donut chart: share of some quantity per country, rendered as annular wedges.
x = Counter({ 'United States': 157, 'United Kingdom': 93, 'Japan': 89, 'China': 63,
              'Germany': 44, 'India': 42, 'Italy': 40, 'Australia': 35, 'Brazil': 32,
              'France': 31, 'Taiwan': 31 })

data = pd.DataFrame.from_dict(dict(x), orient='index').reset_index().rename(index=str, columns={0:'value', 'index':'country'})
# Wedge size is each country's fraction of the total, in radians.
data['angle'] = data['value']/sum(x.values()) * 2*pi
data['color'] = Spectral11

region = figure(height=350, toolbar_location=None, outline_line_color=None, sizing_mode="scale_both", name="region", x_range=(-0.4, 1))
region.annular_wedge(x=-0, y=1, inner_radius=0.2, outer_radius=0.32,
                     start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
                     line_color="white", fill_color='color', legend_group='country', source=data)
region.axis.axis_label=None
region.axis.visible=False
region.grid.grid_line_color = None
# Compact legend styling so all eleven entries fit next to the donut.
region.legend.label_text_font_size = "0.7em"
region.legend.spacing = 1
region.legend.glyph_height = 15
region.legend.label_height = 15

curdoc().add_root(region)
# Bar chart: horizontal bars of per-platform values, largest category on top.
plats = ("IOS", "Android", "OSX", "Windows", "Other")
values = (35, 22, 13, 26, 4)
platform = figure(height=350, toolbar_location=None, outline_line_color=None, sizing_mode="scale_both", name="platform",
                  y_range=list(reversed(plats)), x_axis_location="above")
platform.x_range.start = 0
platform.ygrid.grid_line_color = None
platform.axis.minor_tick_line_color = None
platform.outline_line_color = None
platform.hbar(left=0, right=values, y=plats, height=0.8)

curdoc().add_root(platform)
# Table: first six rows of the autompg2 sample data in a DataTable widget.
# NOTE(review): this rebinds the module-level name `source` used earlier by
# the timeseries section; harmless here since that section is already built.
source = ColumnDataSource(data=mpg[:6])
columns = [
    TableColumn(field="cyl", title="Counts"),
    TableColumn(field="cty", title="Uniques",
                formatter=StringFormatter(text_align="center")),
    TableColumn(field="hwy", title="Rating",
                formatter=NumberFormatter(text_align="right")),
]
table = DataTable(source=source, columns=columns, height=210, width=330, name="table", sizing_mode="scale_both")

curdoc().add_root(table)
# Setup: page title plus the stat-card values consumed by the Jinja template.
curdoc().title = "Bokeh Dashboard"
curdoc().template_variables['stats_names'] = ['users', 'new_users', 'time', 'sessions', 'sales']
curdoc().template_variables['stats'] = {
    'users'     : {'icon': 'user',        'value': 11200, 'change':  4   , 'label': 'Total Users'},
    'new_users' : {'icon': 'user',        'value': 350,   'change':  1.2 , 'label': 'New Users'},
    'time'      : {'icon': 'clock-o',     'value': 5.6,   'change': -2.3 , 'label': 'Total Time'},
    'sessions'  : {'icon': 'user',        'value': 27300, 'change':  0.5 , 'label': 'Total Sessions'},
    'sales'     : {'icon': 'dollar-sign', 'value': 8700,  'change': -0.2 , 'label': 'Average Sales'},
}
| bsd-3-clause |
ahoyosid/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D

# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000

# Create our sphere: random azimuth p and polar angle t.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi

# Sever the poles from the sphere (keep polar angles away from 0 and pi).
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
# Spherical -> Cartesian coordinates for the retained points.
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
    np.sin(t[indices]) * np.sin(p[indices]), \
    np.cos(t[indices])

# Plot our dataset in 3D as the first panel of a 2x5 grid.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (1000, n_neighbors), fontsize=14)

ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
    # compatibility matplotlib < 1.0
    ax.view_init(40, -10)
except:
    pass

# (n_points, 3) array consumed by all embedding methods below.
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning with its four variants,
# filling panels 2-5 of the grid.
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']

for i, method in enumerate(methods):
    t0 = time()
    trans_data = manifold\
        .LocallyLinearEmbedding(n_neighbors, 2,
                                method=method).fit_transform(sphere_data).T
    t1 = time()
    print("%s: %.2g sec" % (methods[i], t1 - t0))

    ax = fig.add_subplot(252 + i)
    plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
    plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')
# Perform Isomap Manifold learning (panel 7 of the grid).
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
    .fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))

ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling (panel 8 of the grid).
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))

ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding (panel 9 of the grid).
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
                                n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))

ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding (last panel).
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))

# Bug fix: add_subplot(250) is invalid (subplot indices are 1-based);
# this plot belongs in cell 10 of the 2x5 grid.
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

plt.show()
| bsd-3-clause |
kwailamchan/programming-languages | python/sklearn/examples/general/test_the_significance.py | 3 | 2304 | #---------------------------------------------------------------#
# Project: Test with permutations the significance of a classification score
# Author: Kelly Chan
# Date: Apr 23 2014
#---------------------------------------------------------------#
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
def loadData():
    """Load the iris dataset and return its features, labels, and the
    number of distinct target classes."""
    iris = datasets.load_iris()
    features = iris.data
    labels = iris.target
    class_count = np.unique(labels).size
    return features, labels, class_count
def addNoise(X):
    """Append 2200 columns of seeded Gaussian noise to the feature matrix X.

    The RandomState is seeded with 0 so the noise is reproducible.
    """
    rng = np.random.RandomState(seed=0)
    noise = rng.normal(size=(len(X), 2200))
    return np.hstack([X, noise])
def crossValidation(y):
    """Build a 2-fold stratified cross-validation splitter over the labels."""
    return StratifiedKFold(y, 2)
def createSVM():
    """Construct a linear-kernel support vector classifier."""
    return SVC(kernel='linear')
def computeScore(svm, X, y, cv):
    """Run a permutation test of the classifier's accuracy.

    Returns the real cross-validated score, the scores obtained on 100
    label permutations, and the resulting p-value.
    """
    score, permutation_scores, pvalue = permutation_test_score(
        svm, X, y,
        scoring='accuracy',
        cv=cv,
        n_permutations=100,
        n_jobs=1,
    )
    print("Classification score %s (pvalue: %s)" % (score, pvalue))
    return score, permutation_scores, pvalue
def plotHist(score, permutation_scores, pvalue, n_classes):
    """Histogram the permutation scores, with vertical markers for the
    real classification score and the chance level (1/n_classes)."""
    pl.hist(permutation_scores, 20, label='Permutation scores')
    ylim = pl.ylim()
    # Draw the real score as a vertical dashed green line.
    pl.plot([score, score], ylim, '--g',
            linewidth=3,
            label='Classification Score (pvalue %s)' % pvalue)
    # Chance level for a balanced problem is 1 / n_classes.
    pl.plot([1. / n_classes, 1. / n_classes], ylim, '--k',
            linewidth=3,
            label='Luck')
    pl.ylim(ylim)
    pl.legend()
    pl.xlabel('Score')
    pl.show()
def test():
    """End-to-end driver: load iris, add noise, cross-validate, score, plot."""
    X, y, n_classes = loadData()
    noisy = addNoise(X)
    splitter = crossValidation(y)
    clf = createSVM()
    score, perm_scores, pvalue = computeScore(clf, noisy, y, splitter)
    plotHist(score, perm_scores, pvalue, n_classes)
# Run the full permutation-test demo when executed as a script.
if __name__ == '__main__':
    test()
| mit |
wangmiao1981/spark | python/setup.py | 14 | 13273 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import glob
import os
import sys
from setuptools import setup
from setuptools.command.install import install
from shutil import copyfile, copytree, rmtree
# Load __version__ (defined in pyspark/version.py) into this namespace by
# executing the file directly; this works before pyspark is importable.
try:
    exec(open('pyspark/version.py').read())
except IOError:
    print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
          file=sys.stderr)
    sys.exit(-1)
# Import pyspark/install.py by file path (not as a package) for the same reason.
try:
    spec = importlib.util.spec_from_file_location("install", "pyspark/install.py")
    install_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(install_module)
except IOError:
    print("Failed to load the installing module (pyspark/install.py) which had to be "
          "packaged together.",
          file=sys.stderr)
    sys.exit(-1)
VERSION = __version__  # noqa
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
    JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
    # Release mode puts the jars in a jars directory
    JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
    print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
        JARS_PATH), file=sys.stderr)
    sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
    print(incorrect_invocation_message, file=sys.stderr)
    sys.exit(-1)
# Source locations inside SPARK_HOME ...
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
USER_SCRIPTS_PATH = os.path.join(SPARK_HOME, "sbin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
# ... and their targets inside the temporary "deps" symlink farm.
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
USER_SCRIPTS_TARGET = os.path.join(TEMP_PATH, "sbin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm exists while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
            (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
    # Construct links for setup
    try:
        os.mkdir(TEMP_PATH)
    # NOTE(review): bare `except:` treats any mkdir failure (e.g. permission
    # errors) as "already exists"; `except OSError:` would be more precise.
    except:
        print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
              file=sys.stderr)
        sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/pandas/utils.py
# For Arrow, you should also check ./pom.xml and ensure there are no breaking changes in the
# binary format protocol with the Java version, see ARROW_HOME/format/* for specifications.
# Also don't forget to update python/docs/source/getting_started/install.rst.
_minimum_pandas_version = "0.23.2"
_minimum_pyarrow_version = "1.0.0"
class InstallCommand(install):
    """Custom setuptools `install` command that can additionally download and
    bundle a full Spark distribution inside the installed pyspark package.

    The distribution is selected with the PYSPARK_HADOOP_VERSION /
    PYSPARK_HIVE_VERSION environment variables (PYSPARK_VERSION is for
    testing only)."""

    # TODO(SPARK-32837) leverage pip's custom options
    def run(self):
        install.run(self)
        # Make sure the destination is always clean.
        spark_dist = os.path.join(self.install_lib, "pyspark", "spark-distribution")
        rmtree(spark_dist, ignore_errors=True)
        if ("PYSPARK_HADOOP_VERSION" in os.environ) or ("PYSPARK_HIVE_VERSION" in os.environ):
            # Note that PYSPARK_VERSION environment is just a testing purpose.
            # PYSPARK_HIVE_VERSION environment variable is also internal for now in case
            # we support another version of Hive in the future.
            spark_version, hadoop_version, hive_version = install_module.checked_versions(
                os.environ.get("PYSPARK_VERSION", VERSION).lower(),
                os.environ.get("PYSPARK_HADOOP_VERSION", install_module.DEFAULT_HADOOP).lower(),
                os.environ.get("PYSPARK_HIVE_VERSION", install_module.DEFAULT_HIVE).lower())
            if ("PYSPARK_VERSION" not in os.environ and
                    ((install_module.DEFAULT_HADOOP, install_module.DEFAULT_HIVE) ==
                     (hadoop_version, hive_version))):
                # Do not download and install if they are same as default.
                return
            # Fetch the requested Spark distribution into the package tree.
            install_module.install_spark(
                dest=spark_dist,
                spark_version=spark_version,
                hadoop_version=hadoop_version,
                hive_version=hive_version)
# Build (or verify) the "deps" farm, then run setup(); the finally-block tears
# the farm down again when we are packaging from inside a Spark checkout.
try:
    # We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
    # find it where expected. The rest of the files aren't copied because they are accessed
    # using Python imports instead which will be resolved correctly.
    try:
        os.makedirs("pyspark/python/pyspark")
    except OSError:
        # Don't worry if the directory already exists.
        pass
    copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")

    if (in_spark):
        # Construct the symlink farm - this is necessary since we can't refer to the path above the
        # package root and we need to copy the jars and scripts which are up above the python root.
        if _supports_symlinks():
            os.symlink(JARS_PATH, JARS_TARGET)
            os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
            os.symlink(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
            os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
            os.symlink(DATA_PATH, DATA_TARGET)
            os.symlink(LICENSES_PATH, LICENSES_TARGET)
        else:
            # For windows fall back to the slower copytree
            copytree(JARS_PATH, JARS_TARGET)
            copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
            copytree(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
            copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
            copytree(DATA_PATH, DATA_TARGET)
            copytree(LICENSES_PATH, LICENSES_TARGET)
    else:
        # If we are not inside of SPARK_HOME verify we have the required symlink farm
        # NOTE(review): this first check prints a message but does not exit;
        # execution falls through to the SCRIPTS_TARGET check below -- confirm
        # this is intentional.
        if not os.path.exists(JARS_TARGET):
            print("To build packaging must be in the python directory under the SPARK_HOME.",
                  file=sys.stderr)
        if not os.path.isdir(SCRIPTS_TARGET):
            print(incorrect_invocation_message, file=sys.stderr)
            sys.exit(-1)

    # Scripts directive requires a list of each script path and does not take wild cards.
    script_names = os.listdir(SCRIPTS_TARGET)
    scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
    # We add find_spark_home.py to the bin directory we install so that pip installed PySpark
    # will search for SPARK_HOME with Python.
    scripts.append("pyspark/find_spark_home.py")

    with open('README.md') as f:
        long_description = f.read()

    setup(
        name='pyspark',
        version=VERSION,
        description='Apache Spark Python API',
        long_description=long_description,
        long_description_content_type="text/markdown",
        author='Spark Developers',
        author_email='dev@spark.apache.org',
        url='https://github.com/apache/spark/tree/master/python',
        packages=['pyspark',
                  'pyspark.cloudpickle',
                  'pyspark.mllib',
                  'pyspark.mllib.linalg',
                  'pyspark.mllib.stat',
                  'pyspark.ml',
                  'pyspark.ml.linalg',
                  'pyspark.ml.param',
                  'pyspark.sql',
                  'pyspark.sql.avro',
                  'pyspark.sql.pandas',
                  'pyspark.streaming',
                  'pyspark.bin',
                  'pyspark.sbin',
                  'pyspark.jars',
                  'pyspark.pandas',
                  'pyspark.pandas.data_type_ops',
                  'pyspark.pandas.indexes',
                  'pyspark.pandas.missing',
                  'pyspark.pandas.plot',
                  'pyspark.pandas.spark',
                  'pyspark.pandas.typedef',
                  'pyspark.pandas.usage_logging',
                  'pyspark.python.pyspark',
                  'pyspark.python.lib',
                  'pyspark.data',
                  'pyspark.licenses',
                  'pyspark.resource',
                  'pyspark.examples.src.main.python'],
        include_package_data=True,
        package_dir={
            'pyspark.jars': 'deps/jars',
            'pyspark.bin': 'deps/bin',
            'pyspark.sbin': 'deps/sbin',
            'pyspark.python.lib': 'lib',
            'pyspark.data': 'deps/data',
            'pyspark.licenses': 'deps/licenses',
            'pyspark.examples.src.main.python': 'deps/examples',
        },
        package_data={
            'pyspark.jars': ['*.jar'],
            'pyspark.bin': ['*'],
            'pyspark.sbin': ['spark-config.sh', 'spark-daemon.sh',
                             'start-history-server.sh',
                             'stop-history-server.sh', ],
            'pyspark.python.lib': ['*.zip'],
            'pyspark.data': ['*.txt', '*.data'],
            'pyspark.licenses': ['*.txt'],
            'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
        scripts=scripts,
        license='http://www.apache.org/licenses/LICENSE-2.0',
        # Don't forget to update python/docs/source/getting_started/install.rst
        # if you're updating the versions or dependencies.
        install_requires=['py4j==0.10.9.2'],
        extras_require={
            'ml': ['numpy>=1.7'],
            'mllib': ['numpy>=1.7'],
            'sql': [
                'pandas>=%s' % _minimum_pandas_version,
                'pyarrow>=%s' % _minimum_pyarrow_version,
            ],
            'pandas_on_spark': [
                'pandas>=%s' % _minimum_pandas_version,
                'pyarrow>=%s' % _minimum_pyarrow_version,
                'numpy>=1.14',
            ],
        },
        python_requires='>=3.6',
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy',
            'Typing :: Typed'],
        cmdclass={
            'install': InstallCommand,
        },
    )
finally:
    # We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
    # packaging.
    if (in_spark):
        # Depending on cleaning up the symlink farm or copied version
        if _supports_symlinks():
            os.remove(os.path.join(TEMP_PATH, "jars"))
            os.remove(os.path.join(TEMP_PATH, "bin"))
            os.remove(os.path.join(TEMP_PATH, "sbin"))
            os.remove(os.path.join(TEMP_PATH, "examples"))
            os.remove(os.path.join(TEMP_PATH, "data"))
            os.remove(os.path.join(TEMP_PATH, "licenses"))
        else:
            rmtree(os.path.join(TEMP_PATH, "jars"))
            rmtree(os.path.join(TEMP_PATH, "bin"))
            rmtree(os.path.join(TEMP_PATH, "sbin"))
            rmtree(os.path.join(TEMP_PATH, "examples"))
            rmtree(os.path.join(TEMP_PATH, "data"))
            rmtree(os.path.join(TEMP_PATH, "licenses"))
        os.rmdir(TEMP_PATH)
| apache-2.0 |
wiheto/teneto | teneto/timeseries/report.py | 1 | 2762 | """Create report about derive"""
import os
import matplotlib.pyplot as plt
import numpy as np
def gen_report(report, sdir='./', report_name='report.html'):
    """Generates an HTML report of derivation and postprocess steps in
    teneto.timeseries.

    Parameters
    ----------
    report : dict
        Report dictionary produced by the derive/postprocess steps. Keys used
    here (when present): 'method' (derivation method name, whose
        parameters live under report[report['method']]) and 'postprocess'
        (list of applied step names, each with its parameters under
        report[<step>]).
    sdir : str
        Directory where the HTML report, figures and CSV data are written
        (created if missing).
    report_name : str
        File name of the generated HTML report.
    """
    # Create report directory
    if not os.path.exists(sdir):
        os.makedirs(sdir)
    # Add a slash to file directory if not included to avoid DirNameFileName
    # instead of DirName/FileName being created
    if sdir[-1] != '/':
        sdir += '/'
    report_html = '<html><body>'
    if 'method' in report.keys():
        report_html += "<h1>Method: " + report['method'] + "</h1><p>"
        for i in report[report['method']]:
            if i == 'taper_window':
                # Plot the taper used for windowing and embed it in the report.
                fig, ax = plt.subplots(1)
                ax.plot(report[report['method']]['taper_window'],
                        report[report['method']]['taper'])
                ax.set_xlabel('Window (time). 0 in middle of window.')
                ax.set_title(
                    'Taper from ' + report[report['method']]['distribution'] + ' distribution (PDF).')
                fig.savefig(sdir + 'taper.png')
                report_html += "<img src='./taper.png' width=500>" + "<p>"
            else:
                # Plain parameter: render as a "- name: value" bullet.
                report_html += "- <b>" + i + "</b>: " + \
                    str(report[report['method']][i]) + "<br>"
    if 'postprocess' in report.keys():
        report_html += "<p><h2>Postprocessing:</h2><p>"
        report_html += "<b>Pipeline: </b>"
        # First pass: list the pipeline step names.
        for i in report['postprocess']:
            report_html += " " + i + ","
        # Second pass: one section per step with its parameters.
        for i in report['postprocess']:
            report_html += "<p><h3>" + i + "</h3><p>"
            for j in report[i]:
                if j == 'lambda':
                    # NOTE(review): reads report['boxcox']['lambda'] rather than
                    # report[i]['lambda']; presumably only the 'boxcox' step
                    # stores 'lambda' -- confirm.
                    report_html += "- <b>" + j + "</b>: " + "<br>"
                    lambda_val = np.array(report['boxcox']['lambda'])
                    fig, ax = plt.subplots(1)
                    ax.hist(lambda_val[:, -1])
                    ax.set_xlabel('lambda')
                    ax.set_ylabel('frequency')
                    ax.set_title('Histogram of lambda parameter')
                    fig.savefig(sdir + 'boxcox_lambda.png')
                    report_html += "<img src='./boxcox_lambda.png' width=500>" + "<p>"
                    report_html += "Data located in " + sdir + "boxcox_lambda.csv <p>"
                    np.savetxt(sdir + "boxcox_lambda.csv",
                               lambda_val, delimiter=",")
                else:
                    report_html += "- <b>" + j + "</b>: " + \
                        str(report[i][j]) + "<br>"
    report_html += '</body></html>'
    with open(sdir + report_name, 'w') as file:
        file.write(report_html)
    # NOTE(review): redundant -- the `with` block above already closed the file.
    file.close()
| gpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/dask/tests/test_base.py | 1 | 10784 | # -*- coding: utf-8 -*-
import os
import pytest
from operator import add, mul
import sys
import dask
from dask.base import (compute, tokenize, normalize_token, normalize_function,
visualize)
from dask.utils import tmpdir, tmpfile, ignoring
from dask.utils_test import inc, dec
from dask.compatibility import unicode
def import_or_none(path):
    """Return the module named *path*, or None when it cannot be imported."""
    result = None
    with ignoring():
        result = pytest.importorskip(path)
    return result
# toolz is required for this test module; the rest are optional -- when a
# module is missing its variable is None and the corresponding tests are
# skipped via the string `skipif` marks below.
tz = pytest.importorskip('toolz')
da = import_or_none('dask.array')
db = import_or_none('dask.bag')
dd = import_or_none('dask.dataframe')
np = import_or_none('numpy')
pd = import_or_none('pandas')
def test_normalize_function():
    # normalize_function must distinguish partials/compositions/curries by the
    # wrapped function and its bound arguments, and be stable across calls.
    def f1(a, b, c=1):
        pass

    def f2(a, b=1, c=2):
        pass

    def f3(a):
        pass

    assert normalize_function(f2)
    assert normalize_function(lambda a: a)
    assert (normalize_function(tz.partial(f2, b=2)) ==
            normalize_function(tz.partial(f2, b=2)))
    assert (normalize_function(tz.partial(f2, b=2)) !=
            normalize_function(tz.partial(f2, b=3)))
    assert (normalize_function(tz.partial(f1, b=2)) !=
            normalize_function(tz.partial(f2, b=2)))
    assert (normalize_function(tz.compose(f2, f3)) ==
            normalize_function(tz.compose(f2, f3)))
    assert (normalize_function(tz.compose(f2, f3)) !=
            normalize_function(tz.compose(f2, f1)))
    assert normalize_function(tz.curry(f2)) == normalize_function(tz.curry(f2))
    assert normalize_function(tz.curry(f2)) != normalize_function(tz.curry(f1))
    assert (normalize_function(tz.curry(f2, b=1)) ==
            normalize_function(tz.curry(f2, b=1)))
    assert (normalize_function(tz.curry(f2, b=1)) !=
            normalize_function(tz.curry(f2, b=2)))


def test_tokenize():
    # tokenize always yields a string-like token.
    a = (1, 2, 3)
    assert isinstance(tokenize(a), (str, bytes))
@pytest.mark.skipif('not np')
def test_tokenize_numpy_array_consistent_on_values():
    # Tokens depend on array values, not object identity.
    assert (tokenize(np.random.RandomState(1234).random_sample(1000)) ==
            tokenize(np.random.RandomState(1234).random_sample(1000)))


@pytest.mark.skipif('not np')
def test_tokenize_numpy_array_supports_uneven_sizes():
    # Itemsizes that don't evenly divide the buffer must not crash tokenize.
    tokenize(np.random.random(7).astype(dtype='i2'))


@pytest.mark.skipif('not np')
def test_tokenize_discontiguous_numpy_array():
    # Strided (non-contiguous) views must be tokenizable.
    tokenize(np.random.random(8)[::2])


@pytest.mark.skipif('not np')
def test_tokenize_numpy_datetime():
    tokenize(np.array(['2000-01-01T12:00:00'], dtype='M8[ns]'))


@pytest.mark.skipif('not np')
def test_tokenize_numpy_scalar():
    # 0-d arrays and structured scalars tokenize consistently by value.
    assert tokenize(np.array(1.0, dtype='f8')) == tokenize(np.array(1.0, dtype='f8'))
    assert (tokenize(np.array([(1, 2)], dtype=[('a', 'i4'), ('b', 'i8')])[0]) ==
            tokenize(np.array([(1, 2)], dtype=[('a', 'i4'), ('b', 'i8')])[0]))


@pytest.mark.skipif('not np')
def test_tokenize_numpy_array_on_object_dtype():
    # Object-dtype arrays (including None entries and tuples) tokenize by value.
    assert (tokenize(np.array(['a', 'aa', 'aaa'], dtype=object)) ==
            tokenize(np.array(['a', 'aa', 'aaa'], dtype=object)))
    assert (tokenize(np.array(['a', None, 'aaa'], dtype=object)) ==
            tokenize(np.array(['a', None, 'aaa'], dtype=object)))
    assert (tokenize(np.array([(1, 'a'), (1, None), (1, 'aaa')], dtype=object)) ==
            tokenize(np.array([(1, 'a'), (1, None), (1, 'aaa')], dtype=object)))
    if sys.version_info[0] == 2:
        assert (tokenize(np.array([unicode("Rebeca Alón", encoding="utf-8")], dtype=object)) ==
                tokenize(np.array([unicode("Rebeca Alón", encoding="utf-8")], dtype=object)))
@pytest.mark.skipif('not np')
def test_tokenize_numpy_memmap():
    # Memory-mapped arrays with equal contents but different backing files
    # must tokenize differently (the file identity matters)...
    with tmpfile('.npy') as fn:
        x = np.arange(5)
        np.save(fn, x)
        y = tokenize(np.load(fn, mmap_mode='r'))

    with tmpfile('.npy') as fn:
        x = np.arange(5)
        np.save(fn, x)
        z = tokenize(np.load(fn, mmap_mode='r'))

    assert y != z

    # ...while distinct slices of one memmap get distinct tokens, and the
    # same slice of two maps over one file tokenizes identically.
    with tmpfile('.npy') as fn:
        x = np.random.normal(size=(10, 10))
        np.save(fn, x)
        mm = np.load(fn, mmap_mode='r')
        mm2 = np.load(fn, mmap_mode='r')
        a = tokenize(mm[0, :])
        b = tokenize(mm[1, :])
        c = tokenize(mm[0:3, :])
        d = tokenize(mm[:, 0])
        assert len(set([a, b, c, d])) == 4
        assert tokenize(mm) == tokenize(mm2)
        assert tokenize(mm[1, :]) == tokenize(mm2[1, :])


@pytest.mark.skipif('not np')
def test_tokenize_numpy_memmap_no_filename():
    # GH 1562: arithmetic on a memmap yields an array without a backing
    # filename; tokenizing it must still work and be stable.
    with tmpfile('.npy') as fn1, tmpfile('.npy') as fn2:
        x = np.arange(5)
        np.save(fn1, x)
        np.save(fn2, x)

        a = np.load(fn1, mmap_mode='r')
        b = a + a
        assert tokenize(b) == tokenize(b)
def test_normalize_base():
    # Primitive values normalize to themselves (identity, not a copy).
    for i in [1, 1.1, '1', slice(1, 2, 3)]:
        assert normalize_token(i) is i


@pytest.mark.skipif('not pd')
def test_tokenize_pandas():
    # DataFrame tokens reflect values, index metadata and dtypes.
    a = pd.DataFrame({'x': [1, 2, 3], 'y': ['4', 'asd', None]}, index=[1, 2, 3])
    b = pd.DataFrame({'x': [1, 2, 3], 'y': ['4', 'asd', None]}, index=[1, 2, 3])
    assert tokenize(a) == tokenize(b)
    b.index.name = 'foo'
    assert tokenize(a) != tokenize(b)
    a = pd.DataFrame({'x': [1, 2, 3], 'y': ['a', 'b', 'a']})
    b = pd.DataFrame({'x': [1, 2, 3], 'y': ['a', 'b', 'a']})
    a['z'] = a.y.astype('category')
    assert tokenize(a) != tokenize(b)
    b['z'] = a.y.astype('category')
    assert tokenize(a) == tokenize(b)


def test_tokenize_kwargs():
    # Keyword names and values both feed into the token.
    assert tokenize(5, x=1) == tokenize(5, x=1)
    assert tokenize(5) != tokenize(5, x=1)
    assert tokenize(5, x=1) != tokenize(5, x=2)
    assert tokenize(5, x=1) != tokenize(5, y=1)


def test_tokenize_same_repr():
    # Objects with identical reprs but different state must not collide.
    class Foo(object):
        def __init__(self, x):
            self.x = x

        def __repr__(self):
            return 'a foo'

    assert tokenize(Foo(1)) != tokenize(Foo(2))


@pytest.mark.skipif('not np')
def test_tokenize_sequences():
    assert tokenize([1]) != tokenize([2])
    assert tokenize([1]) != tokenize((1,))
    assert tokenize([1]) == tokenize([1])
    x = np.arange(2000)  # long enough to drop information in repr
    y = np.arange(2000)
    y[1000] = 0  # middle isn't printed in repr
    assert tokenize([x]) != tokenize([y])


def test_tokenize_dict():
    assert tokenize({'x': 1, 1: 'x'}) == tokenize({'x': 1, 1: 'x'})


def test_tokenize_ordered_dict():
    with ignoring(ImportError):
        from collections import OrderedDict
        a = OrderedDict([('a', 1), ('b', 2)])
        b = OrderedDict([('a', 1), ('b', 2)])
        c = OrderedDict([('b', 2), ('a', 1)])
        # Unlike plain dicts, OrderedDict tokens are order-sensitive.
        assert tokenize(a) == tokenize(b)
        assert tokenize(a) != tokenize(c)
def test_compute_no_opt():
# Bag does `fuse` by default. Test that with `optimize_graph=False` that
# doesn't get called. We check this by using a callback to track the keys
# that are computed.
from dask.callbacks import Callback
b = db.from_sequence(range(100), npartitions=4)
add1 = tz.partial(add, 1)
mul2 = tz.partial(mul, 2)
o = b.map(add1).map(mul2)
# Check that with the kwarg, the optimization doesn't happen
keys = []
with Callback(pretask=lambda key, *args: keys.append(key)):
o.compute(get=dask.get, optimize_graph=False)
assert len([k for k in keys if 'mul' in k[0]]) == 4
assert len([k for k in keys if 'add' in k[0]]) == 4
# Check that without the kwarg, the optimization does happen
keys = []
with Callback(pretask=lambda key, *args: keys.append(key)):
o.compute(get=dask.get)
assert len([k for k in keys if 'mul' in k[0]]) == 4
assert len([k for k in keys if 'add' in k[0]]) == 0
@pytest.mark.skipif('not da')
def test_compute_array():
    # compute() evaluates several dask arrays at once.
    arr = np.arange(100).reshape((10, 10))
    darr = da.from_array(arr, chunks=(5, 5))
    darr1 = darr + 1
    darr2 = darr + 2
    out1, out2 = compute(darr1, darr2)
    assert np.allclose(out1, arr + 1)
    assert np.allclose(out2, arr + 2)


@pytest.mark.skipif('not dd')
def test_compute_dataframe():
    # compute() evaluates several dask series at once.
    df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 5, 3, 3]})
    ddf = dd.from_pandas(df, npartitions=2)
    ddf1 = ddf.a + 1
    ddf2 = ddf.a + ddf.b
    out1, out2 = compute(ddf1, ddf2)
    pd.util.testing.assert_series_equal(out1, df.a + 1)
    pd.util.testing.assert_series_equal(out2, df.a + df.b)


@pytest.mark.skipif('not dd or not da')
def test_compute_array_dataframe():
    # Mixed collection types can be computed together.
    arr = np.arange(100).reshape((10, 10))
    darr = da.from_array(arr, chunks=(5, 5)) + 1
    df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 5, 3, 3]})
    ddf = dd.from_pandas(df, npartitions=2).a + 2
    arr_out, df_out = compute(darr, ddf)
    assert np.allclose(arr_out, arr + 1)
    pd.util.testing.assert_series_equal(df_out, df.a + 2)


@pytest.mark.skipif('not da or not db')
def test_compute_array_bag():
    x = da.arange(5, chunks=2)
    b = db.from_sequence([1, 2, 3])
    # Array and bag have incompatible default schedulers, so an explicit
    # get= must be passed.
    pytest.raises(ValueError, lambda: compute(x, b))
    # NOTE(review): `dask.async` is a syntax error on Python 3.7+ (async is a
    # keyword); newer dask exposes this as dask.local.get_sync.
    xx, bb = compute(x, b, get=dask.async.get_sync)
    assert np.allclose(xx, np.arange(5))
    assert bb == [1, 2, 3]


@pytest.mark.skipif('not da')
def test_compute_with_literal():
    # Plain Python values pass through compute() unchanged.
    x = da.arange(5, chunks=2)
    y = 10
    xx, yy = compute(x, y)
    assert (xx == x.compute()).all()
    assert yy == y
    assert compute(5) == (5,)


@pytest.mark.skipif('not da')
@pytest.mark.skipif(sys.flags.optimize == 2,
                    reason="graphviz exception with Python -OO flag")
def test_visualize():
    # visualize() accepts collections, literals and raw graphs, and writes
    # png/pdf output files.
    pytest.importorskip('graphviz')
    with tmpdir() as d:
        x = da.arange(5, chunks=2)
        x.visualize(filename=os.path.join(d, 'mydask'))
        assert os.path.exists(os.path.join(d, 'mydask.png'))
        x.visualize(filename=os.path.join(d, 'mydask.pdf'))
        assert os.path.exists(os.path.join(d, 'mydask.pdf'))
        visualize(x, 1, 2, filename=os.path.join(d, 'mydask.png'))
        assert os.path.exists(os.path.join(d, 'mydask.png'))
        dsk = {'a': 1, 'b': (add, 'a', 2), 'c': (mul, 'a', 1)}
        visualize(x, dsk, filename=os.path.join(d, 'mydask.png'))
        assert os.path.exists(os.path.join(d, 'mydask.png'))
def test_use_cloudpickle_to_tokenize_functions_in__main__():
    # Functions defined in __main__ can't be pickled by path; their token
    # must come from serializing the function body, not the module name.
    import sys
    from textwrap import dedent

    defn = dedent("""
    def inc():
        return x
    """)

    __main__ = sys.modules['__main__']
    exec(compile(defn, '<test>', 'exec'), __main__.__dict__)
    f = __main__.inc
    t = normalize_token(f)
    assert b'__main__' not in t


def test_optimizations_keyword():
    # A user-supplied graph optimization (rewriting inc -> dec) is applied
    # only while the `optimizations` option is set.
    def inc_to_dec(dsk, keys):
        for key in dsk:
            if dsk[key][0] == inc:
                dsk[key] = (dec,) + dsk[key][1:]
        return dsk

    x = dask.delayed(inc)(1)
    assert x.compute() == 2

    with dask.set_options(optimizations=[inc_to_dec]):
        assert x.compute() == 0

    assert x.compute() == 2
| mit |
fspaolo/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
    """Time lasso path computation over a grid of problem sizes.

    Benchmarks ``lars_path`` and ``lasso_path``, each with and without a
    precomputed Gram matrix.

    Parameters
    ----------
    samples_range, features_range : sequences of int
        Grid of ``n_samples`` / ``n_features`` values to benchmark.

    Returns
    -------
    dict
        Maps a method label to a list of elapsed seconds, one entry per
        (n_samples, n_features) pair, in grid order.
    """
    it = 0
    results = defaultdict(list)
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            dataset_kwargs = {
                'n_samples': n_samples,
                'n_features': n_features,
                # Floor division keeps these ints under Python 3; true
                # division would pass floats to make_regression, which
                # requires integer n_informative.
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
                #'effective_rank': None,
                'bias': 0.0,
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            X, y = make_regression(**dataset_kwargs)

            gc.collect()
            print("benchmarking lars_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (with Gram)'].append(delta)

            gc.collect()
            print("benchmarking lars_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (without Gram)'].append(delta)

            gc.collect()
            print("benchmarking lasso_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=True)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (with Gram)'].append(delta)

            gc.collect()
            print("benchmarking lasso_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=False)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (without Gram)'].append(delta)
    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    # Use the builtin int: `np.int` was a deprecated alias for it and was
    # removed in NumPy 1.24, where astype(np.int) raises AttributeError.
    samples_range = np.linspace(10, 2000, 5).astype(int)
    features_range = np.linspace(10, 2000, 5).astype(int)
    results = compute_bench(samples_range, features_range)

    max_time = max(max(t) for t in results.values())

    # One 3d surface subplot per benchmarked method.
    fig = plt.figure('scikit-learn Lasso path benchmark results')
    i = 1
    for c, (label, timings) in zip('bcry', sorted(results.items())):
        ax = fig.add_subplot(2, 2, i, projection='3d')
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        #ax.plot([1], [1], [1], color=c, label=label)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')
        ax.set_zlabel('Time (s)')
        ax.set_zlim3d(0.0, max_time * 1.1)
        ax.set_title(label)
        #ax.legend()
        i += 1
    plt.show()
| bsd-3-clause |
dongjoon-hyun/spark | python/pyspark/pandas/plot/matplotlib.py | 14 | 30172 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import matplotlib as mat
import numpy as np
import pandas as pd
from matplotlib.axes._base import _process_plot_format
from pandas.core.dtypes.inference import is_list_like
from pandas.io.formats.printing import pprint_thing
from pyspark.pandas.plot import (
TopNPlotBase,
SampledPlotBase,
HistogramPlotBase,
BoxPlotBase,
unsupported_function,
KdePlotBase,
)
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
from pandas.plotting._core import (
_all_kinds,
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
MPLPlot as PandasMPLPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
else:
from pandas.plotting._matplotlib import (
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
from pandas.plotting._core import PlotAccessor
from pandas.plotting._matplotlib.core import MPLPlot as PandasMPLPlot
_all_kinds = PlotAccessor._all_kinds
class PandasOnSparkBarPlot(PandasBarPlot, TopNPlotBase):
    """Bar plot over pandas-on-Spark data that only draws the rows selected
    by TopNPlotBase.get_top_n (to avoid pulling a huge dataset to the
    driver)."""

    def __init__(self, data, **kwargs):
        # Truncate to the top-n rows before delegating to pandas' BarPlot.
        super().__init__(self.get_top_n(data), **kwargs)

    def _plot(self, ax, x, y, w, start=0, log=False, **kwds):
        # set_result_text (from TopNPlotBase) annotates the axes -- presumably
        # noting when the data was truncated; confirm in TopNPlotBase.
        self.set_result_text(ax)
        return ax.bar(x, y, w, bottom=start, log=log, **kwds)
class PandasOnSparkBoxPlot(PandasBoxPlot, BoxPlotBase):
def boxplot(
self,
ax,
bxpstats,
notch=None,
sym=None,
vert=None,
whis=None,
positions=None,
widths=None,
patch_artist=None,
bootstrap=None,
usermedians=None,
conf_intervals=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
boxprops=None,
labels=None,
flierprops=None,
medianprops=None,
meanprops=None,
capprops=None,
whiskerprops=None,
manage_ticks=None,
# manage_xticks is for compatibility of matplotlib < 3.1.0.
# Remove this when minimum version is 3.0.0
manage_xticks=None,
autorange=False,
zorder=None,
precision=None,
):
def update_dict(dictionary, rc_name, properties):
"""Loads properties in the dictionary from rc file if not already
in the dictionary"""
rc_str = "boxplot.{0}.{1}"
if dictionary is None:
dictionary = dict()
for prop_dict in properties:
dictionary.setdefault(prop_dict, mat.rcParams[rc_str.format(rc_name, prop_dict)])
return dictionary
# Common property dictionaries loading from rc
flier_props = [
"color",
"marker",
"markerfacecolor",
"markeredgecolor",
"markersize",
"linestyle",
"linewidth",
]
default_props = ["color", "linewidth", "linestyle"]
boxprops = update_dict(boxprops, "boxprops", default_props)
whiskerprops = update_dict(whiskerprops, "whiskerprops", default_props)
capprops = update_dict(capprops, "capprops", default_props)
medianprops = update_dict(medianprops, "medianprops", default_props)
meanprops = update_dict(meanprops, "meanprops", default_props)
flierprops = update_dict(flierprops, "flierprops", flier_props)
if patch_artist:
boxprops["linestyle"] = "solid"
boxprops["edgecolor"] = boxprops.pop("color")
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False' but none-the-less deal with it to keep back
# compatibility
if sym == "":
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle="none", marker="", color="none")
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops["marker"] = marker
# if we have a color, use it
if color is not None:
# assume that if color is passed in the user want
# filled symbol, if the users want more control use
# flierprops
flierprops["color"] = color
flierprops["markerfacecolor"] = color
flierprops["markeredgecolor"] = color
# replace medians if necessary:
if usermedians is not None:
if len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len(
bxpstats
):
raise ValueError("usermedians length not compatible with x")
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats["med"] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
err_mess = "conf_intervals length not compatible with x"
raise ValueError(err_mess)
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError("each confidence interval must " "have two values")
else:
if ci[0] is not None:
stats["cilo"] = ci[0]
if ci[1] is not None:
stats["cihi"] = ci[1]
should_manage_ticks = True
if manage_xticks is not None:
should_manage_ticks = manage_xticks
if manage_ticks is not None:
should_manage_ticks = manage_ticks
if LooseVersion(mat.__version__) < LooseVersion("3.1.0"):
extra_args = {"manage_xticks": should_manage_ticks}
else:
extra_args = {"manage_ticks": should_manage_ticks}
artists = ax.bxp(
bxpstats,
positions=positions,
widths=widths,
vert=vert,
patch_artist=patch_artist,
shownotches=notch,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
meanprops=meanprops,
meanline=meanline,
showfliers=showfliers,
capprops=capprops,
whiskerprops=whiskerprops,
zorder=zorder,
**extra_args,
)
return artists
def _plot(self, ax, bxpstats, column_num=None, return_type="axes", **kwds):
bp = self.boxplot(ax, bxpstats, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _compute_plot_data(self):
colname = self.data.name
spark_column_name = self.data._internal.spark_column_name_for(self.data._column_label)
data = self.data
# Updates all props with the rc defaults from matplotlib
self.kwds.update(PandasOnSparkBoxPlot.rc_defaults(**self.kwds))
# Gets some important kwds
showfliers = self.kwds.get("showfliers", False)
whis = self.kwds.get("whis", 1.5)
labels = self.kwds.get("labels", [colname])
# This one is pandas-on-Spark specific to control precision for approx_percentile
precision = self.kwds.get("precision", 0.01)
# # Computes mean, median, Q1 and Q3 with approx_percentile and precision
col_stats, col_fences = BoxPlotBase.compute_stats(data, spark_column_name, whis, precision)
# # Creates a column to flag rows as outliers or not
outliers = BoxPlotBase.outliers(data, spark_column_name, *col_fences)
# # Computes min and max values of non-outliers - the whiskers
whiskers = BoxPlotBase.calc_whiskers(spark_column_name, outliers)
if showfliers:
fliers = BoxPlotBase.get_fliers(spark_column_name, outliers, whiskers[0])
else:
fliers = []
# Builds bxpstats dict
stats = []
item = {
"mean": col_stats["mean"],
"med": col_stats["med"],
"q1": col_stats["q1"],
"q3": col_stats["q3"],
"whislo": whiskers[0],
"whishi": whiskers[1],
"fliers": fliers,
"label": labels[0],
}
stats.append(item)
self.data = {labels[0]: stats}
def _make_plot(self):
bxpstats = list(self.data.values())[0]
ax = self._get_ax(0)
kwds = self.kwds.copy()
for stats in bxpstats:
if len(stats["fliers"]) > 1000:
stats["fliers"] = stats["fliers"][:1000]
ax.text(
1,
1,
"showing top 1,000 fliers only",
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self.data.items()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
@staticmethod
def rc_defaults(
notch=None,
vert=None,
whis=None,
patch_artist=None,
bootstrap=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
**kwargs
):
# Missing arguments default to rcParams.
if whis is None:
whis = mat.rcParams["boxplot.whiskers"]
if bootstrap is None:
bootstrap = mat.rcParams["boxplot.bootstrap"]
if notch is None:
notch = mat.rcParams["boxplot.notch"]
if vert is None:
vert = mat.rcParams["boxplot.vertical"]
if patch_artist is None:
patch_artist = mat.rcParams["boxplot.patchartist"]
if meanline is None:
meanline = mat.rcParams["boxplot.meanline"]
if showmeans is None:
showmeans = mat.rcParams["boxplot.showmeans"]
if showcaps is None:
showcaps = mat.rcParams["boxplot.showcaps"]
if showbox is None:
showbox = mat.rcParams["boxplot.showbox"]
if showfliers is None:
showfliers = mat.rcParams["boxplot.showfliers"]
return dict(
whis=whis,
bootstrap=bootstrap,
notch=notch,
vert=vert,
patch_artist=patch_artist,
meanline=meanline,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
showfliers=showfliers,
)
class PandasOnSparkHistPlot(PandasHistPlot, HistogramPlotBase):
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _compute_plot_data(self):
self.data, self.bins = HistogramPlotBase.prepare_hist_data(self.data, self.bins)
def _make_plot(self):
# TODO: this logic is similar with KdePlot. Might have to deduplicate it.
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
output_series = HistogramPlotBase.compute_hist(self.data, self.bins)
for (i, label), y in zip(enumerate(self.data._internal.column_labels), output_series):
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
# Since the counts were computed already, we use them as weights and just generate
# one entry for each bin
n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
class PandasOnSparkPiePlot(PandasPiePlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkAreaPlot(PandasAreaPlot, SampledPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkLinePlot(PandasLinePlot, SampledPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkBarhPlot(PandasBarhPlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkScatterPlot(PandasScatterPlot, TopNPlotBase):
def __init__(self, data, x, y, **kwargs):
super().__init__(self.get_top_n(data), x, y, **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkKdePlot(PandasKdePlot, KdePlotBase):
def _compute_plot_data(self):
self.data = KdePlotBase.prepare_kde_data(self.data)
def _make_plot(self):
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _get_ind(self, y):
return KdePlotBase.get_ind(y, self.ind)
@classmethod
def _plot(
cls, ax, y, style=None, bw_method=None, ind=None, column_num=None, stacking_id=None, **kwds
):
y = KdePlotBase.compute_kde(y, bw_method=bw_method, ind=ind)
lines = PandasMPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
_klasses = [
PandasOnSparkHistPlot,
PandasOnSparkBarPlot,
PandasOnSparkBoxPlot,
PandasOnSparkPiePlot,
PandasOnSparkAreaPlot,
PandasOnSparkLinePlot,
PandasOnSparkBarhPlot,
PandasOnSparkScatterPlot,
PandasOnSparkKdePlot,
]
_plot_klass = {getattr(klass, "_kind"): klass for klass in _klasses}
_common_kinds = {"area", "bar", "barh", "box", "hist", "kde", "line", "pie"}
_series_kinds = _common_kinds.union(set())
_dataframe_kinds = _common_kinds.union({"scatter", "hexbin"})
_pandas_on_spark_all_kinds = _common_kinds.union(_series_kinds).union(_dataframe_kinds)
def plot_pandas_on_spark(data, kind, **kwargs):
if kind not in _pandas_on_spark_all_kinds:
raise ValueError("{} is not a valid plot kind".format(kind))
from pyspark.pandas import DataFrame, Series
if isinstance(data, Series):
if kind not in _series_kinds:
return unsupported_function(class_name="pd.Series", method_name=kind)()
return plot_series(data=data, kind=kind, **kwargs)
elif isinstance(data, DataFrame):
if kind not in _dataframe_kinds:
return unsupported_function(class_name="pd.DataFrame", method_name=kind)()
return plot_frame(data=data, kind=kind, **kwargs)
def plot_series(
data,
kind="line",
ax=None, # Series unique
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False, # Series unique
**kwds
):
"""
Make plots of Series using matplotlib / pylab.
Each plot kind has a corresponding method on the
``Series.plot`` accessor:
``s.plot(kind='line')`` is equivalent to
``s.plot.line()``.
Parameters
----------
data : Series
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
ax : matplotlib axes object
If not passed, uses gca()
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
# function copied from pandas.plotting._core
# so it calls modified _plot below
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
with plt.rc_context():
ax = plt.gca()
ax = PandasMPLPlot._get_ax_layer(ax)
return _plot(
data,
kind=kind,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
label=label,
secondary_y=secondary_y,
**kwds,
)
def plot_frame(
data,
x=None,
y=None,
kind="line",
ax=None,
subplots=None,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwds
):
"""
Make plots of DataFrames using matplotlib / pylab.
Each plot kind has a corresponding method on the
``DataFrame.plot`` accessor:
``psdf.plot(kind='line')`` is equivalent to
``psdf.plot.line()``.
Parameters
----------
data : DataFrame
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot
ax : matplotlib axes object
If not passed, uses gca()
x : label or position, default None
y : label, position or list of label, positions, default None
Allows plotting of one column versus another.
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
sharex: bool or None, default is None
Whether to share x axis or not.
sharey: bool, default is False
Whether to share y axis or not.
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
sort_columns: bool, default is False
When True, will sort values on plots.
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
return _plot(
data,
kind=kind,
x=x,
y=y,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
subplots=subplots,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
sharex=sharex,
sharey=sharey,
secondary_y=secondary_y,
layout=layout,
sort_columns=sort_columns,
**kwds,
)
def _plot(data, x=None, y=None, subplots=False, ax=None, kind="line", **kwds):
from pyspark.pandas import DataFrame
# function copied from pandas.plotting._core
# and adapted to handle pandas-on-Spark DataFrame and Series
kind = kind.lower().strip()
kind = {"density": "kde"}.get(kind, kind)
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
# scatter and hexbin are inherited from PlanePlot which require x and y
if kind in ("scatter", "hexbin"):
plot_obj = klass(data, x, y, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
# check data type and do preprocess before applying plot
if isinstance(data, DataFrame):
if x is not None:
data = data.set_index(x)
# TODO: check if value of y is plottable
if y is not None:
data = data[y]
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
| apache-2.0 |
stevenzhang18/Indeed-Flask | lib/pandas/tseries/tests/test_util.py | 10 | 3569 | from pandas.compat import range
import nose
import numpy as np
from numpy.testing.decorators import slow
from pandas import Series, date_range
import pandas.util.testing as tm
from datetime import datetime, date
from pandas.tseries.tools import normalize_date
from pandas.tseries.util import pivot_annual, isleapyear
class TestPivotAnnual(tm.TestCase):
"""
New pandas of scikits.timeseries pivot_annual
"""
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_annual(ts, 'D')
doy = ts.index.dayofyear
doy[(~isleapyear(ts.index.year)) & (doy >= 60)] += 1
for i in range(1, 367):
subset = ts[doy == i]
subset.index = [x.year for x in subset.index]
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
self.assertEqual(result.name, i)
# check leap days
leaps = ts[(ts.index.month == 2) & (ts.index.day == 29)]
day = leaps.index.dayofyear[0]
leaps.index = leaps.index.year
leaps.name = 60
tm.assert_series_equal(annual[day].dropna(), leaps)
def test_hourly(self):
rng_hourly = date_range(
'1/1/1994', periods=(18 * 8760 + 4 * 24), freq='H')
data_hourly = np.random.randint(100, 350, rng_hourly.size)
ts_hourly = Series(data_hourly, index=rng_hourly)
grouped = ts_hourly.groupby(ts_hourly.index.year)
hoy = grouped.apply(lambda x: x.reset_index(drop=True))
hoy = hoy.index.droplevel(0).values
hoy[~isleapyear(ts_hourly.index.year) & (hoy >= 1416)] += 24
hoy += 1
annual = pivot_annual(ts_hourly)
ts_hourly = ts_hourly.astype(float)
for i in [1, 1416, 1417, 1418, 1439, 1440, 1441, 8784]:
subset = ts_hourly[hoy == i]
subset.index = [x.year for x in subset.index]
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
self.assertEqual(result.name, i)
leaps = ts_hourly[(ts_hourly.index.month == 2) &
(ts_hourly.index.day == 29) &
(ts_hourly.index.hour == 0)]
hour = leaps.index.dayofyear[0] * 24 - 23
leaps.index = leaps.index.year
leaps.name = 1417
tm.assert_series_equal(annual[hour].dropna(), leaps)
def test_weekly(self):
pass
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_annual(ts, 'M')
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = [x.year for x in subset.index]
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
self.assertEqual(result.name, i)
def test_period_monthly(self):
pass
def test_period_daily(self):
pass
def test_period_weekly(self):
pass
def test_normalize_date():
value = date(2012, 9, 7)
result = normalize_date(value)
assert(result == datetime(2012, 9, 7))
value = datetime(2012, 9, 7, 12)
result = normalize_date(value)
assert(result == datetime(2012, 9, 7))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
henrykironde/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
Selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
wiedenkje/cyberexplorer | cyberexplorer/cyberexplorer/Select.py | 1 | 1732 | import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import math
import numpy as np
import sys
import os
import h5py
import csv
#class needed to load a specified data set
class Select:
'''#Function to open a data set at the location
def Open(self, Location):
"""
Open the preprocessed data
"""
new_data = {}
try:
for x in os.listdir(Location):
filename, file_extension = os.path.splitext(x)
if file_extension == '.h5':
df = pd.read_hdf(Location + x, dtype=np.int32)
new_data[filename] = df
return new_data
except:
print("We could not read the data")'''
#Opens the data set on the provided location
def Open(self, Location):
'''
Opens the data set on the provided location
'''
new_data = {}
new_list = {}
try:
for x in os.listdir(Location):
filename, file_extension = os.path.splitext(x)
if file_extension == '.h5':
h5f = h5py.File(Location + x,'r')
b = h5f['dataset_1'][:]
b = np.array(b, dtype=np.int64)
h5f.close()
new_data[filename] = b
del b
if file_extension == '.txt':
with open(Location + x, "r") as Input:
for line in Input:
lijst = line.strip().split(',')
new_list[filename] = lijst
except:
print("We could not read the data")
return new_data, new_list
| mit |
annahs/atmos_research | WHI_2012_display_all_HYSPLIT_BTs_colored_by_cluster.py | 1 | 5107 | import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np
import os
import sys
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import pickle
import copy
from mpl_toolkits.basemap import Basemap
import matplotlib.colors
import shutil
GBPS_lat_min = 48.0
GBPS_lat_max = 49.4
GBPS_lon_min = -123.5
GBPS_lon_max = -122
GBPS_lat_min_2 = 47.0
GBPS_lat_max_2 = 48
GBPS_lon_min_2 = -122.75
GBPS_lon_max_2 = -122
timezone = 1
endpointsPARIS = []
clusters = []
cluster_endpoints={}
for number in [1,2,3,4,5,6,7]:
cluster_endpoints[number]=[]
clusters.append(number)
GBPS_count = []
CLUSLIST_file = 'C:/hysplit4/working/WHI/2hrly_HYSPLIT_files/all_with_sep_GBPS/CLUSLIST_6-mod-precip'
CLUSLIST_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/HYSPLIT/clustering/CLUSLIST_10'
new_cluslist_data = []
with open(CLUSLIST_file,'r') as f:
for line in f:
newline_c = line.split()
cluster = int(newline_c[0])
file = newline_c[7]
if cluster == 6:
cluster = 4
tdump_file = open(file, 'r')
endpoints = []
data_start = False
i = 0
significant_rainfall = 0
GBPS = 0
rainy = False
total_rainfall = 0
for line in tdump_file:
newline = line.split()
if data_start == True:
lat = float(newline[9])
lon = float(newline[10])
pressure = float(newline[11])
rainfall = float(newline[13]) #in mm/hour
total_rainfall = total_rainfall + rainfall
if (GBPS_lat_min <= lat <=GBPS_lat_max) and (GBPS_lon_min <= lon <= GBPS_lon_max):
GBPS+=1
if (GBPS_lat_min_2 <= lat <=GBPS_lat_max_2) and (GBPS_lon_min_2 <= lon <= GBPS_lon_max_2):
GBPS+=1
if rainfall >= 1. and i <= 72:
significant_rainfall +=1
else:
significant_rainfall = 0
if significant_rainfall >= 2:
rainy = True
endpoint = [lat, lon, rainfall]
endpoints.append(endpoint)
i+=1
if newline[1] == 'PRESSURE':
data_start = True
tdump_file.close()
newline_c.append(total_rainfall)
new_cluslist_data.append(newline_c)
if rainy == True:
cluster_endpoints[7].append(endpoints)
elif GBPS >= 24:
#cluster_endpoints[cluster].append(endpoints)
#shutil.move(file, 'C:/hysplit4/working/WHI/2hrly_GBPS_tdump/')
cluster_endpoints[6].append(endpoints)
GBPS_count.append(GBPS)
else:
cluster_endpoints[cluster].append(endpoints)
print len(GBPS_count)
print 'mean hours in GBPS', np.mean(GBPS_count)
#save new cluslist data to file
os.chdir('C:/hysplit4/working/WHI/2hrly_HYSPLIT_files/all_with_sep_GBPS/')
file = open('CLUSLIST_6-mod-precip_amount_added', 'w')
for row in new_cluslist_data:
line = '\t'.join(str(x) for x in row)
file.write(line + '\n')
file.close()
#plottting
###set up the basemap instance
lat_pt = 52.
lon_pt = -162.
m = Basemap(width=7000000,height=5500000,
rsphere=(6378137.00,6356752.3142),
resolution='l',area_thresh=1000.,projection='lcc',
lat_1=48.,lat_2=55,lat_0=lat_pt,lon_0=lon_pt)
fig, axes = plt.subplots(4,2, figsize=(10, 10), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = 0.00, wspace=0.00)
axs = axes.ravel()
for i in [-1,-2]:
axes[-1, i].axis('off')
axes[-2, i].axis('off')
axes[-3, i].axis('off')
axes[-4, i].axis('off')
colors = ['b','g','r','c','m','k','y','#DF7401','#585858','grey','#663300']
air_mass_labels = ['Bering','Northern Coastal/Continental','Northern Pacific','Southern Pacific','Western Pacific/Asia','>= 24hrs in GBPS', 'rainy']
for cluster_no in clusters:
list = cluster_endpoints[cluster_no]
print cluster_no, len(list)
axs[cluster_no-1] = fig.add_subplot(4,2,cluster_no)
m.drawmapboundary(fill_color='white')
m.drawcoastlines()
m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
parallels = np.arange(0.,81,10.)
m.drawparallels(parallels,labels=[False,True,False,False])
meridians = np.arange(10.,351.,20.)
m.drawmeridians(meridians,labels=[False,False,False,True])
for row in list:
np_endpoints = np.array(row)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
pressure = np_endpoints[:,2]
x,y = m(lons,lats)
bt = m.scatter(x,y, c=pressure, cmap=plt.get_cmap('jet'),edgecolors='none', marker = 'o' )
#bt = m.plot(x,y,color=colors[cluster_no-1])
cb = plt.colorbar()
plt.text(0.05,0.05,air_mass_labels[cluster_no-1], transform=axs[cluster_no-1].transAxes)
#GBPS box 1
lats = [GBPS_lat_max,GBPS_lat_min,GBPS_lat_min,GBPS_lat_max, GBPS_lat_max]
lons = [GBPS_lon_max,GBPS_lon_max,GBPS_lon_min,GBPS_lon_min, GBPS_lon_max]
x,y = m(lons,lats)
hb = m.plot(x,y, color = 'red',linewidth = 2.0)
#GBPS box 2
lats = [GBPS_lat_max_2,GBPS_lat_min_2,GBPS_lat_min_2,GBPS_lat_max_2, GBPS_lat_max_2]
lons = [GBPS_lon_max_2,GBPS_lon_max_2,GBPS_lon_min_2,GBPS_lon_min_2, GBPS_lon_max_2]
x,y = m(lons,lats)
hb = m.plot(x,y, color = 'red',linewidth = 2.0)
dir = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/HYSPLIT/'
os.chdir(dir)
#plt.savefig('WHI_2012_all_trajectories_colored_by_cluster_6.png', bbox_inches='tight')
plt.show() | mit |
desihub/fiberassign | old/bin/universal_tiling_sequence_images.py | 1 | 3312 | # this produces plots of the tiles periodically in an equal area Sanson Flamsteed projection
# steps gives the number of plots produced, with equal intervals
# the order is initially just the ordering of the tiles (plates) in desi-tiles-full.par
# but can be changed with the function reorder
# the output files are numbered tiling00.png, tiling01.png, ...tiling10.png,...tiling11.png...
# makes is easy to make a movie from them using ImageMagick, for example with
# convert -delay 100 -loop 1 tiling*.png tiling.gif
# the code expects to find the tiling file desi-tiles.par in the same directory
# to run, need to give file listing order of tiles, e.g.
# python universal_tiling_sequence_images.py default_survey_list.txt
# the last is a sequence of tiles between 1 and 28810 from the full list desi-tiles.par
# with typically 10666 lines
#!/usr/bin/env python
import sys
import os
import numpy as N
from pylab import *
import matplotlib.pyplot as P
def readmulticolumn(f, names, types):
    """Parse whitespace-separated columns from *f* into parallel lists.

    Blank lines and lines starting with '#' are skipped.  For each data
    line, token j is converted with types[j] and appended to names[j].
    """
    for raw in f:
        raw = raw.strip()
        # Skip comments and empty lines.
        if not raw or raw.startswith('#'):
            continue
        tokens = raw.split()
        for col in range(len(names)):
            names[col].append(types[col](tokens[col]))
    return
def reorder(j):
    """Map position *j* in the plotting sequence to a tile number.

    Currently a plain lookup into the externally supplied ``tile_order``
    list; kept as a hook so a different tile ordering can be swapped in
    without touching the plotting loop.
    """
    return tile_order[j]
tileid=list()
ra=list()
dec=list()
passs=list()
in_desi=list()
ebv_med=list()
airmass=list()
exposefac=list()
no=list()
newfig=list()
tilenames=[tileid,no,ra,dec,passs,in_desi]
tiletypes=[str,int,float,float,int,int]
fstandard=open('/project/projectdirs/desi/software/cori/desimodel/0.4/data/footprint/desi-tiles.par','r')
readmulticolumn(fstandard,tilenames,tiletypes)
# get ordering of tile from external file specified in execution command
forder=open(str(sys.argv[1]),'r')
tile_order=list()
for line in forder:
tile_order.append([int(x) for x in line.split()][0])
for i in range(len(tile_order)):
if(i<1000):
print i,tile_order[i]
const=N.pi/180.
print len(tile_order)
len_tile_order=len(tile_order)
steps=30
y=list()
x=list()
color=list()
color_choices=['black','blue','red','green','yellow','magenta']
for k in range(steps):
for j in range(k*len_tile_order/steps,(k+1)*len_tile_order/steps):
i=reorder(j)-1
if 1==1:
this_ra=ra[i]
if this_ra>300:
this_ra-=360.
y.append(dec[i])
if(dec[i]<-20):
print " error", k,j,no[i],ra[i],dec[i],in_desi[i]
exit()
x.append((-this_ra+120.)*N.cos(const*dec[i]))
color.append(color_choices[passs[i]])
fig=P.scatter(x,y,c=color,s=5,edgecolor='none')
angle=[-60,30,120,210,300]
for i in range(5):
this_angle=angle[i]
yy=linspace(-20,90,100)
xx=(-this_angle+120.)*N.cos(const*yy)
newfig.append(plot(xx,yy,color='0.03'))
text(-20,-30,'RA=120')
text(-180,-30,'RA=300')
text(-100,-30,'RA=210')
text(60,-30,'RA=30')
text(140,-30,'RA=-60')
fig.axes.get_xaxis().set_ticks([])
ylabel('DEC (deg)')
if(k<10):
savefig('tiling00'+str(k)+'.png')
elif(k<100):
savefig('tiling0'+str(k)+'.png')
else:
savefig('tiling'+str(k)+'.png')
| bsd-3-clause |
sraising/Bikeley | distance.py | 1 | 7544 | from math import sin, cos, sqrt, atan2, radians
import csv
import pandas as pd
import sqlite3
import time
import ast
import datetime
import numpy as np
import pdb
#from geopy.distance import vincenty
from json import loads
from time import sleep
from urllib2 import Request, urlopen
def des_index(conn_node):
    """Translate a node label into a 0-based index into the combined
    rack + intersection location list.

    Bike-rack nodes 'B1'..'B13' map to 0..12; intersection nodes 'I1'..
    map to 13 onward.

    Raises
    ------
    ValueError
        For an empty label or a prefix other than 'B'/'I' (the previous
        code silently returned None in that case, hiding bad input).
    """
    if not conn_node:
        raise ValueError("empty node label")
    prefix, number = conn_node[0], conn_node[1:]
    if prefix == 'B':
        return int(number) - 1
    if prefix == 'I':
        return int(number) + 12
    raise ValueError("unknown node prefix in %r (expected 'B' or 'I')" % conn_node)
# https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins=40.6655101,-73.89188969999998&destinations=40.6905615%2C-73.9976592%7C&mode=bicycling&key=AIzaSyA_uROdHUg3zmRIkpPLyZ1P8aJKHSOYvvU
# https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins=37.8745231,-122.2599953&destinations=37.8741759%2C--122.2599632%7C&key=AIzaSyBpoEofda2Svw5-CCBu6-WdVtX2kCYml8k
# rack_lat = [37.8745231,37.8741759,37.8737186,37.8742267,37.874811,
# 37.8760306,37.8756918,37.8757003,37.8755436,37.8750821,37.8735746,
# 37.8737609,37.8739472]
# int_lat = [37.8753531,37.8752048,37.8750948,37.874629,
# 37.8746967,37.8746755,37.8738964,37.8738032,37.8736932,37.8736254,
# 37.8734984,37.8755224,37.8738244,37.8739853,37.8732739,37.8727658,
# 37.8735619,37.8749254,37.8730834,37.8750228,37.8761364,37.8760475,
# 37.8762931,37.8764752]
# rack_long = [-122.2599953,-122.2599632,-122.259534,-122.2588903,
# -122.2588795,-122.2590297,-122.2589654,-122.2583109,-122.2573185,
# -122.258032,-122.2581822,-122.2575921,-122.2565782]
# int_long = [-122.2583324,-122.2594213,-122.2601509,-122.2600758,
# -122.2589493,-122.2578442,-122.2575116,-122.2582841,-122.2598612,
# -122.2587883,-122.2596467,-122.256825,-122.255398,-122.2567874,
# -122.2573024,-122.2586435,-122.2565728,-122.2584826,-122.2579461,
# -122.2567981,-122.2595716,-122.2603333,-122.2585255,-122.2570342]
#locations=[(-122.2599953, 37.8745231), (50.449561, 30.525366)] #(lat,lon) pairs
# rack_loc = zip(rack_lat, rack_long)
# int_loc = zip(int_lat, int_long)
# #locations = rack_loc + int_loc
# print locations
latitude = [37.8745189,37.8741801,37.8737101,37.8743601,37.8746691,37.8758019,
37.8757003,37.875442,37.8750821,37.8737969,37.8738456,37.8738117,37.8753531,
37.8752048,37.8750948,37.8746755,37.8736,37.8734984,37.8755224,37.8738244,
37.8732739,37.8727658,37.8730834,37.8761364,37.8762931]
longitude = [-122.2600489,-122.2599954,-122.259534,-122.2588072,-122.2589492,
-122.2589869,-122.2583967,-122.2573721,-122.258032,-122.2582814,-122.2575224,
-122.2566533,-122.2583324,-122.2594213,-122.2601509,-122.2578442,-122.2587883,
-122.2596467,-122.256825,-122.255398,-122.2573024,-122.2586435,-122.2579461,
-122.2595716,-122.2585255]
locations = zip(latitude, longitude)
dic = {
1: [2,5,15], 2: [1,3,18], 3: [2,17,18],
4: [5,10], 5: [1,4], 6: [13,14,24,25],
7: [13,25], 8: [13,19], 9: [13,16],
10: [4,11,17,23], 11: [10,12,16,21], 12: [11,20,21],
13: [6,7,8,9,14], 14: [6,13,15,24], 15: [1,14,24],
16: [9,11], 17: [3,10,18,22], 18: [2,3,17,22],
19: [8,20,25], 20: [12,19,21], 21: [11,12,20,23],
22: [17,18,23], 23: [10,21,22], 24: [6,14,15,25],
25: [6,7,19,24]
}
for i in range(0, len(dic[1])):
print dic[1][i]
#pdb.set_trace()
#pdb.set_trace()
#https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins=40.6655101,-73.89188969999998&destinations=40.6905615%2C-73.9976592%7C&key=AIzaSyA_uROdHUg3zmRIkpPLyZ1P8aJKHSOYvvU
# orr = []
# orr.append(37.8745231)
# orr.append(-122.2599953)
# des = []
# des.append(37.8741759)
# des.append(-122.2599632)
ls = []
#pdb.set_trace()
# pdb.set_trace()
#print 'https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins={0},{1}&destinations={2}%2C{3}%7C&mode=bicycling&key=AIzaSyBpoEofda2Svw5-CCBu6-WdVtX2kCYml8k'.format(orr[0],orr[1],des[0],des[1])
for i in range(0, len(locations)):
orr = locations[i]
print 'Origin:' + str(orr)
duration = [''] * 25
for j in range(0, len(dic[i+1])):
des_index = dic[i+1][j]
des = locations[des_index-1]
print str(orr) + ' to ' + str(des_index) + ': ' + str(des)
#des_ind_num = des_index(conn_node)
# print 'Destination: ' + str(des)
try:
#request = Request('https://maps.googleapis.com/maps/api/elevation/json?locations={0},{1}&key=AIzaSyA_uROdHUg3zmRIkpPLyZ1P8aJKHSOYvvU'.format(loc[0],loc[1]))
# pdb.set_trace()
request = Request('https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins={0},{1}&destinations={2}%2C{3}%7C&mode=bicycling&key=AIzaSyBpoEofda2Svw5-CCBu6-WdVtX2kCYml8k'.format(orr[0],orr[1],des[0],des[1]))
print request
# https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins=40.6655101,-73.89188969999998&destinations=40.6905615%2C-73.9976592%7C&key=AIzaSyA_uROdHUg3zmRIkpPLyZ1P8aJKHSOYvvU
# request = Request('https://maps.googleapis.com/maps/api/elevation/json?locations={0},{1}&key=AIzaSyBpoEofda2Svw5-CCBu6-WdVtX2kCYml8k'.format(loc[0],loc[1]))
response = urlopen(request).read()
places = loads(response)
dur = str(places['rows'][0]['elements'][0]['distance']['text'])
print 'duration is: ' + dur
#print 'At {0} elevation is: {1}'.format(loc, places['results'][0]['elevation'])
sleep(1)
duration[des_index-1] = dur
except:
print 'Error for location: {0}'.format(orr[0])
ls.append(duration)
with open("distance_output_11_15.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(ls)
pdb.set_trace()
# newport_ri = (37.8745231, -122.2599953)
# cleveland_oh = (37.8741759, -122.2599632)
# print(vincenty(newport_ri, cleveland_oh).miles)
# pdb.set_trace()
df = pd.read_csv("testbed.csv")
# R = 6371.0
# lat1 = df["Latitude"]
# lat2 = df.iloc[0,1]
# lon1 = df["Longitude"]
# lon2 = df.iloc[0,2]
# dlat = radians(abs(lat2 - lat1))
# dlon = radians(abs(lon2 - lon1))
#a = sin(radians(abs(df.iloc[0,1] - df["Latitude"]))/2)**2 + cos(df.iloc[0,1]) * sin(radians(abs(df.iloc[0,2] - df["Longitude"]))/2)**2
# c = R * 2 * atan2(sqrt(sin(radians(abs(df.iloc[0,1] - df["Latitude"]))/2)**2 + cos(df.iloc[0,1])
# * sin(radians(abs(df.iloc[0,2] - df["Longitude"]))/2)**2),
# sqrt(1-(sin(radians(abs(df.iloc[0,1] - df["Latitude"]))/2)**2 + cos(df.iloc[0,1])
# * sin(radians(abs(df.iloc[0,2] - df["Longitude"]))/2)**2)))
# distance = R*c
# df["B1"] = df["B1"].astype(float)
# pdb.set_trace()
# df["B1"] = R * 2 * cos(df.iloc[0,1])
# pdb.set_trace()
rad = 0.0174533
df["B1"] = R * sqrt ((rad * (df.iloc[0,2] - df["Longitude"]) * cos(0.5 * rad * (df.iloc[0,1] + df["Latitude"])))**2 *
(df.iloc[0,1] - df["Latitude"]))**2
pdb.set_trace()
df["B1"] = R * 2 * float(atan2(sqrt(sin(radians(abs(df.iloc[0,1] - df["Latitude"]))/2)**2 + cos(df.iloc[0,1])
* sin(radians(abs(df.iloc[0,2] - df["Longitude"]))/2)**2),
sqrt(1-(sin(radians(abs(df.iloc[0,1] - df["Latitude"]))/2)**2 + cos(df.iloc[0,1])
* sin(radians(abs(df.iloc[0,2] - df["Longitude"]))/2)**2))))
# def distance_calc(x):
# lat1 = df["Latitude"]
# lat2 = df.iloc[0,1]
# lon1 = df["Longitude"]
# lon2 = df.iloc[0,2]
# return lat1-lat2
pdb.set_trace()
#-------------------------------------------------
# # to sql
# current_time = "testbed_db"
# conn = sqlite3.connect(current_time + '.db')
# c = conn.cursor()
# df.to_sql("raw", conn, if_exists = 'replace')
#------------------------------------------------- | gpl-3.0 |
schets/lwan | tools/benchmark.py | 6 | 4379 | #!/usr/bin/python
import sys
import json
import commands
import time
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
def clearstderrline():
    """Erase the current terminal line on stderr (ANSI EL2 escape)."""
    sys.stderr.write('\033[2K')
def weighttp(url, n_threads, n_connections, n_requests, keep_alive):
    """Run one weighttp benchmark and return its parsed JSON results.

    Shells out to the weighttp binary on PATH with -j (JSON output,
    available only in the patched fork) and echoes the command to stderr
    as a progress indicator.  stderr of weighttp itself is discarded.
    """
    keep_alive = '-k' if keep_alive else ''
    command = 'weighttp %(keep_alive)s ' \
              '-t %(n_threads)d ' \
              '-c %(n_connections)d ' \
              '-n %(n_requests)d ' \
              '-j ' \
              '%(url)s 2> /dev/null' % locals()
    clearstderrline()
    sys.stderr.write('*** %s\r' % command)
    output = commands.getoutput(command)
    return json.loads(output)
def weighttp_has_json_output():
    # Probe the installed weighttp binary: the patched fork that can emit
    # JSON accepts -j, while a stock build reports it as an unknown option.
    output = commands.getoutput('weighttp -j')
    return not 'unknown option: -j' in output
def steprange(initial, final, steps=10):
    """Yield values from initial to final (inclusive) in ``steps`` equal
    increments.

    Uses floor division so the behaviour matches the original Python 2
    integer arithmetic, and clamps the step to at least 1: previously,
    when ``final - initial`` was smaller than ``steps`` the step floored
    to 0 and the generator looped forever.
    """
    step = (final - initial) // steps
    if step <= 0:
        step = 1  # guard against an infinite (or backwards) iteration
    while initial <= final:
        yield initial
        initial += step
def sleepwithstatus(msg, period):
    """Sleep for roughly ``period`` seconds while animating a spinner on
    stderr, then wipe the status line."""
    elapsed = 0
    tick = 0
    while elapsed <= period:
        clearstderrline()
        sys.stderr.write('\r%s: %s' % (msg, '/|\\-'[tick % 4]))
        time.sleep(0.1)
        elapsed += 0.1
        tick += 1
    sys.stderr.write('\r')
    clearstderrline()
def cmdlineboolarg(arg):
    """Return True if flag ``arg`` is present in sys.argv, consuming it."""
    present = arg in sys.argv
    if present:
        sys.argv.remove(arg)
    return present
def cmdlineintarg(arg, default=0):
    """Pop option ``arg`` and its integer value from sys.argv.

    Returns ``default`` when the option is absent or its value does not
    parse as an integer; the flag and value tokens are removed either way.
    """
    if arg not in sys.argv:
        return default
    value = default
    index = sys.argv.index(arg)
    del sys.argv[index]  # drop the flag itself; its value now sits at index
    try:
        value = int(sys.argv[index])
    except ValueError:
        print('Argument is of invalid type for argument %s, assuming default (%d)' % (arg, default))
    finally:
        del sys.argv[index]  # always consume the value token as well
    return value
class CSVOutput:
    """Emits benchmark results as CSV rows on stdout."""

    def header(self):
        """Print the CSV column names."""
        print('keep_alive,n_connections,rps,kbps,2xx,3xx,4xx,5xx')

    def footer(self):
        """Wipe the in-progress status line once the run is over."""
        clearstderrline()

    def log(self, keep_alive, n_connections, rps, kbps, _2xx, _3xx, _4xx, _5xx):
        """Print one result row (keep_alive is coerced to 0/1)."""
        clearstderrline()
        fields = (int(keep_alive), n_connections, rps, kbps, _2xx, _3xx, _4xx, _5xx)
        print(','.join(str(field) for field in fields))
class MatplotlibOutput:
    """Collects benchmark samples and renders requests/s with matplotlib."""

    def __init__(self, xkcd=False):
        # When True the final figure is drawn in matplotlib's xkcd style.
        self.xkcd = xkcd

    def header(self):
        """Reset the per-run data series."""
        self.n_connections = []
        self.rps = {'keep-alive': [], 'close': []}

    def _plot(self):
        """Draw requests/s versus connection count for both modes."""
        plt.xlabel('# connections')
        plt.ylabel('Requests/s')
        # The keep-alive pass runs first; truncate the x values to however
        # many samples the 'close' pass produced so both series line up.
        n_connections = self.n_connections[:len(self.rps['close'])]
        plt.plot(n_connections, self.rps['keep-alive'], label='Keep-Alive')
        plt.plot(n_connections, self.rps['close'], label='Close',
                 marker='o', linestyle='--', color='r')
        plt.title('Web Server Benchmark')
        plt.legend()
        plt.show()

    def footer(self):
        """Render the collected data, optionally in xkcd style."""
        if self.xkcd:
            with plt.xkcd():
                self._plot()
        else:
            self._plot()

    def log(self, keep_alive, n_connections, rps, kbps, _2xx, _3xx, _4xx, _5xx):
        """Record one sample; only rps and the connection count are kept."""
        self.n_connections.append(n_connections)
        series = 'keep-alive' if keep_alive else 'close'
        self.rps[series].append(rps)
if __name__ == '__main__':
    # Refuse to run against a stock weighttp build: all parsing below
    # depends on the JSON (-j) output from lpereira's fork.
    if not weighttp_has_json_output():
        print 'This script requires a special version of weighttp which supports JSON'
        print 'output. Get it at http://github.com/lpereira/weighttp'
        sys.exit(1)
    # Options are consumed (removed from sys.argv) as they are parsed, so
    # the target URL can be read from the end of argv afterwards.
    plot = cmdlineboolarg('--plot')
    xkcd = cmdlineboolarg('--xkcd')
    n_threads = cmdlineintarg('--threads', 2)
    n_requests = cmdlineintarg('--request', 1000000)
    keep_alive_timeout = cmdlineintarg('--keep-alive-timeout', 5)
    n_conn_start = cmdlineintarg('--start-conn', 100)
    n_conn_end = cmdlineintarg('--end-conn', 60000)
    n_conn_step = cmdlineintarg('--conn-step', 10)
    url = sys.argv[-1] if len(sys.argv) > 1 else 'http://localhost:8080/100.html'
    # Fall back to CSV output when matplotlib is unavailable or plotting
    # was not requested.
    if plt is None:
        if plot:
            print 'Matplotlib not installed!'
            sys.exit(1)
        output = CSVOutput()
    elif plot:
        output = MatplotlibOutput(xkcd)
    else:
        output = CSVOutput()
    output.header()
    # Sweep the connection counts twice: once with keep-alive, once with
    # Connection: close.
    for keep_alive in (True, False):
        for n_connections in steprange(n_conn_start, n_conn_end, n_conn_step):
            results = weighttp(url, n_threads, n_connections, n_requests, keep_alive)
            status = results['status_codes']
            output.log(keep_alive, n_connections, results['reqs_per_sec'],
                       results['kbyte_per_sec'], status['2xx'], status['3xx'],
                       status['4xx'], status['5xx'])
        # Let the server drop lingering keep-alive connections before the
        # next pass so its results are not skewed.
        sleepwithstatus('Waiting for keepalive connection timeout', keep_alive_timeout * 1.1)
    output.footer()
| gpl-2.0 |
hantek/pylearn2 | pylearn2/gui/tangent_plot.py | 44 | 1730 | """
Code for plotting curves with tangent lines.
"""
__author__ = "Ian Goodfellow"
try:
from matplotlib import pyplot
except Exception:
pyplot = None
from theano.compat.six.moves import xrange
def tangent_plot(x, y, s):
    """
    Plots a curve with tangent lines.

    Parameters
    ----------
    x : list
        List of x coordinates.
        Assumed to be sorted into ascending order, so that the tangent
        lines occupy 80 percent of the horizontal space between each pair
        of points.
    y : list
        List of y coordinates
    s : list
        List of slopes
    """
    assert isinstance(x, list)
    assert isinstance(y, list)
    assert isinstance(s, list)
    n = len(x)
    assert len(y) == n
    assert len(s) == n

    if pyplot is None:
        raise RuntimeError("Could not import pyplot, can't run this code.")

    pyplot.plot(x, y, color='b')

    if n == 0:
        pyplot.show()
        return
    pyplot.hold(True)

    # Add dummy entries so that the for loop can use the same code on every
    # entry
    if n == 1:
        # Bug fix: the original wrote `[x[0] - 1] + x[0] + [x[0] + 1.]`,
        # concatenating a list with a scalar, which raised TypeError for
        # every single-point input.  Pad symmetrically around the point.
        x = [x[0] - 1.] + x + [x[0] + 1.]
    else:
        x = [x[0] - (x[1] - x[0])] + x + [x[-2] + (x[-1] - x[-2])]
    y = [0.] + y + [0]
    s = [0.] + s + [0]

    for i in xrange(1, n + 1):
        # Each tangent segment covers 40% of the gap on either side of
        # point i, leaving 20% of clear space between neighbours.
        ld = 0.4 * (x[i] - x[i - 1])
        lx = x[i] - ld
        ly = y[i] - ld * s[i]
        rd = 0.4 * (x[i + 1] - x[i])
        rx = x[i] + rd
        ry = y[i] + rd * s[i]
        pyplot.plot([lx, rx], [ly, ry], color='g')
    pyplot.show()
if __name__ == "__main__":
    # Demo by plotting a quadratic function
    import numpy as np
    x = np.arange(-5., 5., .1)
    y = 0.5 * (x ** 2)
    x = list(x)
    y = list(y)
    # d/dx [0.5 x^2] = x, so the slope list is simply x itself.
    tangent_plot(x, y, x)
| bsd-3-clause |
askhl/ase | ase/test/dependency_matplotlib.py | 4 | 1052 | import sys
msg = "\nThe matplotlib python module is missing or not installed properly.\n"
msg += "Is the PYTHONPATH environment variable set correctly?\n"
msg += "Please verify your installation by running on the command line:\n"
msg += "python -c 'import matplotlib'\n"
msg += "\n"
msg += "This module is optional and required in order to use "
msg += "ASE's simple GUI (ase-gui).\n"
msg += "If you don't wish to use ase-gui ignore this error, otherwise\n"
msg += "please install the package using "
msg += "your distribution package manager, i.e.:\n"
msg += "\n"
msg += " Debian/Ubuntu: sudo apt-get python-matplotlib\n"
msg += "\n"
msg += " OpenSUSE: yast -i python-matplotlib\n"
msg += "\n"
msg += " Red Hat/Fedora: yum install python-matplotlib\n"
msg += "\n"
msg += "or perform manual installation, preferably as non-root user,\n"
msg += "following http://matplotlib.sourceforge.net/users/installing.html."
if locals().get('display'):
try:
import matplotlib
except ImportError:
print >> sys.stderr, msg
raise
| gpl-2.0 |
jviada/QuantEcon.py | examples/preim1.py | 7 | 1294 | """
QE by Tom Sargent and John Stachurski.
Illustrates preimages of functions
"""
import matplotlib.pyplot as plt
import numpy as np
def f(x):
    """Example curve: a cosine oscillation shifted to stay positive."""
    return 1.4 + 0.6 * np.cos(4 * x)
xmin, xmax = -1, 1
x = np.linspace(xmin, xmax, 160)
y = f(x)
ya, yb = np.min(y), np.max(y)
fig, axes = plt.subplots(2, 1, figsize=(8, 8))
for ax in axes:
# Set the axes through the origin
for spine in ['left', 'bottom']:
ax.spines[spine].set_position('zero')
for spine in ['right', 'top']:
ax.spines[spine].set_color('none')
ax.set_ylim(-0.6, 3.2)
ax.set_xlim(xmin, xmax)
ax.set_yticks(())
ax.set_xticks(())
ax.plot(x, y, 'k-', lw=2, label=r'$f$')
ax.fill_between(x, ya, yb, facecolor='blue', alpha=0.05)
ax.vlines([0], ya, yb, lw=3, color='blue', label=r'range of $f$')
ax.text(0.04, -0.3, '$0$', fontsize=16)
ax = axes[0]
ax.legend(loc='upper right', frameon=False)
ybar = 1.5
ax.plot(x, x * 0 + ybar, 'k--', alpha=0.5)
ax.text(0.05, 0.8 * ybar, r'$y$', fontsize=16)
for i, z in enumerate((-0.35, 0.35)):
ax.vlines(z, 0, f(z), linestyle='--', alpha=0.5)
ax.text(z, -0.2, r'$x_{}$'.format(i), fontsize=16)
ax = axes[1]
ybar = 2.6
ax.plot(x, x * 0 + ybar, 'k--', alpha=0.5)
ax.text(0.04, 0.91 * ybar, r'$y$', fontsize=16)
plt.show()
| bsd-3-clause |
ctn-waterloo/nengo_theano | nengo_theano/test/test_enc.py | 1 | 1980 | """This is a file to test the encoders parameter on ensembles"""
import math
import time
import numpy as np
import matplotlib.pyplot as plt
import nengo_theano as nef
build_time_start = time.time()
timesteps = 10000
dt_step = 0.001
net = nef.Network('Encoder Test', dt=dt_step)
net.make_input('in1', math.sin)
net.make_input('in2', math.cos)
net.make('A', 100, 1)
net.make('B', 100, 1, encoders=[[1]], intercept=(0, 1.0))
net.make('C', 100, 2, radius=1.5)
net.make('D', 100, 2, encoders=[[1,1],[1,-1],[-1,-1],[-1,1]], radius=1.5)
net.make('outputC', 1, 1, mode='direct')
net.make('outputD', 1, 1, mode='direct')
net.connect('in1', 'A')
net.connect('A', 'B')
net.connect('in1', 'C', index_post=[0])
net.connect('in2', 'C', index_post=[1])
net.connect('in1', 'D', index_post=[0])
net.connect('in2', 'D', index_post=[1])
def prod(x):
    # Product of the first two components of the decoded vector.
    return x[0] * x[1]
net.connect('C', 'outputC', func=prod)
net.connect('D', 'outputD', func=prod)
t = np.linspace(dt_step, timesteps*dt_step, timesteps)
pstc = 0.01
I1p = net.make_probe('in1', dt_sample=dt_step, pstc=pstc)
I2p = net.make_probe('in2', dt_sample=dt_step, pstc=pstc)
Ap = net.make_probe('A', dt_sample=dt_step, pstc=pstc)
Bp = net.make_probe('B', dt_sample=dt_step, pstc=pstc)
Cp = net.make_probe('outputC', dt_sample=dt_step, pstc=pstc)
Dp = net.make_probe('outputD', dt_sample=dt_step, pstc=pstc)
build_time_end = time.time()
print "starting simulation"
net.run(timesteps * dt_step)
sim_time_end = time.time()
print "\nBuild time: %0.10fs" % (build_time_end - build_time_start)
print "Sim time: %0.10fs" % (sim_time_end - build_time_end)
plt.ioff(); plt.close(); plt.hold(1)
plt.subplot(211)
plt.plot(t, I1p.get_data())
plt.plot(t, Ap.get_data())
plt.plot(t, Bp.get_data())
plt.legend(['Input', 'A', 'B'])
plt.subplot(212); plt.title('Multiplication test')
plt.plot(t, I1p.get_data() * I2p.get_data())
plt.plot(t, Cp.get_data())
plt.plot(t, Dp.get_data())
plt.legend(['Answer', 'C', 'D'])
plt.tight_layout()
plt.show()
| mit |
tmills/neural-assertion | scripts/keras/multitask/assertion_multinetwork_cv.py | 1 | 5307 | #!/usr/bin/env python
#from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
#from sklearn.datasets import load_svmlight_file
import sklearn as sk
import sklearn.cross_validation
import numpy as np
import cleartk_io as ctk_io
import nn_models
import os, os.path
import sys
import tempfile
num_folds = 10
batch_size = 64
nb_epoch = 20
layers = (64, 256, 256)
def get_data():
    """Load the multitask assertion train/test set via cleartk_io.

    Returns the (labels, features) pair produced by
    ``read_multitask_liblinear`` for the bundled test directory.
    """
    data_path = 'data_testing/multitask_assertion/train_and_test'
    return ctk_io.read_multitask_liblinear(data_path)
def get_f(r, p):
    """Return the F1 score (harmonic mean) for recall ``r`` and precision
    ``p``; 0 when both are 0 to avoid dividing by zero."""
    if r + p == 0:
        return 0
    # Float literal so the result is never truncated under Python 2
    # integer division if ints are ever passed in.
    return 2.0 * r * p / (r + p)
def main(args):
    """Cross-validate one MLP per label column on multitask liblinear data.

    args[0] is the working directory holding the liblinear-format data.
    For each label a fresh network is trained/evaluated with k-fold CV;
    binary labels (one output) are scored with F1, multi-category labels
    with keras' accuracy metric.
    """
    working_dir = args[0]

    print("Reading data...")
    Y, X = ctk_io.read_multitask_liblinear(working_dir) # get_data()

    num_examples, dimension = X.shape
    num_y_examples, num_labels = Y.shape
    assert num_examples == num_y_examples

    print("Data has %d examples and dimension %d" % (num_examples, dimension) )
    print("Output has %d dimensions" % (num_labels) )

    # Flatten multi-category label columns into one-hot output groups;
    # indices[i]:indices[i+1] delimits the network outputs for label i.
    Y_adj, indices = ctk_io.flatten_outputs(Y)
    print("%d labels mapped to %d outputs based on category numbers" % (Y.shape[1], Y_adj.shape[1]) )

    label_scores = []

    for label_ind in range(0, Y.shape[1]):
        num_outputs = indices[label_ind+1] - indices[label_ind]
        # model = models.get_mlp_model(dimension, num_outputs)
        print("Starting to train for label %d with %d outputs" % (label_ind, num_outputs) )

        folds = sk.cross_validation.KFold(num_examples, n_folds=num_folds)
        scores = []
        # Running confusion counts, used only in the binary (1-output) case.
        total_tp = 0
        total_fp = 0
        total_fn = 0
        fold_ind = 0
        total_score = 0
        for train_indices, test_indices in folds:
            print("Starting fold %d" % fold_ind)
            train_x = X[train_indices]
            train_y = Y_adj[train_indices, int(indices[label_ind]):int(indices[label_ind+1])]
            test_x = X[test_indices]
            test_y = Y_adj[test_indices, int(indices[label_ind]):int(indices[label_ind+1])]

            # Fresh network per fold so no weights leak across folds.
            model = nn_models.get_mlp_model(dimension, num_outputs)
            model.fit(train_x, train_y,
                      nb_epoch=nb_epoch,
                      batch_size=batch_size)

            ### This was to test model reading/writing and it works fine.
            # temp_dir = tempfile.mkdtemp()
            # json_string = model.to_json()
            # open(os.path.join(temp_dir, 'model_%d.json' % label_ind), 'w').write(json_string)
            # model.save_weights(os.path.join(temp_dir, 'model_%d.h5' % label_ind), overwrite=True)
            #
            # model = None
            #
            # model = model_from_json(open(os.path.join(temp_dir, "model_%d.json" % label_ind)).read())
            # model.load_weights(os.path.join(temp_dir, "model_%d.h5" % label_ind))

            if num_outputs == 1:
                # Binary label: accumulate tp/fp/fn and report per-fold F1.
                labels = test_y
                predictions = model.predict_classes(test_x, batch_size=batch_size)
                # labels = np.reshape(test_y, (len(test_y),1))
                ## count up true positive occurrences where prediction = label = 1 aka prediction + label == 2
                tp = len(np.where((predictions + labels) == 2)[0])
                total_tp += tp
                ## false positives: prediction - label = 1
                fp = len(np.where((predictions - labels) == 1)[0])
                total_fp += fp
                ## false negatives: label - prediction = 1
                fn = len(np.where((labels - predictions) == 1)[0])
                total_fn += fn
                print("tp=%d, fp=%d, fn=%d" % (tp, fp, fn) )
                recall = tp / float(tp + fn) if tp > 0 else 0
                precision = tp / float(tp + fp) if tp > 0 else 1
                f1 = get_f(recall, precision)
                print("P=%f, R=%f, F1=%f" % (precision, recall, f1) )
            else:
                # Multiclass label: keras returns [loss, accuracy].
                score = model.evaluate(test_x, test_y, batch_size=batch_size)
                print("score=%s" % (score) )
                total_score += score[1]
            # score = model.evaluate(test_x, test_y, show_accuracy=True, batch_size=batch_size)
            # print("Scores for fold %d:" % fold_ind)
            # print("test score: ", score[0])
            # print("test accuracy: " , score[1])
            fold_ind += 1

        if num_outputs == 1:
            recall = total_tp / float(total_tp + total_fn)
            precision = total_tp / float(total_tp + total_fp)
            f1 = get_f(recall, precision)
            # NOTE(review): the format string labels recall as P and
            # precision as R — the arguments look swapped; confirm.
            print("Overall total: P=%f, R=%f, F=%f" % (recall, precision, f1) )
            label_scores.append(f1)
        else:
            total_score /= num_folds
            print("Overall accuracy = %f" % (total_score) )
            label_scores.append(total_score)

    for ind, val in enumerate(label_scores):
        # NOTE(review): `num_outputs` is stale here (its value from the
        # last label), and the binary case tests num_outputs == 1 above,
        # so the `num_outputs==2` condition looks suspect; confirm.
        print("%s of label %d is %f" % ("Fscore" if num_outputs==2 else "Accuracy", ind, val) )
if __name__ == "__main__":
    # Skip argv[0] (script name); main() expects the working dir at args[0].
    main(sys.argv[1:])
| apache-2.0 |
hrjn/scikit-learn | sklearn/neighbors/unsupervised.py | 29 | 4756 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
                       RadiusNeighborsMixin, UnsupervisedMixin):
    """Unsupervised learner for implementing neighbor searches.

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors used by default for :meth:`kneighbors` queries.

    radius : float, optional (default = 1.0)
        Range of parameter space used by default for
        :meth:`radius_neighbors` queries.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Search structure used to find the nearest neighbors; 'auto'
        chooses the most appropriate one from the data passed to
        :meth:`fit`.  Fitting on sparse input always falls back to
        brute force regardless of this setting.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  Trades construction and
        query speed against the memory needed to store the tree.

    p : integer, optional (default = 2)
        Power of the Minkowski metric: p = 1 is manhattan_distance (l1),
        p = 2 euclidean_distance (l2), other p give minkowski_distance
        (l_p).

    metric : string or callable, default 'minkowski'
        Metric used for distance computation: any metric name known to
        scikit-learn or scipy.spatial.distance, or a callable taking two
        arrays and returning their distance (slower than a named metric).
        Distance matrices are not supported.

    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.

    n_jobs : int, optional (default = 1)
        Number of parallel jobs for the neighbor search; -1 uses all CPU
        cores.  Affects only :meth:`kneighbors` and
        :meth:`kneighbors_graph`.

    See also
    --------
    KNeighborsClassifier, RadiusNeighborsClassifier, KNeighborsRegressor,
    RadiusNeighborsRegressor, BallTree

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5, radius=1.0,
                 algorithm='auto', leaf_size=30, metric='minkowski',
                 p=2, metric_params=None, n_jobs=1, **kwargs):
        # Everything is forwarded to the shared NeighborsBase initialiser;
        # no validation happens until fit() is called.
        self._init_params(n_neighbors=n_neighbors,
                          radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
shyamalschandra/scikit-learn | sklearn/neural_network/rbm.py | 46 | 12303 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
Weight matrix, where n_features in the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
             n_iter=10, verbose=0, random_state=None):
    # Only store the hyper-parameters here; all fitted state
    # (components_, intercept_hidden_, intercept_visible_, h_samples_)
    # is created in fit/partial_fit, per scikit-learn convention.
    self.n_components = n_components
    self.learning_rate = learning_rate
    self.batch_size = batch_size
    self.n_iter = n_iter
    self.verbose = verbose
    self.random_state = random_state
def transform(self, X):
    """Compute the hidden layer activation probabilities, P(h=1|v=X).

    Parameters
    ----------
    X : {array-like, sparse matrix} shape (n_samples, n_features)
        The data to be transformed.

    Returns
    -------
    h : array, shape (n_samples, n_components)
        Latent representations of the data.
    """
    check_is_fitted(self, "components_")
    # Validate input (dense or CSR sparse) before projecting it onto
    # the hidden units.
    data = check_array(X, accept_sparse='csr', dtype=np.float64)
    hidden_probs = self._mean_hiddens(data)
    return hidden_probs
def _mean_hiddens(self, v):
    """Computes the probabilities P(h=1|v).

    Parameters
    ----------
    v : array-like, shape (n_samples, n_features)
        Values of the visible layer.

    Returns
    -------
    h : array-like, shape (n_samples, n_components)
        Corresponding mean field values for the hidden layer.
    """
    activation = safe_sparse_dot(v, self.components_.T)
    activation += self.intercept_hidden_
    # expit writes into its ``out`` argument, so no temporary array is
    # allocated for the logistic transform.
    return expit(activation, out=activation)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
    """Computes the free energy F(v) = - log sum_h exp(-E(v,h)).

    Parameters
    ----------
    v : array-like, shape (n_samples, n_features)
        Values of the visible layer.

    Returns
    -------
    free_energy : array-like, shape (n_samples,)
        The value of the free energy.
    """
    visible_term = safe_sparse_dot(v, self.intercept_visible_)
    # softplus of each hidden pre-activation, summed over hidden units.
    hidden_term = np.logaddexp(
        0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_
    ).sum(axis=1)
    return -visible_term - hidden_term
def gibbs(self, v):
    """Perform one Gibbs sampling step.

    Parameters
    ----------
    v : array-like, shape (n_samples, n_features)
        Values of the visible layer to start from.

    Returns
    -------
    v_new : array-like, shape (n_samples, n_features)
        Values of the visible layer after one Gibbs step.
    """
    check_is_fitted(self, "components_")
    # Lazily create the persistent RNG so gibbs() can be called on a
    # model that was fitted elsewhere.
    if not hasattr(self, "random_state_"):
        self.random_state_ = check_random_state(self.random_state)
    hidden = self._sample_hiddens(v, self.random_state_)
    return self._sample_visibles(hidden, self.random_state_)
def partial_fit(self, X, y=None):
    """Fit the model to the data X which should contain a partial
    segment of the data.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data.

    Returns
    -------
    self : BernoulliRBM
        The fitted model.
    """
    X = check_array(X, accept_sparse='csr', dtype=np.float64)
    if not hasattr(self, 'random_state_'):
        self.random_state_ = check_random_state(self.random_state)
    if not hasattr(self, 'components_'):
        # Small random weights break symmetry between hidden units.
        # 'F' (Fortran) memory order speeds up the dot products in
        # _fit; the former 'fortran' spelling is rejected by modern
        # NumPy order-flag parsing.
        self.components_ = np.asarray(
            self.random_state_.normal(
                0,
                0.01,
                (self.n_components, X.shape[1])
            ),
            order='F')
    if not hasattr(self, 'intercept_hidden_'):
        self.intercept_hidden_ = np.zeros(self.n_components, )
    if not hasattr(self, 'intercept_visible_'):
        self.intercept_visible_ = np.zeros(X.shape[1], )
    if not hasattr(self, 'h_samples_'):
        # Persistent (fantasy) particles for PCD start at zero.
        self.h_samples_ = np.zeros((self.batch_size, self.n_components))
    self._fit(X, self.random_state_)
    # Return self as documented; the original fell through and returned
    # None, which broke chaining such as ``est.partial_fit(X).gibbs(v)``.
    return self
def _fit(self, v_pos, rng):
    """Inner fit for one mini-batch.
    Adjust the parameters to maximize the likelihood of v using
    Stochastic Maximum Likelihood (SML).
    Parameters
    ----------
    v_pos : array-like, shape (n_samples, n_features)
        The data to use for training.
    rng : RandomState
        Random number generator to use for sampling.
    """
    # Positive phase: mean hidden activations given the data.
    h_pos = self._mean_hiddens(v_pos)
    # Negative phase: reconstruct visibles from the persistent hidden
    # samples (PCD fantasy particles), then recompute hidden means.
    v_neg = self._sample_visibles(self.h_samples_, rng)
    h_neg = self._mean_hiddens(v_neg)
    # Learning rate is scaled by the mini-batch size.
    lr = float(self.learning_rate) / v_pos.shape[0]
    # Weight gradient: <v h>_data - <v h>_model.
    update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
    update -= np.dot(h_neg.T, v_neg)
    self.components_ += lr * update
    self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
    # np.asarray(...).squeeze() handles the matrix-shaped sum that a
    # sparse v_pos produces.
    self.intercept_visible_ += lr * (np.asarray(
        v_pos.sum(axis=0)).squeeze() -
        v_neg.sum(axis=0))
    h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0  # sample binomial
    # In-place floor zeros the remaining probabilities, so h_samples_
    # holds binary samples for the next PCD step.
    self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
    """Compute the pseudo-likelihood of X.
    Parameters
    ----------
    X : {array-like, sparse matrix} shape (n_samples, n_features)
        Values of the visible layer. Must be all-boolean (not checked).
    Returns
    -------
    pseudo_likelihood : array-like, shape (n_samples,)
        Value of the pseudo-likelihood (proxy for likelihood).
    Notes
    -----
    This method is not deterministic: it computes a quantity called the
    free energy on X, then on a randomly corrupted version of X, and
    returns the log of the logistic function of the difference.
    """
    check_is_fitted(self, "components_")
    v = check_array(X, accept_sparse='csr')
    rng = check_random_state(self.random_state)
    # Randomly corrupt one feature in each sample in v.
    ind = (np.arange(v.shape[0]),
           rng.randint(0, v.shape[1], v.shape[0]))
    if issparse(v):
        # Flip a boolean entry x -> 1 - x by *adding* (1 - 2x) at the
        # chosen positions; this avoids densifying the sparse matrix.
        data = -2 * v[ind] + 1
        v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
    else:
        v_ = v.copy()
        v_[ind] = 1 - v_[ind]
    fe = self._free_energy(v)
    fe_ = self._free_energy(v_)
    # Scale by n_features: only one unit was flipped, so the free-energy
    # difference is extrapolated to the whole visible layer.
    return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
    """Fit the model to the data X.

    Parameters
    ----------
    X : {array-like, sparse matrix} shape (n_samples, n_features)
        Training data.

    Returns
    -------
    self : BernoulliRBM
        The fitted model.
    """
    X = check_array(X, accept_sparse='csr', dtype=np.float64)
    n_samples = X.shape[0]
    rng = check_random_state(self.random_state)
    # Small random weights break symmetry between hidden units.  'F'
    # (Fortran) order speeds up the dot products in _fit; the former
    # 'fortran' spelling is not a valid order flag for modern NumPy.
    self.components_ = np.asarray(
        rng.normal(0, 0.01, (self.n_components, X.shape[1])),
        order='F')
    self.intercept_hidden_ = np.zeros(self.n_components, )
    self.intercept_visible_ = np.zeros(X.shape[1], )
    # Persistent (fantasy) particles for PCD start at zero.
    self.h_samples_ = np.zeros((self.batch_size, self.n_components))
    n_batches = int(np.ceil(float(n_samples) / self.batch_size))
    batch_slices = list(gen_even_slices(n_batches * self.batch_size,
                                        n_batches, n_samples))
    verbose = self.verbose
    begin = time.time()
    # range (instead of Python-2-only xrange) works on both Python 2
    # and 3; n_iter is small, so materializing the range is cheap.
    for iteration in range(1, self.n_iter + 1):
        for batch_slice in batch_slices:
            self._fit(X[batch_slice], rng)
        if verbose:
            end = time.time()
            print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
                  " time = %.2fs"
                  % (type(self).__name__, iteration,
                     self.score_samples(X).mean(), end - begin))
            begin = end
    return self
| bsd-3-clause |
cdeboever3/cdpybio | tests/star/test_star.py | 1 | 35611 | from copy import deepcopy
import os
from numpy import array
from numpy import nan
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas.util.testing import assert_panel_equal
import pytest
import cdpybio as cpb
# Note: I use pos and neg in this file to refer to the plus and minus strands
# respectively.
# TODO: I might want to include some more tests. I haven't tested whether the
# stats in the statsfiles are correct. I might want to check to make sure the
# results aren't sensitive to strand. I could also test the define_sample_name
# functionality.
def add_root(fn):
    # Prefix *fn* with the path of the STAR test-data directory that
    # ships inside the installed cdpybio package.
    return os.path.join(cpb._root, 'tests', 'star', fn)
# Paths to the STAR SJ.out.tab fixture files shipped with the package.
# Naming convention: "neg" = minus-strand junctions, "unk" = unknown
# strand, "new"/"nonew" = with/without novel junctions, and the a/b
# suffixes are replicate samples.
SJ_NEG_NEW_A = add_root('SJ.out.tab.neg_new_a')
SJ_NEG_NEW_B = add_root('SJ.out.tab.neg_new_b')
SJ_NEG_NONEW_A = add_root('SJ.out.tab.neg_nonew_a')
SJ_NEG_NONEW_B = add_root('SJ.out.tab.neg_nonew_b')
SJ_NEW = add_root('SJ.out.tab.new')
SJ_NEW_A = add_root('SJ.out.tab.new_a')
SJ_NEW_B = add_root('SJ.out.tab.new_b')
SJ_NONEW_A = add_root('SJ.out.tab.nonew_a')
SJ_NONEW_B = add_root('SJ.out.tab.nonew_b')
SJ_UNK_NONEW_A = add_root('SJ.out.tab.unk_nonew_a')
# External annotation of known junctions.
EXT = add_root('ext.tsv')
class TestMisc:
    """Parsing sanity checks for the cdpybio.star reader functions."""

    def test_read_ext(self):
        # Expected parse of the external annotation EXT: one row per
        # intron keyed by a strand-aware junction string, with
        # donor/acceptor columns swapped for minus-strand genes.
        vals = [['gene1', 'chr1', 10, 20, '+', 'chr1:10', 'chr1:20',
                 'chr1:10:+', 'chr1:20:+', 'chr1:10-20'],
                ['gene1', 'chr1', 5, 25, '+', 'chr1:5', 'chr1:25', 'chr1:5:+',
                 'chr1:25:+', 'chr1:5-25'],
                ['gene1', 'chr1', 2, 20, '+', 'chr1:2', 'chr1:20', 'chr1:2:+',
                 'chr1:20:+', 'chr1:2-20'],
                ['gene1', 'chr1', 5, 20, '+', 'chr1:5', 'chr1:20', 'chr1:5:+',
                 'chr1:20:+', 'chr1:5-20'],
                ['gene2', 'chr2', 10, 20, '-', 'chr2:10', 'chr2:20',
                 'chr2:20:-', 'chr2:10:-', 'chr2:10-20'],
                ['gene2', 'chr2', 5, 25, '-', 'chr2:5', 'chr2:25', 'chr2:25:-',
                 'chr2:5:-', 'chr2:5-25'],
                ['gene2', 'chr2', 2, 20, '-', 'chr2:2', 'chr2:20', 'chr2:20:-',
                 'chr2:2:-', 'chr2:2-20'],
                ['gene2', 'chr2', 5, 20, '-', 'chr2:5', 'chr2:20', 'chr2:20:-',
                 'chr2:5:-', 'chr2:5-20']]
        ind = [u'chr1:10-20:+', u'chr1:5-25:+', u'chr1:2-20:+', u'chr1:5-20:+',
               u'chr2:10-20:-', u'chr2:5-25:-', u'chr2:2-20:-', u'chr2:5-20:-']
        cols = ['gene', 'chrom', 'start', 'end', 'strand', 'chrom:start',
                'chrom:end', 'donor', 'acceptor', 'intron']
        df = pd.DataFrame(vals, index=ind, columns=cols)
        df2, stats = cpb.star.read_external_annotation(EXT)
        assert_frame_equal(df, df2)

    def test_read_sj_out_pos(self):
        # Plus-strand SJ.out.tab rows parse into typed columns.
        vals = [['chr1', 2, 20, '+', 'GT/AG', True, 5, 1, 10],
                ['chr1', 5, 20, '+', 'GT/AG', True, 20, 1, 14],
                ['chr1', 5, 25, '+', 'CT/AC', True, 10, 1, 7],
                ['chr1', 10, 20, '+', 'CT/AC', True, 20, 1, 7]]
        cols = [u'chrom', u'start', u'end', u'strand', u'intron_motif',
                u'annotated', u'unique_junction_reads',
                u'multimap_junction_reads', u'max_overhang']
        df = pd.DataFrame(vals, columns=cols)
        df2 = cpb.star.read_sj_out_tab(SJ_NONEW_A)
        assert_frame_equal(df, df2)

    def test_read_sj_out_neg(self):
        # Minus-strand variant of the test above.
        vals = [['chr2', 2, 20, '-', 'GT/AG', True, 5, 1, 10],
                ['chr2', 5, 20, '-', 'GT/AG', True, 20, 1, 14],
                ['chr2', 5, 25, '-', 'CT/AC', True, 10, 1, 7],
                ['chr2', 10, 20, '-', 'CT/AC', True, 20, 1, 7]]
        cols = [u'chrom', u'start', u'end', u'strand', u'intron_motif',
                u'annotated', u'unique_junction_reads',
                u'multimap_junction_reads', u'max_overhang']
        df = pd.DataFrame(vals, columns=cols)
        df2 = cpb.star.read_sj_out_tab(SJ_NEG_NONEW_A)
        assert_frame_equal(df, df2)

    def test_read_sj_out_unk(self):
        # STAR reports strand 'unk' when the strand cannot be inferred.
        df = pd.DataFrame([['chr3', 2, 20, 'unk', 'GT/AG', True, 5, 1, 10],
                           ['chr3', 5, 20, 'unk', 'GT/AG', True, 20, 1, 14],
                           ['chr3', 5, 25, 'unk', 'CT/AC', True, 10, 1, 7],
                           ['chr3', 10, 20, 'unk', 'CT/AC', True, 20, 1, 7]],
                          columns=[u'chrom', u'start',
                                   u'end', u'strand',
                                   u'intron_motif', u'annotated',
                                   u'unique_junction_reads',
                                   u'multimap_junction_reads',
                                   u'max_overhang'])
        df2 = cpb.star.read_sj_out_tab(SJ_UNK_NONEW_A)
        assert_frame_equal(df, df2)
# TODO: I'm running into some kind of error when I compare the dataframes. I
# see some rumbling that there may be some numpy/pandas difficulties so it
# might not be my problem.
# def test_read_log(self):
# ind = [u'Started job on', u'Started mapping on', u'Finished on',
# u'Mapping speed, Million of reads per hour',
# u'Number of input reads', u'Average input read length',
# u'Uniquely mapped reads number', u'Uniquely mapped reads %',
# u'Average mapped length', u'Number of splices: Total',
# u'Number of splices: Annotated (sjdb)',
# u'Number of splices: GT/AG', u'Number of splices: GC/AG',
# u'Number of splices: AT/AC', u'Number of splices: Non-canonical',
# u'Mismatch rate per base, %', u'Deletion rate per base',
# u'Deletion average length', u'Insertion rate per base',
# u'Insertion average length',
# u'Number of reads mapped to multiple loci',
# u'% of reads mapped to multiple loci',
# u'Number of reads mapped to too many loci',
# u'% of reads mapped to too many loci',
# u'% of reads unmapped: too many mismatches',
# u'% of reads unmapped: too short', u'% of reads unmapped: other']
# cols = [add_root('Log.final.out.a')]
# vals= [['Mar 06 17:38:15'], ['Mar 06 17:53:05'], ['Mar 06 20:13:16'],
# ['62.51'], ['146042756'], ['135'], ['103778365'], ['71.06%'],
# ['119.74'], ['37420413'], ['35853326'], ['36980144'], ['351650'],
# ['17910'], ['70709'], ['1.13%'], ['0.01%'], ['1.51'], ['0.01%'],
# ['1.29'], ['42173939'], ['28.88%'], ['536'], ['0.00%'],
# ['0.00%'], ['0.00%'], ['0.06%']]
# df = pd.DataFrame(vals, index=ind, columns=cols)
# df2 = cpb.star._read_log(add_root('Log.final.out.a'))
# assert_frame_equal(df, df2)
# TODO: I'm running into some kind of error when I compare the dataframes. I
# see some rumbling that there may be some numpy/pandas difficulties so it
# might not be my problem.
# def test_make_logs_df(self):
# cols = [u'Started job on', u'Started mapping on', u'Finished on',
# u'Mapping speed, Million of reads per hour',
# u'Number of input reads', u'Average input read length',
# u'Uniquely mapped reads number', u'Uniquely mapped reads %',
# u'Average mapped length', u'Number of splices: Total',
# u'Number of splices: Annotated (sjdb)',
# u'Number of splices: GT/AG', u'Number of splices: GC/AG',
# u'Number of splices: AT/AC', u'Number of splices: Non-canonical',
# u'Mismatch rate per base, %', u'Deletion rate per base',
# u'Deletion average length', u'Insertion rate per base',
# u'Insertion average length',
# u'Number of reads mapped to multiple loci',
# u'% of reads mapped to multiple loci',
# u'Number of reads mapped to too many loci',
# u'% of reads mapped to too many loci',
# u'% of reads unmapped: too many mismatches',
# u'% of reads unmapped: too short', u'% of reads unmapped: other']
# ind = [add_root(x) for x in ['Log.final.out.a', u'Log.final.out.b']]
# vals = [['Mar 06 17:38:15', 'Mar 06 17:53:05', 'Mar 06 20:13:16', 62.51,
# 146042756.0, 135.0, 103778365.0, 71.06, 119.74, 37420413.0,
# '35853326', 36980144.0, 351650.0, 17910.0, 70709.0, 1.13, 0.01,
# '1.51', 0.01, '1.29', 42173939.0, 28.88, 536.0, 0.0, 0.0, 0.0,
# 0.06],
# ['Mar 04 19:39:13', 'Mar 04 19:49:11', 'Mar 04 21:13:01', 84.92,
# 118648978.0, 136.0, 105411961.0, 88.84, 132.3, 30047584.0,
# '29100214', 29616122.0, 351932.0, 21726.0, 57804.0, 0.69, 0.01,
# '1.51', 0.01, '1.25', 13141675.0, 11.08, 951.0, 0.0, 0.0, 0.0,
# 0.08]]
# df = pd.DataFrame(vals, index=ind, columns=cols)
# df2 = cpb.star.make_logs_df(
# [add_root(x) for x in ['Log.final.out.a', 'Log.final.out.b']])
# assert_frame_equal(df, df2)
class TestMakeSJOutDict:
    """_make_sj_out_dict maps each input path to its parsed table."""

    def _check_dict(self, path_a, path_b):
        # Shared assertion logic for both strands: each entry of the
        # dict must equal the individually parsed sj_out table,
        # re-indexed by its junction string.
        d = cpb.star._make_sj_out_dict([path_a, path_b])
        for path in (path_a, path_b):
            expected = cpb.star.read_sj_out_tab(path)
            expected.index = expected.apply(
                lambda x: cpb.star._sj_out_junction(x), axis=1)
            assert_frame_equal(expected, d[path])

    def test_make_sj_out_dict_pos(self):
        self._check_dict(SJ_NONEW_A, SJ_NONEW_B)

    def test_make_sj_out_dict_neg(self):
        self._check_dict(SJ_NEG_NONEW_A, SJ_NEG_NONEW_B)
class TestMakeSJOutPanel:
    """_make_sj_out_panel: per-sample count panel plus annotation frame."""

    def test_make_sj_out_panel_pos(self):
        ind = [u'chr1:5-20', u'chr1:5-25', u'chr1:10-20']
        d = cpb.star._make_sj_out_dict([SJ_NONEW_A,
                                        SJ_NONEW_B])
        # Build the expected panel by hand from the parsed tables.
        # NOTE(review): only the second sample is fillna'd here, while
        # the "new junction" tests below fillna both — presumably sample
        # A covers every junction in this fixture; verify if it changes.
        df = d[SJ_NONEW_A].ix[ind, cpb.star.COUNT_COLS]
        df2 = d[SJ_NONEW_B].ix[ind, cpb.star.COUNT_COLS]
        df2 = df2.fillna(0)
        p = pd.Panel({SJ_NONEW_A:df,
                      SJ_NONEW_B:df2})
        p = p.astype(int)
        a = pd.DataFrame([['chr1', 5, 20, '+', 'GT/AG', True],
                          ['chr1', 5, 25, '+', 'CT/AC', True],
                          ['chr1', 10, 20, '+', 'CT/AC', True]],
                         index=ind,
                         columns=[u'chrom', u'start',
                                  u'end', u'strand', u'intron_motif',
                                  u'annotated'])
        p2, a2 = cpb.star._make_sj_out_panel(d)
        assert_frame_equal(a, a2)
        assert_panel_equal(p, p2)

    def test_make_sj_out_panel_neg(self):
        # Minus-strand variant of the test above.
        ind = [u'chr2:5-20', u'chr2:5-25', u'chr2:10-20']
        d = cpb.star._make_sj_out_dict([SJ_NEG_NONEW_A,
                                        SJ_NEG_NONEW_B])
        df = d[SJ_NEG_NONEW_A].ix[ind, cpb.star.COUNT_COLS]
        df2 = d[SJ_NEG_NONEW_B].ix[ind, cpb.star.COUNT_COLS]
        df2 = df2.fillna(0)
        p = pd.Panel({SJ_NEG_NONEW_A:df,
                      SJ_NEG_NONEW_B:df2})
        p = p.astype(int)
        a = pd.DataFrame([['chr2', 5, 20, '-', 'GT/AG', True],
                          ['chr2', 5, 25, '-', 'CT/AC', True],
                          ['chr2', 10, 20, '-', 'CT/AC', True]],
                         index=ind,
                         columns=[u'chrom', u'start',
                                  u'end', u'strand', u'intron_motif',
                                  u'annotated'])
        p2, a2 = cpb.star._make_sj_out_panel(d)
        assert_frame_equal(a, a2)
        assert_panel_equal(p, p2)

    def test_new_junctions_pos(self):
        # Mixing a sample with novel junctions and one without: novel
        # junctions appear with annotated=False and zero counts for the
        # sample that lacks them.
        ind = [u'chr1:2-25', u'chr1:3-25', u'chr1:5-20', u'chr1:5-30',
               u'chr1:10-20', u'chr1:30-40']
        d = cpb.star._make_sj_out_dict([SJ_NONEW_A,
                                        SJ_NEW_A])
        df = d[SJ_NONEW_A].ix[ind, cpb.star.COUNT_COLS]
        df = df.fillna(0)
        df2 = d[SJ_NEW_A].ix[ind, cpb.star.COUNT_COLS]
        df2 = df2.fillna(0)
        p = pd.Panel({SJ_NONEW_A:df,
                      SJ_NEW_A:df2})
        p = p.astype(int)
        a = pd.DataFrame(
            [['chr1', 2, 25, '+', 'GT/AG', False],
             ['chr1', 3, 25, '+', 'CT/AC', False],
             ['chr1', 5, 20, '+', 'GT/AG', True],
             ['chr1', 5, 30, '+', 'GT/AG', False],
             ['chr1', 10, 20, '+', 'CT/AC', True],
             ['chr1', 30, 40, '+', 'CT/AC', False]],
            index=ind,
            columns=[u'chrom', u'start', u'end', u'strand',
                     u'intron_motif', u'annotated']
        )
        p2, a2 = cpb.star._make_sj_out_panel(d)
        assert_frame_equal(a, a2)
        assert_panel_equal(p, p2)

    def test_new_junctions_neg(self):
        # Minus-strand variant of the test above.
        ind = [u'chr2:2-25', u'chr2:3-25', u'chr2:5-20', u'chr2:5-30',
               u'chr2:10-20', u'chr2:30-40']
        d = cpb.star._make_sj_out_dict([SJ_NEG_NONEW_A,
                                        SJ_NEG_NEW_A])
        df = d[SJ_NEG_NONEW_A].ix[ind, cpb.star.COUNT_COLS]
        df = df.fillna(0)
        df2 = d[SJ_NEG_NEW_A].ix[ind, cpb.star.COUNT_COLS]
        df2 = df2.fillna(0)
        p = pd.Panel({SJ_NEG_NONEW_A:df,
                      SJ_NEG_NEW_A:df2})
        p = p.astype(int)
        a = pd.DataFrame(
            [['chr2', 2, 25, '-', 'GT/AG', False],
             ['chr2', 3, 25, '-', 'CT/AC', False],
             ['chr2', 5, 20, '-', 'GT/AG', True],
             ['chr2', 5, 30, '-', 'GT/AG', False],
             ['chr2', 10, 20, '-', 'CT/AC', True],
             ['chr2', 30, 40, '-', 'CT/AC', False]],
            index=ind,
            columns=[u'chrom', u'start', u'end', u'strand',
                     u'intron_motif', u'annotated']
        )
        p2, a2 = cpb.star._make_sj_out_panel(d)
        assert_frame_equal(a, a2)
        assert_panel_equal(p, p2)
class TestFilterJxnsDonorAcceptor:
    """_filter_jxns_donor_acceptor: annotate junctions against EXT and
    flag novel donors/acceptors."""

    def test_filter_jxns_donor_acceptor_pos(self):
        d = cpb.star._make_sj_out_dict([SJ_NONEW_A,
                                        SJ_NONEW_B])
        p, a = cpb.star._make_sj_out_panel(d)
        ext, stats = cpb.star.read_external_annotation(EXT)
        c2, a2, stats = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
        # All junctions are annotated, so novel_donor/novel_acceptor
        # should be False everywhere.
        a = pd.DataFrame(
            [['chr1', 5, 20, '+', 'GT/AG', True, True, 'chr1:5',
              'chr1:20', 'gene1', 'chr1:5:+', 'chr1:20:+', False, False],
             ['chr1', 5, 25, '+', 'CT/AC', True, True, 'chr1:5',
              'chr1:25', 'gene1', 'chr1:5:+', 'chr1:25:+', False, False],
             ['chr1', 10, 20, '+', 'CT/AC', True, True, 'chr1:10',
              'chr1:20', 'gene1', 'chr1:10:+', 'chr1:20:+', False, False]],
            index=[u'chr1:5-20:+', u'chr1:5-25:+', u'chr1:10-20:+'],
            columns=[u'chrom', u'start', u'end',
                     u'strand', u'intron_motif', u'annotated',
                     u'ext_annotated', u'chrom:start', u'chrom:end',
                     u'gene_id', u'donor', u'acceptor', u'novel_donor',
                     u'novel_acceptor'])
        c = pd.DataFrame(array([[20, 0],[10, 10],[20, 20]]),
                         index=[u'chr1:5-20:+', u'chr1:5-25:+',
                                u'chr1:10-20:+'],
                         columns=[SJ_NONEW_A, SJ_NONEW_B])
        assert_frame_equal(a, a2)
        assert_frame_equal(c, c2)

    def test_filter_jxns_donor_acceptor_neg(self):
        # Minus-strand variant: donor/acceptor columns are swapped
        # relative to the genomic start/end.
        d = cpb.star._make_sj_out_dict([SJ_NEG_NONEW_A,
                                        SJ_NEG_NONEW_B])
        p, a = cpb.star._make_sj_out_panel(d)
        ext, stats = cpb.star.read_external_annotation(EXT)
        c2, a2, stats = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
        a = pd.DataFrame(
            [['chr2', 5, 20, '-', 'GT/AG', True, True, 'chr2:5',
              'chr2:20', 'gene2', 'chr2:20:-', 'chr2:5:-', False, False],
             ['chr2', 5, 25, '-', 'CT/AC', True, True, 'chr2:5',
              'chr2:25', 'gene2', 'chr2:25:-', 'chr2:5:-', False, False],
             ['chr2', 10, 20, '-', 'CT/AC', True, True, 'chr2:10',
              'chr2:20', 'gene2', 'chr2:20:-', 'chr2:10:-', False, False]],
            index=[u'chr2:5-20:-', u'chr2:5-25:-', u'chr2:10-20:-'],
            columns=[u'chrom', u'start', u'end',
                     u'strand', u'intron_motif', u'annotated',
                     u'ext_annotated', u'chrom:start', u'chrom:end',
                     u'gene_id', u'donor', u'acceptor', u'novel_donor',
                     u'novel_acceptor'])
        c = pd.DataFrame(array([[20, 0],[10, 10],[20, 20]]),
                         index=[u'chr2:5-20:-', u'chr2:5-25:-',
                                u'chr2:10-20:-'],
                         columns=[SJ_NEG_NONEW_A,
                                  SJ_NEG_NONEW_B])
        assert_frame_equal(a, a2)
        assert_frame_equal(c, c2)

    def test_filter_new_jxns(self):
        # Junctions absent from EXT but sharing an annotated donor or
        # acceptor are kept and flagged as novel_donor/novel_acceptor.
        d = cpb.star._make_sj_out_dict([SJ_NEW_A,
                                        SJ_NONEW_A])
        p, a = cpb.star._make_sj_out_panel(d)
        ext, stats = cpb.star.read_external_annotation(EXT)
        c2, a2, stats = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
        a = pd.DataFrame(
            [['chr1', 2, 25, '+', 'GT/AG', False, False, 'chr1:2',
              'chr1:25', 'gene1', 'chr1:2:+', 'chr1:25:+', False, False],
             ['chr1', 3, 25, '+', 'CT/AC', False, False, 'chr1:3',
              'chr1:25', 'gene1', 'chr1:3:+', 'chr1:25:+', True, False],
             ['chr1', 5, 20, '+', 'GT/AG', True, True, 'chr1:5', 'chr1:20',
              'gene1', 'chr1:5:+', 'chr1:20:+', False, False],
             ['chr1', 5, 30, '+', 'GT/AG', False, False, 'chr1:5',
              'chr1:30', 'gene1', 'chr1:5:+', 'chr1:30:+', False, True],
             ['chr1', 10, 20, '+', 'CT/AC', True, True, 'chr1:10',
              'chr1:20', 'gene1', 'chr1:10:+', 'chr1:20:+', False, False]],
            index=[u'chr1:2-25:+', u'chr1:3-25:+', u'chr1:5-20:+',
                   u'chr1:5-30:+', u'chr1:10-20:+'],
            columns=[u'chrom', u'start', u'end', u'strand', u'intron_motif',
                     u'annotated', u'ext_annotated', u'chrom:start',
                     u'chrom:end', u'gene_id', u'donor', u'acceptor',
                     u'novel_donor', u'novel_acceptor'])
        c = pd.DataFrame([[25, 0],
                          [30, 0],
                          [ 0, 20],
                          [20, 0],
                          [ 0, 20]],
                         index=[u'chr1:2-25:+', u'chr1:3-25:+', u'chr1:5-20:+',
                                u'chr1:5-30:+', u'chr1:10-20:+'],
                         columns=[SJ_NEW_A, SJ_NONEW_A])
        assert_frame_equal(a, a2)
        assert_frame_equal(c, c2)

    def test_filter_new_jxns_neg(self):
        # Minus-strand variant of the test above.
        d = cpb.star._make_sj_out_dict([SJ_NEG_NEW_A,
                                        SJ_NEG_NONEW_A])
        p, a = cpb.star._make_sj_out_panel(d)
        ext, es = cpb.star.read_external_annotation(EXT)
        c2, a2, s2 = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
        a = pd.DataFrame(
            [['chr2', 2, 25, '-', 'GT/AG', False, False, 'chr2:2',
              'chr2:25', 'gene2', 'chr2:25:-', 'chr2:2:-', False, False],
             ['chr2', 3, 25, '-', 'CT/AC', False, False, 'chr2:3',
              'chr2:25', 'gene2', 'chr2:25:-', 'chr2:3:-', False, True],
             ['chr2', 5, 20, '-', 'GT/AG', True, True, 'chr2:5', 'chr2:20',
              'gene2', 'chr2:20:-', 'chr2:5:-', False, False],
             ['chr2', 5, 30, '-', 'GT/AG', False, False, 'chr2:5',
              'chr2:30', 'gene2', 'chr2:30:-', 'chr2:5:-', True, False],
             ['chr2', 10, 20, '-', 'CT/AC', True, True, 'chr2:10',
              'chr2:20', 'gene2', 'chr2:20:-', 'chr2:10:-', False, False]],
            index=[u'chr2:2-25:-', u'chr2:3-25:-', u'chr2:5-20:-',
                   u'chr2:5-30:-', u'chr2:10-20:-'],
            columns=[u'chrom', u'start', u'end', u'strand', u'intron_motif',
                     u'annotated', u'ext_annotated', u'chrom:start',
                     u'chrom:end', u'gene_id', u'donor', u'acceptor',
                     u'novel_donor', u'novel_acceptor'])
        c = pd.DataFrame([[25, 0],
                          [30, 0],
                          [ 0, 20],
                          [20, 0],
                          [ 0, 20]],
                         index=[u'chr2:2-25:-', u'chr2:3-25:-', u'chr2:5-20:-',
                                u'chr2:5-30:-', u'chr2:10-20:-'],
                         columns=[SJ_NEG_NEW_A,
                                  SJ_NEG_NONEW_A])
        assert_frame_equal(a, a2)
        assert_frame_equal(c, c2)
class TestFindNovelDonorAcceptorDist:
def test_make_splice_targets_dict_donor_pos(self):
df, stats = cpb.star.read_external_annotation(EXT)
strand = '+'
feature = 'donor'
d = cpb.star._make_splice_targets_dict(df, feature, strand)
d2 = {'chr1:10:+': array([20]),
'chr1:2:+': array([20]),
'chr1:5:+': array([20, 25])}
assert d.keys() == d2.keys()
for k in d.keys():
assert (d[k] == d2[k]).all()
def test_make_splice_targets_dict_donor_neg(self):
df, stats = cpb.star.read_external_annotation(EXT)
strand = '-'
feature = 'donor'
d = cpb.star._make_splice_targets_dict(df, feature, strand)
d2 = {'chr2:20:-': array([2, 5, 10]),
'chr2:25:-': array([5])}
assert d.keys() == d2.keys()
for k in d.keys():
assert (d[k] == d2[k]).all()
def test_make_splice_targets_dict_acceptor_pos(self):
df, stats = cpb.star.read_external_annotation(EXT)
strand = '+'
feature = 'acceptor'
d = cpb.star._make_splice_targets_dict(df, feature, strand)
d2 = {'chr1:20:+': array([2, 5, 10]),
'chr1:25:+': array([5])}
assert d.keys() == d2.keys()
for k in d.keys():
assert (d[k] == d2[k]).all()
def test_make_splice_targets_dict_acceptor_neg(self):
df, stats = cpb.star.read_external_annotation(EXT)
strand = '-'
feature = 'acceptor'
d = cpb.star._make_splice_targets_dict(df, feature, strand)
d2 = {'chr2:2:-': array([20]),
'chr2:5:-': array([20, 25]),
'chr2:10:-': array([20])}
assert d.keys() == d2.keys()
for k in d.keys():
assert (d[k] == d2[k]).all()
def test_dist_to_annot_donor_acceptor_pos(self):
ext, stats = cpb.star.read_external_annotation(EXT)
strand = '+'
feature = 'donor'
# d is a dict whose keys are donors and whose values are sets that
# contain the positions of all acceptors associated with this donor.
d = cpb.star._make_splice_targets_dict(ext, feature, strand)
sjd = cpb.star._make_sj_out_dict([SJ_NEW_A,
SJ_NONEW_A])
p, a = cpb.star._make_sj_out_panel(sjd)
c, a, s = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
novel_feature = 'acceptor'
a = a[(a.strand == strand) & (a.novel_acceptor)]
up, down = cpb.star._dist_to_annot_donor_acceptor(a, d, strand,
novel_feature)
assert up == [5]
assert down == [nan]
def test_dist_to_annot_donor_acceptor_neg(self):
ext, stats = cpb.star.read_external_annotation(EXT)
strand = '-'
feature = 'donor'
# d is a dict whose keys are donors and whose values are sets that
# contain the positions of all acceptors associated with this donor.
d = cpb.star._make_splice_targets_dict(ext, feature, strand)
sjd = cpb.star._make_sj_out_dict([SJ_NEG_NEW_A,
SJ_NEG_NONEW_A])
p, a = cpb.star._make_sj_out_panel(sjd)
c, a, s = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
novel_feature = 'acceptor'
a = a[(a.strand == strand) & (a.novel_acceptor)]
up, down = cpb.star._dist_to_annot_donor_acceptor(a, d, strand,
novel_feature)
assert up == [2]
assert down == [nan]
def test_dist_to_annot_donor_acceptor_pos(self):
ext, stats = cpb.star.read_external_annotation(EXT)
strand = '+'
feature = 'acceptor'
# d is a dict whose keys are acceptors and whose values are sets that
# contain the positions of all donors associated with this acceptor.
d = cpb.star._make_splice_targets_dict(ext, feature, strand)
sjd = cpb.star._make_sj_out_dict([SJ_NEW_A,
SJ_NONEW_A])
p, a = cpb.star._make_sj_out_panel(sjd)
c, a, s = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
novel_feature = 'donor'
a = a[(a.strand == strand) & (a.novel_donor)]
up, down = cpb.star._dist_to_annot_donor_acceptor(a, d, strand,
novel_feature)
assert up == [np.nan]
assert down == [2]
def test_dist_to_annot_donor_acceptor_neg(self):
ext, stats = cpb.star.read_external_annotation(EXT)
strand = '-'
feature = 'acceptor'
# d is a dict whose keys are acceptors and whose values are sets that
# contain the positions of all donors associated with this acceptor.
d = cpb.star._make_splice_targets_dict(ext, feature, strand)
sjd = cpb.star._make_sj_out_dict([SJ_NEG_NEW_A,
SJ_NEG_NONEW_A])
p, a = cpb.star._make_sj_out_panel(sjd)
c, a, s = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
novel_feature = 'donor'
a = a[(a.strand == strand) & (a.novel_donor)]
up, down = cpb.star._dist_to_annot_donor_acceptor(a, d, strand,
novel_feature)
assert up == [np.nan]
assert down == [5]
def test_find_novel_donor_acceptor_dist_pos_a(self):
ext, stats = cpb.star.read_external_annotation(EXT)
sjd = cpb.star._make_sj_out_dict([SJ_NEW_A,
SJ_NONEW_A])
p, a = cpb.star._make_sj_out_panel(sjd)
c, a, s = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
df = cpb.star._find_novel_donor_acceptor_dist(a, ext)
df2 = pd.DataFrame([['chr1', 2, 25, '+', 'GT/AG', False, False,
'chr1:2', 'chr1:25', 'gene1', 'chr1:2:+',
'chr1:25:+', False, False, nan, nan, nan, nan],
['chr1', 3, 25, '+', 'CT/AC', False, False,
'chr1:3', 'chr1:25', 'gene1', 'chr1:3:+',
'chr1:25:+', True, False, nan, 2.0, nan, nan],
['chr1', 5, 20, '+', 'GT/AG', True, True, 'chr1:5',
'chr1:20', 'gene1', 'chr1:5:+', 'chr1:20:+', False,
False, nan, nan, nan, nan],
['chr1', 5, 30, '+', 'GT/AG', False, False,
'chr1:5', 'chr1:30', 'gene1', 'chr1:5:+',
'chr1:30:+', False, True, nan, nan, 5.0, nan],
['chr1', 10, 20, '+', 'CT/AC', True, True,
'chr1:10', 'chr1:20', 'gene1', 'chr1:10:+',
'chr1:20:+', False, False, nan, nan, nan, nan]],
index=[u'chr1:2-25:+', u'chr1:3-25:+',
u'chr1:5-20:+', u'chr1:5-30:+',
u'chr1:10-20:+'],
columns=[u'chrom', u'start', u'end', u'strand',
u'intron_motif', u'annotated',
u'ext_annotated', u'chrom:start',
u'chrom:end', u'gene_id', u'donor',
u'acceptor', u'novel_donor',
u'novel_acceptor', u'upstream_donor_dist',
u'downstream_donor_dist',
u'upstream_acceptor_dist',
u'downstream_acceptor_dist'])
assert_frame_equal(df, df2)
def test_find_novel_donor_acceptor_dist_neg_a(self):
ext, stats = cpb.star.read_external_annotation(EXT)
sjd = cpb.star._make_sj_out_dict([SJ_NEG_NEW_A,
SJ_NEG_NONEW_A])
p, a = cpb.star._make_sj_out_panel(sjd)
c, a, s = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
df = cpb.star._find_novel_donor_acceptor_dist(a, ext)
df2 = pd.DataFrame([['chr2', 2, 25, '-', 'GT/AG', False, False,
'chr2:2', 'chr2:25', 'gene2', 'chr2:25:-',
'chr2:2:-', False, False, nan, nan, nan, nan],
['chr2', 3, 25, '-', 'CT/AC', False, False,
'chr2:3', 'chr2:25', 'gene2', 'chr2:25:-',
'chr2:3:-', False, True, nan, nan, 2.0, nan],
['chr2', 5, 20, '-', 'GT/AG', True, True, 'chr2:5',
'chr2:20', 'gene2', 'chr2:20:-', 'chr2:5:-', False,
False, nan, nan, nan, nan],
['chr2', 5, 30, '-', 'GT/AG', False, False,
'chr2:5', 'chr2:30', 'gene2', 'chr2:30:-',
'chr2:5:-', True, False, nan, 5.0, nan, nan],
['chr2', 10, 20, '-', 'CT/AC', True, True,
'chr2:10', 'chr2:20', 'gene2', 'chr2:20:-',
'chr2:10:-', False, False, nan, nan, nan, nan]],
index=[u'chr2:2-25:-', u'chr2:3-25:-',
u'chr2:5-20:-', u'chr2:5-30:-',
u'chr2:10-20:-'],
columns=[u'chrom', u'start', u'end', u'strand',
u'intron_motif', u'annotated',
u'ext_annotated', u'chrom:start',
u'chrom:end', u'gene_id', u'donor',
u'acceptor', u'novel_donor',
u'novel_acceptor', u'upstream_donor_dist',
u'downstream_donor_dist',
u'upstream_acceptor_dist',
u'downstream_acceptor_dist'])
assert_frame_equal(df, df2)
def test_find_novel_donor_acceptor_dist_pos_b(self):
ext, stats = cpb.star.read_external_annotation(EXT)
sjd = cpb.star._make_sj_out_dict([SJ_NEW_B,
SJ_NONEW_A])
p, a = cpb.star._make_sj_out_panel(sjd)
c, a, s = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
df = cpb.star._find_novel_donor_acceptor_dist(a, ext)
vals = \
[['chr1', 2, 25, '+', 'GT/AG', False, False, 'chr1:2', 'chr1:25',
'gene1', 'chr1:2:+', 'chr1:25:+', False, False, nan, nan, nan,
nan],
['chr1', 3, 20, '+', 'CT/AC', False, False, 'chr1:3', 'chr1:20',
'gene1', 'chr1:3:+', 'chr1:20:+', True, False, 1.0, 2.0, nan,
nan],
['chr1', 5, 20, '+', 'GT/AG', True, True, 'chr1:5', 'chr1:20',
'gene1', 'chr1:5:+', 'chr1:20:+', False, False, nan, nan, nan,
nan],
['chr1', 5, 22, '+', 'GT/AG', False, False, 'chr1:5', 'chr1:22',
'gene1', 'chr1:5:+', 'chr1:22:+', False, True, nan, nan, 2.0,
3.0],
['chr1', 5, 30, '+', 'GT/AG', False, False, 'chr1:5', 'chr1:30',
'gene1', 'chr1:5:+', 'chr1:30:+', False, True, nan, nan, 5.0,
nan],
['chr1', 10, 20, '+', 'CT/AC', True, True, 'chr1:10', 'chr1:20',
'gene1', 'chr1:10:+', 'chr1:20:+', False, False, nan, nan, nan,
nan]]
cols = [u'chrom', u'start', u'end', u'strand', u'intron_motif',
u'annotated', u'ext_annotated', u'chrom:start', u'chrom:end',
u'gene_id', u'donor', u'acceptor', u'novel_donor',
u'novel_acceptor', u'upstream_donor_dist',
u'downstream_donor_dist', u'upstream_acceptor_dist',
u'downstream_acceptor_dist']
ind = [u'chr1:2-25:+', u'chr1:3-20:+', u'chr1:5-20:+', u'chr1:5-22:+',
u'chr1:5-30:+', u'chr1:10-20:+']
df2 = pd.DataFrame(vals, index=ind, columns=cols)
assert_frame_equal(df, df2)
def test_find_novel_donor_acceptor_dist_neg_b(self):
    """Novel donor/acceptor distances for minus-strand junctions.

    Mirror of the plus-strand test: on the minus strand the donor is the
    high-coordinate end and the acceptor the low-coordinate end, which is
    reflected in the expected donor/acceptor columns below.
    """
    ext, stats = cpb.star.read_external_annotation(EXT)
    sjd = cpb.star._make_sj_out_dict([SJ_NEG_NEW_B,
                                      SJ_NEG_NONEW_A])
    p, a = cpb.star._make_sj_out_panel(sjd)
    c, a, s = cpb.star._filter_jxns_donor_acceptor(p, a, ext)
    df = cpb.star._find_novel_donor_acceptor_dist(a, ext)
    # nan in a distance column means the junction is not a novel
    # donor/acceptor on that side, so no distance is defined.
    vals = [['chr2', 2, 25, '-', 'GT/AG', False, False, 'chr2:2', 'chr2:25',
             'gene2', 'chr2:25:-', 'chr2:2:-', False, False, nan, nan, nan,
             nan],
            ['chr2', 3, 20, '-', 'CT/AC', False, False, 'chr2:3', 'chr2:20',
             'gene2', 'chr2:20:-', 'chr2:3:-', False, True, nan, nan, 2.0,
             1.0],
            ['chr2', 5, 20, '-', 'GT/AG', True, True, 'chr2:5', 'chr2:20',
             'gene2', 'chr2:20:-', 'chr2:5:-', False, False, nan, nan, nan,
             nan],
            ['chr2', 5, 22, '-', 'GT/AG', False, False, 'chr2:5', 'chr2:22',
             'gene2', 'chr2:22:-', 'chr2:5:-', True, False, 3.0, 2.0, nan,
             nan],
            ['chr2', 5, 30, '-', 'GT/AG', False, False, 'chr2:5', 'chr2:30',
             'gene2', 'chr2:30:-', 'chr2:5:-', True, False, nan, 5.0, nan,
             nan],
            ['chr2', 10, 20, '-', 'CT/AC', True, True, 'chr2:10', 'chr2:20',
             'gene2', 'chr2:20:-', 'chr2:10:-', False, False, nan, nan, nan,
             nan]]
    cols = [u'chrom', u'start', u'end', u'strand', u'intron_motif',
            u'annotated', u'ext_annotated', u'chrom:start', u'chrom:end',
            u'gene_id', u'donor', u'acceptor', u'novel_donor',
            u'novel_acceptor', u'upstream_donor_dist',
            u'downstream_donor_dist', u'upstream_acceptor_dist',
            u'downstream_acceptor_dist']
    ind = [u'chr2:2-25:-', u'chr2:3-20:-', u'chr2:5-20:-', u'chr2:5-22:-',
           u'chr2:5-30:-', u'chr2:10-20:-']
    df2 = pd.DataFrame(vals, index=ind, columns=cols)
    assert_frame_equal(df, df2)
| mit |
KhanSuleyman/scikit-neuralnetwork | sknn/tests/test_deep.py | 5 | 3813 | import unittest
from nose.tools import (assert_false, assert_raises, assert_true,
assert_equal, assert_in)
import io
import pickle
import numpy
import logging
from sklearn.base import clone
import sknn
from sknn.mlp import Regressor as MLPR
from sknn.mlp import Layer as L
from . import test_linear
class TestDeepNetwork(test_linear.TestLinearNetwork):
    """Re-run the whole linear-network suite against a five-layer MLP.

    Inheriting from ``TestLinearNetwork`` means every test defined there
    executes again here, with the deeper regressor built in ``setUp``.
    """

    def setUp(self):
        # One hidden layer per supported activation, trained for a single
        # iteration so the inherited tests stay fast.
        hidden = [
            L("Rectifier", units=16),
            L("Sigmoid", units=12),
            L("Maxout", units=16, pieces=2),
            L("Tanh", units=4),
        ]
        self.nn = MLPR(layers=hidden + [L("Linear")], n_iter=1)

    def test_UnknownLayer(self):
        # An unrecognised layer type must be rejected at construction time.
        assert_raises(NotImplementedError, L, "Unknown")

    def test_UnknownActivation(self):
        # Same for an unknown activation name on a hidden layer.
        assert_raises(NotImplementedError, L, "Wrong", units=16)
# This class also runs all the tests from the linear network too.
class TestDeepDeterminism(unittest.TestCase):
    """Predictions must be fully determined by the random seed.

    Each test constructs a second network via some route (fresh seed, same
    seed, sklearn clone, pickle round-trip) and compares its predictions on
    a fixed random input against the reference network's.
    """

    def setUp(self):
        # Fixed random input batch (8 samples, 16 features) and a dummy
        # all-zero target used only to initialize the networks.
        self.a_in = numpy.random.uniform(0.0, 1.0, (8,16))
        self.a_out = numpy.zeros((8,1))

    def run_EqualityTest(self, copier, asserter):
        """For each activation, build a reference net and a copy, then apply
        `asserter` to the element-wise equality of their predictions.

        copier: callable(nn1, activation) -> second network.
        asserter: assert_true / assert_false.
        """
        # Only PyLearn2 supports Maxout.
        # NOTE(review): the condition adds Maxout when the backend is NOT
        # pylearn2, which looks inverted relative to the comment — confirm.
        extra = ["Maxout"] if sknn.backend.name != 'pylearn2' else []
        for activation in ["Rectifier", "Sigmoid", "Tanh"] + extra:
            nn1 = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
            nn1._initialize(self.a_in, self.a_out)
            nn2 = copier(nn1, activation)
            print('activation', activation)
            a_out1 = nn1.predict(self.a_in)
            a_out2 = nn2.predict(self.a_in)
            print(a_out1, a_out2)
            asserter(numpy.all(nn1.predict(self.a_in) == nn2.predict(self.a_in)))

    def test_DifferentSeedPredictNotEquals(self):
        # A different random_state must yield different predictions.
        def ctor(_, activation):
            nn = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=2345)
            nn._initialize(self.a_in, self.a_out)
            return nn
        self.run_EqualityTest(ctor, assert_false)

    def test_SameSeedPredictEquals(self):
        # The same random_state must reproduce identical predictions.
        def ctor(_, activation):
            nn = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
            nn._initialize(self.a_in, self.a_out)
            return nn
        self.run_EqualityTest(ctor, assert_true)

    def test_ClonePredictEquals(self):
        # sklearn.base.clone copies parameters (incl. random_state), so a
        # re-initialized clone must predict identically.
        def cloner(nn, _):
            cc = clone(nn)
            cc._initialize(self.a_in, self.a_out)
            return cc
        self.run_EqualityTest(cloner, assert_true)

    def test_SerializedPredictEquals(self):
        # A pickle round-trip must preserve predictions exactly.
        def serialize(nn, _):
            buf = io.BytesIO()
            pickle.dump(nn, buf)
            buf.seek(0)
            return pickle.load(buf)
        self.run_EqualityTest(serialize, assert_true)
class TestActivations(unittest.TestCase):
    """Parameter validation and warning behaviour for layer activations."""

    def setUp(self):
        # Capture the sknn logger's output so tests can assert on warnings.
        self.buf = io.StringIO()
        self.hnd = logging.StreamHandler(self.buf)
        logging.getLogger('sknn').addHandler(self.hnd)
        logging.getLogger().setLevel(logging.WARNING)

    def tearDown(self):
        # By default no log output may remain; a test that expects output
        # must consume/reset self.buf itself (see test_UnusedParameterWarning).
        assert_equal('', self.buf.getvalue())
        sknn.mlp.log.removeHandler(self.hnd)

    @unittest.skipIf(sknn.backend.name != 'pylearn2', 'only pylearn2')
    def test_MissingParameterException(self):
        # Maxout requires `pieces`; initialization must fail without it.
        nn = MLPR(layers=[L("Maxout", units=32), L("Linear")])
        a_in = numpy.zeros((8,16))
        assert_raises(ValueError, nn._initialize, a_in, a_in)

    def test_UnusedParameterWarning(self):
        # `pieces` is meaningless for a Linear layer, so a warning is logged.
        nn = MLPR(layers=[L("Linear", pieces=2)], n_iter=1)
        a_in = numpy.zeros((8,16))
        nn._initialize(a_in, a_in)
        assert_in('Parameter `pieces` is unused', self.buf.getvalue())
        self.buf = io.StringIO() # clear
| bsd-3-clause |
nvoron23/statsmodels | statsmodels/tools/_testing.py | 29 | 4809 | """Testing helper functions
Warning: current status experimental, mostly copy paste
Warning: these functions will be changed without warning as the need
during refactoring arises.
The first group of functions provide consistency checks
"""
import numpy as np
from numpy.testing import assert_allclose, assert_
from nose import SkipTest
# the following are copied from
# statsmodels.base.tests.test_generic_methods.CheckGenericMixin
# and only adjusted to work as standalone functions
def check_ttest_tvalues(results):
    """Check that t_test agrees with params, bse, tvalues, pvalues and
    conf_int of a fitted results instance.

    Parameters
    ----------
    results : statsmodels results instance
        Any fitted results object exposing t_test and the usual attributes.
    """
    # test that t_test has same results a params, bse, tvalues, ...
    res = results
    # identity contrast matrix: one t_test row per coefficient
    mat = np.eye(len(res.params))
    tt = res.t_test(mat)

    assert_allclose(tt.effect, res.params, rtol=1e-12)
    # TODO: tt.sd and tt.tvalue are 2d also for single regressor, squeeze
    assert_allclose(np.squeeze(tt.sd), res.bse, rtol=1e-10)
    assert_allclose(np.squeeze(tt.tvalue), res.tvalues, rtol=1e-12)
    assert_allclose(tt.pvalue, res.pvalues, rtol=5e-10)
    assert_allclose(tt.conf_int(), res.conf_int(), rtol=1e-10)

    # test params table frame returned by t_test
    table_res = np.column_stack((res.params, res.bse, res.tvalues,
                                 res.pvalues, res.conf_int()))
    # table1 is built from the t_test result but currently not compared;
    # summary_frame (table2) is checked against the attributes instead.
    table1 = np.column_stack((tt.effect, tt.sd, tt.tvalue, tt.pvalue,
                              tt.conf_int()))
    table2 = tt.summary_frame().values
    assert_allclose(table2, table_res, rtol=1e-12)

    # move this to test_attributes ?
    assert_(hasattr(res, 'use_t'))

    # single-row contrast: summary() must not raise (smoke test for #1323)
    tt = res.t_test(mat[0])
    tt.summary()  # smoke test for #1323
    assert_allclose(tt.pvalue, res.pvalues[0], rtol=5e-10)
def check_ftest_pvalues(results):
    """Check per-coefficient wald_test pvalues against res.pvalues and the
    p-value label used in the summary (P>|t| vs P>|z|).

    Parameters
    ----------
    results : statsmodels results instance
    """
    res = results
    use_t = res.use_t
    k_vars = len(res.params)

    # check default use_t
    pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue
             for k in range(k_vars)]
    assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)

    # automatic use_f based on results class use_t
    pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue
             for k in range(k_vars)]
    assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)

    # label for pvalues in summary must match the distribution in use
    string_use_t = 'P>|z|' if use_t is False else 'P>|t|'
    summ = str(res.summary())
    assert_(string_use_t in summ)

    # try except for models that don't have summary2
    try:
        summ2 = str(res.summary2())
    except AttributeError:
        summ2 = None

    if summ2 is not None:
        assert_(string_use_t in summ2)
# TODO The following is not (yet) guaranteed across models
#@knownfailureif(True)
def check_fitted(results):
    """Check that fittedvalues, resid and predict() agree with each other.

    Verifies ``endog - fittedvalues == resid`` and
    ``fittedvalues == predict()``.  GLM and discrete-model results are
    skipped because those identities do not hold for them in general.

    Parameters
    ----------
    results : statsmodels results instance
    """
    # import here to avoid heavy imports / cycles at module load
    from statsmodels.genmod.generalized_linear_model import GLMResults
    from statsmodels.discrete.discrete_model import DiscreteResults

    # unwrap the results wrapper for the isinstance check
    # FIXME: work around GEE has no wrapper
    if hasattr(results, '_results'):
        results = results._results

    if isinstance(results, (GLMResults, DiscreteResults)):
        raise SkipTest

    res = results
    fitted = res.fittedvalues
    assert_allclose(res.model.endog - fitted, res.resid, rtol=1e-12)
    assert_allclose(fitted, res.predict(), rtol=1e-12)
def check_predict_types(results):
    """Check that predict() accepts ndarray, nested-list and single-row input.

    For GLM / discrete models the calls are only smoke-tested; for all other
    models the predictions are also compared against ``fittedvalues``.

    Parameters
    ----------
    results : statsmodels results instance
    """
    res = results
    # squeeze to make 1d for single regressor test case
    p_exog = np.squeeze(np.asarray(res.model.exog[:2]))

    # import here to avoid heavy imports / cycles at module load
    from statsmodels.genmod.generalized_linear_model import GLMResults
    from statsmodels.discrete.discrete_model import DiscreteResults

    # unwrap the results wrapper for the isinstance check
    # FIXME: work around GEE has no wrapper
    if hasattr(results, '_results'):
        results = results._results

    if isinstance(results, (GLMResults, DiscreteResults)):
        # SMOKE test only TODO
        res.predict(p_exog)
        res.predict(p_exog.tolist())
        res.predict(p_exog[0].tolist())
    else:
        fitted = res.fittedvalues[:2]
        assert_allclose(fitted, res.predict(p_exog), rtol=1e-12)
        # this needs reshape to column-vector:
        assert_allclose(fitted, res.predict(np.squeeze(p_exog).tolist()),
                        rtol=1e-12)
        # only one prediction:
        assert_allclose(fitted[:1], res.predict(p_exog[0].tolist()),
                        rtol=1e-12)
        assert_allclose(fitted[:1], res.predict(p_exog[0]),
                        rtol=1e-12)

        # NOTE: predict does not preserve DataFrame input (dot converts to
        # ndarray), so a DataFrame round-trip is deliberately not checked.
| bsd-3-clause |
glennq/scikit-learn | examples/svm/plot_iris.py | 15 | 3256 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target

h = .02  # step size in the mesh

# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)

# create a mesh to plot in, padded one unit beyond the data range
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# title for the plots, in the same order as the classifiers below
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']

for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)

    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])

plt.show()
| bsd-3-clause |
sgranitz/nw | predict410/ols_ames-housing_hw01.py | 2 | 8494 | # Using Linear Regression to predict
# family home sale prices in Ames, Iowa
# Packages
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import seaborn as sns
from tabulate import tabulate
from statsmodels.iolib.summary2 import summary_col
# Set some options for the output
pd.set_option('display.notebook_repr_html', False)
pd.set_option('display.max_columns', 40)
pd.set_option('display.max_rows', 10)
pd.set_option('display.width', 120)

# Read in the data (hard-coded local path to the assignment files)
path = 'C:/Users/sgran/Desktop/northwestern/predict_410/assignment_1/'
train = pd.read_csv(path + 'ames_train.csv')
test = pd.read_csv(path + 'ames_test.csv')

# Convert all variable names to lower case
train.columns = [col.lower() for col in train.columns]
test.columns = [col.lower() for col in test.columns]

# EDA: shape, dtypes, descriptive statistics
print('\n----- Summary of Train Data -----\n')
print('Object type: ', type(train))
print('Number of observations & variables: ', train.shape)

# Variable names and information
print(train.info())
print(train.dtypes.value_counts())

# Descriptive statistics
print(train.describe())
# pretty-printed summary of a few key variables
print(tabulate(
    train[[
        'saleprice',
        'yrsold',
        'yearbuilt',
        'overallqual',
        'grlivarea',
        'garagecars'
    ]].describe().round(1),
    headers='keys',
    tablefmt='psql'
))

# show a portion of the beginning of the DataFrame
print(train.head(10))
print(train.shape)

# NOTE(review): the next two expressions compute missing/zero counts but
# discard the result (useful only in an interactive session).
train.loc[:, train.isnull().any()].isnull().sum().sort_values(ascending=False)
train[train == 0].count().sort_values(ascending=False)
# Per-column counts of missing (NaN) and zero entries; the remainder are
# observed non-zero values.  Used for the stacked bar chart and for the
# column-drop rule below.
t_null = train.isnull().sum()
t_zero = train[train == 0].count()
t_good = train.shape[0] - (t_null + t_zero)
xx = range(train.shape[1])

# Stacked bar chart per column: red = missing, yellow = zero, green = good.
plt.figure(figsize=(8,8))
plt.bar(xx, t_good, color='g', width=1,
        bottom=t_null+t_zero)
plt.bar(xx, t_zero, color='y', width=1,
        bottom=t_null)
plt.bar(xx, t_null, color='r', width=1)
plt.show()

print(t_null[t_null > 1000].sort_values(ascending=False))
print(t_zero[t_zero > 1900].sort_values(ascending=False))

# Drop columns that are mostly missing (>1000 NaN) or mostly zero (>1900).
drop_cols = (t_null > 1000) | (t_zero > 1900)
# FIX: a boolean mask must be inverted with `~`; unary `-` on a boolean
# Series is deprecated and raises in current pandas/NumPy.
train = train.loc[:, ~drop_cols]
# Some quick plots of the data: distributions, boxplots and a few
# predictor-vs-price scatter plots.
train.hist(figsize=(18,14))
train.plot(
    kind='box',
    subplots=True,
    layout=(5,9),
    sharex=False,
    sharey=False,
    figsize=(18,14)
)
train.plot.scatter(x='grlivarea', y='saleprice')
train.boxplot(column='saleprice', by='yrsold')
train.plot.scatter(x='subclass', y='saleprice')
train.boxplot(column='saleprice', by='overallqual')
train.boxplot(column='saleprice', by='overallcond')
train.plot.scatter(x='overallcond', y='saleprice')
train.plot.scatter(x='lotarea', y='saleprice')

# Replace NaN values with medians in train data (numeric columns only;
# median() skips object columns).
train = train.fillna(train.median())
# Remaining NaNs (object columns) get the column's most frequent value.
train = train.apply(lambda med:med.fillna(med.value_counts().index[0]))
train.head()

# Re-draw the missing/zero bar chart after imputation (red bars should be
# gone now; only zeros and observed values remain).
t_null = train.isnull().sum()
t_zero = train[train == 0].count()
t_good = train.shape[0] - (t_null + t_zero)
xx = range(train.shape[1])
plt.figure(figsize=(14,14))
plt.bar(xx, t_good, color='g', width=.8,
        bottom=t_null+t_zero)
plt.bar(xx, t_zero, color='y', width=.8,
        bottom=t_null)
plt.bar(xx, t_null, color='r', width=.8)
plt.show()

# NOTE(review): results discarded — only useful interactively.
train.bldgtype.unique()
train.housestyle.unique()
# Goal is typical family home
# Drop observations too far from typical: anything above the Tukey upper
# fence (Q3 + 1.5 * IQR) on price, living area, lot size or basement size.
# Filters are applied sequentially with quartiles recomputed on the
# already-filtered frame, matching the original step-by-step behaviour.
for col in ['saleprice', 'grlivarea', 'lotarea', 'totalbsmtsf']:
    q3 = np.percentile(train[col], 75)
    iqr = q3 - np.percentile(train[col], 25)
    drop_rows = train[col] > iqr * 1.5 + q3
    # FIX: invert the boolean mask with `~`, not unary `-` (deprecated and
    # removed for boolean Series in current pandas/NumPy).
    train = train.loc[~drop_rows, :]

# Replace 0 values with the median of the non-zero living areas.
m = np.median(train.grlivarea[train.grlivarea > 0])
train = train.replace({'grlivarea': {0: m}})
# Discrete variables: violin plots of sale price per category
plt.figure()
g = sns.PairGrid(train,
                 x_vars=["bldgtype",
                         "exterqual",
                         "centralair",
                         "kitchenqual",
                         "salecondition"],
                 y_vars=["saleprice"],
                 aspect=.75, size=3.5)
g.map(sns.violinplot, palette="pastel");

# Print correlations: strongest positive and negative with saleprice
corr_matrix = train.corr()
print(corr_matrix["saleprice"].sort_values(ascending=False).head(10))
print(corr_matrix["saleprice"].sort_values(ascending=True).head(10))
## Pick 10 variable to focus on
# (saleprice plus 10 candidate predictors — 11 entries in total)
pick_10 = [
    'saleprice',
    'grlivarea',
    'overallqual',
    'garagecars',
    'yearbuilt',
    'totalbsmtsf',
    'salecondition',
    'bldgtype',
    'kitchenqual',
    'exterqual',
    'centralair'
]

# Lower-triangle correlation heatmap of the selected variables.
corr = train[pick_10].corr()
# FIX: np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
blank = np.zeros_like(corr, dtype=bool)
blank[np.triu_indices_from(blank)] = True
fig, ax = plt.subplots(figsize=(10, 10))
corr_map = sns.diverging_palette(255, 133, l=60, n=7,
                                 center="dark", as_cmap=True)
sns.heatmap(corr, mask=blank, cmap=corr_map, square=True,
            vmax=.3, linewidths=0.25, cbar_kws={"shrink": .5})

# Quick plots: categorical predictors get strip/box plots, numeric ones a
# scatter against sale price.
for variable in pick_10[1:]:
    if train[variable].dtype.name == 'object':
        plt.figure()
        sns.stripplot(y="saleprice", x=variable, data=train, jitter=True)
        plt.show()
        plt.figure()
        sns.factorplot(y="saleprice", x=variable, data=train, kind="box")
        plt.show()
    else:
        fig, ax = plt.subplots()
        ax.set_ylabel('Sale Price')
        ax.set_xlabel(variable)
        scatter_plot = ax.scatter(
            y=train['saleprice'],
            x=train[variable],
            facecolors = 'none',
            edgecolors = 'blue'
        )
        plt.show()

plt.figure()
sns.factorplot(x="bldgtype", y="saleprice", col="exterqual", row="kitchenqual",
               hue="overallqual", data=train, kind="swarm")
plt.figure()
sns.countplot(y="overallqual", hue="exterqual", data=train, palette="Greens_d")
# Run simple models: five nested OLS specifications of increasing size
model1 = smf.ols(formula='saleprice ~ grlivarea', data=train).fit()
model2 = smf.ols(formula='saleprice ~ grlivarea + overallqual', data=train).fit()
model3 = smf.ols(formula='saleprice ~ grlivarea + overallqual + garagecars' , data=train).fit()
model4 = smf.ols(formula='saleprice ~ grlivarea + overallqual + garagecars + yearbuilt' , data=train).fit()
model5 = smf.ols(formula='saleprice ~ grlivarea + overallqual + garagecars + yearbuilt + totalbsmtsf + kitchenqual + exterqual + centralair', data=train).fit()

print('\n\nmodel 1----------\n', model1.summary())
print('\n\nmodel 2----------\n', model2.summary())
print('\n\nmodel 3----------\n', model3.summary())
print('\n\nmodel 4----------\n', model4.summary())
print('\n\nmodel 5----------\n', model5.summary())

out = [model1,
       model2,
       model3,
       model4,
       model5]

# Collect fit statistics for each model into a comparison table and plot
# actual vs fitted prices per model.
out_df = pd.DataFrame()
out_df['labels'] = ['rsquared', 'rsquared_adj', 'fstatistic', 'aic']
i = 0
for model in out:
    train['pred'] = model.fittedvalues
    plt.figure()
    train.plot.scatter(x='saleprice', y='pred', title='model' + str(i+1))
    plt.show()
    out_df['model' + str(i+1)] = [
        model.rsquared.round(3),
        model.rsquared_adj.round(3),
        model.fvalue.round(3),
        model.aic.round(3)
    ]
    i += 1

print(tabulate(out_df, headers=out_df.columns, tablefmt='psql'))
print(summary_col(out, stars=True))

# Keep the final model's in-sample predictions on the train frame.
train['predictions'] = model5.fittedvalues
print(train['predictions'])
# Clean test data
test.info()
# NOTE(review): `test[3:]` slices ROWS from position 3 onward, not columns —
# presumably the intent was to skip id-like leading columns; confirm.
test[3:] = test[3:].fillna(test[3:].median())
test["kitchenqual"] = test["kitchenqual"].fillna(test["kitchenqual"].value_counts().index[0])
test["exterqual"] = test["exterqual"].fillna(test["exterqual"].value_counts().index[0])
m = np.median(test.grlivarea[test.grlivarea > 0])
test = test.replace({'grlivarea': {0: m}})
print(test)

# Score the test set; clamp negative predictions to the minimum observed
# training price (a sale price cannot be negative).
test_predictions = model5.predict(test)
test_predictions[test_predictions < 0] = train['saleprice'].min()
print(test_predictions)

# Convert the array predictions to a data frame then merge with the index
# for the test data.
dat = {'p_saleprice': test_predictions}
df1 = test[['index']]
df2 = pd.DataFrame(data=dat)
# FIX: `join_axes` was removed in pandas 1.0; reindexing the concatenated
# frame to df1's index reproduces join_axes=[df1.index] exactly.
submission = pd.concat([df1, df2], axis=1).reindex(df1.index)
print(submission)
| mit |
CVML/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Show in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)

# Code source: Gael Varoquaux
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model

# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
# FIX: np.float was removed in NumPy 1.24; the builtin float is equivalent.
y = (X > 0).astype(float)
X[X > 0] *= 4                  # separate the positive class from zero
X += .3 * np.random.normal(size=n_samples)

X = X[:, np.newaxis]           # sklearn expects a 2-D (n_samples, 1) array

# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)

# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
    """Logistic sigmoid, 1 / (1 + exp(-x)); works on scalars and arrays."""
    return np.reciprocal(1 + np.exp(-x))
# logistic curve: sigmoid of the fitted linear decision function
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)

# ordinary least squares line for comparison with the logit curve
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')  # 0.5 decision threshold

plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
jostep/tensorflow | tensorflow/examples/learn/boston.py | 75 | 2549 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
    """Train and evaluate a DNNRegressor on the Boston housing dataset.

    Loads the data, standardizes the training features, fits a two-layer
    DNN regressor, then reports test MSE via sklearn and via the
    estimator's own evaluation.
    """
    # Load dataset
    boston = datasets.load_boston()
    x, y = boston.data, boston.target

    # Split dataset into train / test
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        x, y, test_size=0.2, random_state=42)

    # Scale data (training set) to 0 mean and unit standard deviation.
    scaler = preprocessing.StandardScaler()
    x_train = scaler.fit_transform(x_train)

    # Build 2 layer fully connected DNN with 10, 10 units respectively.
    feature_columns = [
        tf.feature_column.numeric_column('x', shape=np.array(x_train).shape[1:])]
    regressor = tf.estimator.DNNRegressor(
        feature_columns=feature_columns, hidden_units=[10, 10])

    # Train.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': x_train}, y=y_train, batch_size=1, num_epochs=None, shuffle=True)
    regressor.train(input_fn=train_input_fn, steps=2000)

    # Predict. Test features reuse the scaling fitted on the training set.
    x_transformed = scaler.transform(x_test)
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': x_transformed}, y=y_test, num_epochs=1, shuffle=False)
    predictions = regressor.predict(input_fn=test_input_fn)
    y_predicted = np.array(list(p['predictions'] for p in predictions))
    y_predicted = y_predicted.reshape(np.array(y_test).shape)

    # Score with sklearn.
    score_sklearn = metrics.mean_squared_error(y_predicted, y_test)
    print('MSE (sklearn): {0:f}'.format(score_sklearn))

    # Score with tensorflow.
    scores = regressor.evaluate(input_fn=test_input_fn)
    print('MSE (tensorflow): {0:f}'.format(scores['average_loss']))


if __name__ == '__main__':
    tf.app.run()
| apache-2.0 |
fsschneider/DeepOBS | tests/testproblems/display_vae.py | 1 | 2669 | # -*- coding: utf-8 -*-
"""Script to visualize generated VAE images from DeepOBS."""
import os
import sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from deepobs.tensorflow import testproblems
def generate(sess, sampled_z, grid_size=5):
    """Function to generate images using the decoder.

    Args:
        sess (tf.Session): A TensorFlow session.
        sampled_z (tf.Variable): Sampled ``z`` with dimensions ``latent size``
            times ``number of examples``.
        grid_size (int): Will display grid_size**2 number of generated images.

    Returns:
        matplotlib.figure.Figure: A grid_size x grid_size grid of the
        decoded images (reshaped to 28x28, i.e. MNIST-sized).
    """
    # Look up the encoder's latent tensor and the decoder output by name;
    # assumes the VAE graph has already been built in the default graph.
    z = tf.get_default_graph().get_tensor_by_name("encoder/z:0")
    dec = tf.get_default_graph().get_tensor_by_name("decoder/decoder_op:0")
    imgs = sess.run(dec, feed_dict={z: sampled_z})
    imgs = [
        np.reshape(imgs[i], [28, 28]) for i in range(grid_size * grid_size)
    ]
    fig = plt.figure()
    for i in range(grid_size * grid_size):
        axis = fig.add_subplot(grid_size, grid_size, i + 1)
        axis.imshow(imgs[i], cmap='gray')
        axis.axis("off")
    return fig
def display_images(testproblem_cls, grid_size=5, num_epochs=1):
    """Display images from a DeepOBS data set.

    Trains the test problem's VAE with Adam and shows a grid of decoded
    samples from the same fixed latent codes before training and after
    each epoch, so the figures are directly comparable.

    Args:
        testproblem_cls: The DeepOBS VAE testproblem class.
        grid_size (int): Will display grid_size**2 number of generated images.
        num_epochs (int): Number of training epochs to run.
    """
    tf.reset_default_graph()
    # Fix both TF and NumPy seeds so the sampled latents and training are
    # reproducible across calls.
    tf.set_random_seed(42)
    np.random.seed(42)
    # Fixed latent codes (dimension 8) reused for every generated grid.
    sampled_z = [
        np.random.normal(0, 1, 8) for _ in range(grid_size * grid_size)
    ]
    testprob = testproblem_cls(batch_size=grid_size * grid_size)
    testprob.set_up()
    loss = tf.reduce_mean(testprob.losses) + testprob.regularizer
    train_step = tf.train.AdamOptimizer(0.0005).minimize(loss)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(testprob.train_init_op)

    # Epoch 0: untrained decoder output as a baseline
    fig = generate(sess, sampled_z, grid_size=grid_size)
    fig.suptitle(testproblem_cls.__name__ + " epoch 0")
    # fig.tight_layout(pad=0, w_pad=0, h_pad=0)
    fig.show()

    # Train Loop: one inner `while` per epoch; the dataset iterator signals
    # the end of an epoch by raising OutOfRangeError.
    for i in range(num_epochs):
        while True:
            try:
                sess.run(train_step)
            except tf.errors.OutOfRangeError:
                break
        fig = generate(sess, sampled_z, grid_size=grid_size)
        fig.suptitle(testproblem_cls.__name__ + " epoch " + str(i+1))
        # fig.tight_layout(pad=0, w_pad=0, h_pad=0)
        fig.show()


if __name__ == "__main__":
    display_images(testproblems.mnist_vae)
    display_images(testproblems.fmnist_vae)
    plt.show()
| mit |
nhejazi/scikit-learn | sklearn/datasets/species_distributions.py | 4 | 8780 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
`"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips,
R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006.
Notes
-----
For an example of using this dataset, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs, remove
from os.path import exists
import sys
import logging
import numpy as np
from .base import get_data_home
from .base import _fetch_remote
from .base import RemoteFileMetadata
from ..utils import Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
# True on Python 3 — selects the str/bytes handling branch in _load_csv.
PY3_OR_LATER = sys.version_info[0] >= 3

# The original data can be found at:
# http://biodiversityinformatics.amnh.org/open_source/maxent/samples.zip
SAMPLES = RemoteFileMetadata(
    filename='samples.zip',
    url='https://ndownloader.figshare.com/files/5976075',
    checksum=('abb07ad284ac50d9e6d20f1c4211e0fd'
              '3c098f7f85955e89d321ee8efe37ac28'))

# The original data can be found at:
# http://biodiversityinformatics.amnh.org/open_source/maxent/coverages.zip
COVERAGES = RemoteFileMetadata(
    filename='coverages.zip',
    url='https://ndownloader.figshare.com/files/5976078',
    checksum=('4d862674d72e79d6cee77e63b98651ec'
              '7926043ba7d39dcb31329cf3f6073807'))

# Filename of the pickled archive cached in the scikit-learn data home.
DATA_ARCHIVE_NAME = "species_coverage.pkz"

logger = logging.getLogger(__name__)
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
M[nodata] = -9999
return M
def _load_csv(F):
    """Load csv file.

    Parameters
    ----------
    F : file object
        CSV file open in byte mode.

    Returns
    -------
    rec : np.ndarray
        record array representing the data; columns are named after the
        header row, rows are (species: bytes, x: float32, y: float32).
    """
    if PY3_OR_LATER:
        # Numpy recarray wants Python 3 str but not bytes...
        names = F.readline().decode('ascii').strip().split(',')
    else:
        # Numpy recarray wants Python 2 str but not unicode
        names = F.readline().strip().split(',')
    # 'a22' = 22-byte string field for the species name, then two float32s
    rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
    rec.dtype.names = names
    return rec
def construct_grids(batch):
    """Build the longitude/latitude grids for a species-distribution batch.

    Parameters
    ----------
    batch : Batch object
        The object returned by :func:`fetch_species_distributions`.

    Returns
    -------
    (xgrid, ygrid) : 1-D arrays
        Cell coordinates matching the layout of ``batch.coverages``.
    """
    step = batch.grid_size

    # Coordinates of the corner cells: one cell in from the lower-left
    # corner of the map, spanning Nx (resp. Ny) cells of width `step`.
    x_start = batch.x_left_lower_corner + step
    x_stop = x_start + (batch.Nx * step)
    y_start = batch.y_left_lower_corner + step
    y_stop = y_start + (batch.Ny * step)

    xgrid = np.arange(x_start, x_stop, step)
    ygrid = np.arange(y_start, y_stop, step)
    return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
                                download_if_missing=True):
    """Loader for species distribution dataset from Phillips et. al. (2006)

    Read more in the :ref:`User Guide <datasets>`.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    --------
    The data is returned as a Bunch object with the following attributes:

    coverages : array, shape = [14, 1592, 1212]
        These represent the 14 features measured at each point of the map grid.
        The latitude/longitude values for the grid are discussed below.
        Missing data is represented by the value -9999.

    train : record array, shape = (1623,)
        The training points for the data.  Each point has three fields:

        - train['species'] is the species name
        - train['dd long'] is the longitude, in degrees
        - train['dd lat'] is the latitude, in degrees

    test : record array, shape = (619,)
        The test points for the data.  Same format as the training data.

    Nx, Ny : integers
        The number of longitudes (x) and latitudes (y) in the grid

    x_left_lower_corner, y_left_lower_corner : floats
        The (x,y) position of the lower-left corner, in degrees

    grid_size : float
        The spacing between points of the grid, in degrees

    References
    ----------

    * `"Maximum entropy modeling of species geographic distributions"
      <http://rob.schapire.net/papers/ecolmod.pdf>`_
      S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
      190:231-259, 2006.

    Notes
    -----

    This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et. al. (2006).

    The two species are:

    - `"Bradypus variegatus"
      <http://www.iucnredlist.org/details/3038/0>`_ ,
      the Brown-throated Sloth.

    - `"Microryzomys minutus"
      <http://www.iucnredlist.org/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, Peru, and Venezuela.

    - For an example of using this dataset with scikit-learn, see
      :ref:`examples/applications/plot_species_distribution_modeling.py
      <sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
    """
    data_home = get_data_home(data_home)
    if not exists(data_home):
        makedirs(data_home)

    # Define parameters for the data files.  These should not be changed
    # unless the data model changes.  They will be saved in the npz file
    # with the downloaded data.
    extra_params = dict(x_left_lower_corner=-94.8,
                        Nx=1212,
                        y_left_lower_corner=-56.05,
                        Ny=1592,
                        grid_size=0.05)
    dtype = np.int16

    archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)

    # Build the Bunch once and cache it compressed; subsequent calls hit the
    # cached pickle in the else-branch below.
    if not exists(archive_path):
        if not download_if_missing:
            raise IOError("Data not found and `download_if_missing` is False")
        logger.info('Downloading species data from %s to %s' % (
            SAMPLES.url, data_home))
        samples_path = _fetch_remote(SAMPLES, dirname=data_home)
        X = np.load(samples_path)  # samples.zip is a valid npz
        remove(samples_path)

        # The npz contains one member per csv; dispatch on the member name.
        for f in X.files:
            fhandle = BytesIO(X[f])
            if 'train' in f:
                train = _load_csv(fhandle)
            if 'test' in f:
                test = _load_csv(fhandle)

        logger.info('Downloading coverage data from %s to %s' % (
            COVERAGES.url, data_home))
        coverages_path = _fetch_remote(COVERAGES, dirname=data_home)
        X = np.load(coverages_path)  # coverages.zip is a valid npz
        remove(coverages_path)

        # Parse each ASCII grid member into one layer of the coverage stack.
        coverages = []
        for f in X.files:
            fhandle = BytesIO(X[f])
            logger.debug(' - converting {}'.format(f))
            coverages.append(_load_coverage(fhandle))
        coverages = np.asarray(coverages, dtype=dtype)

        bunch = Bunch(coverages=coverages,
                      test=test,
                      train=train,
                      **extra_params)
        joblib.dump(bunch, archive_path, compress=9)
    else:
        bunch = joblib.load(archive_path)

    return bunch
| bsd-3-clause |
LennonLab/Micro-Encounter | fig-scripts/OLD-fig-scripts/EncounterFig_Heat.py | 1 | 3846 | from __future__ import division
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import sys

# Repository root and helper-module path for the Micro-Encounter project.
mydir = os.path.expanduser('~/GitHub/Micro-Encounter')
sys.path.append(mydir+'/tools')
mydir2 = os.path.expanduser("~/")

# Load the simulation output and coerce columns to numeric, dropping rows
# that fail to convert.
# NOTE(review): DataFrame.convert_objects was removed in modern pandas; this
# script needs pandas < 0.17 (today's equivalent is apply(pd.to_numeric)).
dat = pd.read_csv(mydir + '/results/simulated_data/SimData.csv')
dat = dat.convert_objects(convert_numeric=True).dropna()

#-------------------------DATA FILTERS------------------------------------------
# Keep only the complexity levels of interest; the commented-out filters are
# alternative slices of the data used for other figure variants.

dat = dat[dat['ResourceComplexityLevel'] != 3]
#dat = dat[dat['TrophicComplexityLevel'] != 3]
#dat = dat[dat['SpatialComplexityLevel'] == 3]

#dat = dat[dat['height'] <= 6]
#dat = dat[dat['width'] <= 6]

# Derived log10 columns; infinite values (log of zero/negative) are dropped
# after each transformation.
dat['DormFreq'] = np.log10(dat['MeanDormFreq'])
#dat['DormFreq'] = dat['MeanDormFreq']
dat = dat[np.isfinite(dat['DormFreq'])]
dat = dat[dat['DormFreq'] > -2]

dat['Encounters'] = np.log10(dat['MeanEncounter'])
dat = dat[np.isfinite(dat['Encounters'])]
dat = dat[dat['Encounters'] < 3]

dat['Production'] = np.log10(dat['MeanIndProduction'])
dat = dat[np.isfinite(dat['Production'])]

dat['TotalAbundance'] = np.log10(dat['MeanTotalAbundance'])
dat = dat[np.isfinite(dat['TotalAbundance'])]

# Active abundance = total abundance scaled by the non-dormant fraction.
dat['ActiveAbundance'] = np.log10(dat['MeanTotalAbundance'] * (1 - dat['MeanDormFreq']))
dat = dat[np.isfinite(dat['ActiveAbundance'])]

#-------------------------------------------------------------------------------

#### plot figure ###############################################################
fs = 8 # fontsize
fig = plt.figure()

# Shared hexbin settings: grid size, minimum count per cell, log color bins.
gd = 40
mct = 1
bns = 'log'

#### PLOT 1 #################################################################
# Dormancy vs. encounters.
fig.add_subplot(2, 2, 1)
xlab = 'Average encounters, '+'$log$'+r'$_{10}$'
ylab = '% Dormancy, '+'$log$'+r'$_{10}$'
#ylab = '% Dormancy'
width = 1

x = dat['Encounters'].tolist()
y = dat['DormFreq'].tolist()

plt.hexbin(x, y, mincnt=mct, gridsize = gd, bins=bns, cmap=plt.cm.jet)
plt.ylabel(ylab, fontsize=fs+5)
plt.xlabel(xlab, fontsize=fs+5)
#plt.ylim(-1.0, 0.1)
plt.tick_params(axis='both', which='major', labelsize=fs)
plt.legend(bbox_to_anchor=(-0.04, 1.05, 2.48, .2), loc=10, ncol=3, mode="expand",prop={'size':fs})

#### PLOT 2 ################################
# Productivity vs. encounters.
fig.add_subplot(2, 2, 2)

xlab = 'Average encounters, '+'$log$'+r'$_{10}$'
ylab = 'Productivity, '+'$log$'+r'$_{10}$'

x = dat['Encounters'].tolist()
y = dat['Production'].tolist()

plt.hexbin(x, y, mincnt=mct, gridsize = gd, bins=bns, cmap=plt.cm.jet)
plt.ylabel(ylab, fontsize=fs+5)
plt.xlabel(xlab, fontsize=fs+5)
#plt.ylim(-2.0, 2.0)
#plt.xlim(0.1, 300)
plt.tick_params(axis='both', which='major', labelsize=fs)

#### PLOT 3 #################################################################
# Total abundance vs. encounters.
fig.add_subplot(2, 2, 3)

xlab = 'Average encounters, '+'$log$'+r'$_{10}$'
ylab = 'Total abundance, '+'$log$'+r'$_{10}$'

x = dat['Encounters'].tolist()
y = dat['TotalAbundance'].tolist()

plt.hexbin(x, y, mincnt=mct, gridsize = gd, bins=bns, cmap=plt.cm.jet)
plt.ylabel(ylab, fontsize=fs+5)
plt.xlabel(xlab, fontsize=fs+5)
#plt.xlim(0.15, 300)
#plt.ylim(0.5, 3.1)
plt.tick_params(axis='both', which='major', labelsize=fs)

#### PLOT 4 #################################################################
# Active (non-dormant) abundance vs. encounters.
fig.add_subplot(2, 2, 4)

xlab = 'Average encounters, '+'$log$'+r'$_{10}$'
ylab = 'Active abundance, '+'$log$'+r'$_{10}$'

x = dat['Encounters'].tolist()
y = dat['ActiveAbundance'].tolist()

plt.hexbin(x, y, mincnt=mct, gridsize = gd, bins=bns, cmap=plt.cm.jet)
plt.ylabel(ylab, fontsize=fs+5)
plt.xlabel(xlab, fontsize=fs+5)
#plt.xlim(0.15, 1000)
#plt.ylim(-0.5, 3.1)
plt.tick_params(axis='both', which='major', labelsize=fs)

#### Final Format and Save #####################################################
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.savefig(mydir + '/results/figures/EncounterFig_Heat_Spatial_RC2-SC3.png', dpi=600, bbox_inches = "tight")
#plt.show()
#plt.close()
| gpl-3.0 |
vigilv/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-

"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================

In this example, we fit a Gaussian Process model onto the diabetes
dataset.

We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.

We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)

# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause

# NOTE(review): GaussianProcess and sklearn.cross_validation are legacy APIs
# that were later removed from scikit-learn -- this example targets an old
# release; confirm the installed version before running.
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold

# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

# Instanciate a GP model (one correlation length per feature: anisotropic)
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
                     theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
                     thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')

# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)

# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_  # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None  # None bounds deactivate MLE

# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20  # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
      % (K, R2))
| bsd-3-clause |
bzero/statsmodels | statsmodels/sandbox/infotheo.py | 33 | 16417 | """
Information Theoretic and Entropy Measures
References
----------
Golan, As. 2008. "Information and Entropy Econometrics -- A Review and
Synthesis." Foundations And Trends in Econometrics 2(1-2), 1-145.
Golan, A., Judge, G., and Miller, D. 1996. Maximum Entropy Econometrics.
Wiley & Sons, Chichester.
"""
#For MillerMadow correction
#Miller, G. 1955. Note on the bias of information estimates. Info. Theory
# Psychol. Prob. Methods II-B:95-100.
#For ChaoShen method
#Chao, A., and T.-J. Shen. 2003. Nonparametric estimation of Shannon's index of diversity when
#there are unseen species in sample. Environ. Ecol. Stat. 10:429-443.
#Good, I. J. 1953. The population frequencies of species and the estimation of population parameters.
#Biometrika 40:237-264.
#Horvitz, D.G., and D. J. Thompson. 1952. A generalization of sampling without replacement from a finute universe. J. Am. Stat. Assoc. 47:663-685.
#For NSB method
#Nemenman, I., F. Shafee, and W. Bialek. 2002. Entropy and inference, revisited. In: Dietterich, T.,
#S. Becker, Z. Gharamani, eds. Advances in Neural Information Processing Systems 14: 471-478.
#Cambridge (Massachusetts): MIT Press.
#For shrinkage method
#Dougherty, J., Kohavi, R., and Sahami, M. (1995). Supervised and unsupervised discretization of
#continuous features. In International Conference on Machine Learning.
#Yang, Y. and Webb, G. I. (2003). Discretization for naive-bayes learning: managing discretization
#bias and variance. Technical Report 2003/131 School of Computer Science and Software Engineer-
#ing, Monash University.
from statsmodels.compat.python import range, lzip, lmap
from scipy import stats
import numpy as np
from matplotlib import pyplot as plt
from scipy.misc import logsumexp as sp_logsumexp
#TODO: change these to use maxentutils so that over/underflow is handled
#with the logsumexp.
def logsumexp(a, axis=None):
    """
    Compute the log of the sum of exponentials log(e^{a_1}+...e^{a_n}) of a

    Avoids numerical overflow.

    Parameters
    ----------
    a : array-like
        The vector to exponentiate and sum
    axis : int, optional
        The axis along which to apply the operation.  Default is None,
        which reduces over the flattened array.

    Returns
    -------
    sum(log(exp(a)))

    Notes
    -----
    This function was taken from the mailing list
    http://mail.scipy.org/pipermail/scipy-user/2009-October/022931.html

    This should be superceded by the ufunc when it is finished.
    """
    a = np.asarray(a)
    if axis is None:
        # Handle the flattened case directly instead of delegating to
        # scipy.misc.logsumexp, which has been removed from scipy.
        a = a.ravel()
        a_max = a.max()
        return a_max + np.log(np.exp(a - a_max).sum())
    shp = list(a.shape)
    shp[axis] = 1
    a_max = a.max(axis=axis)
    # Subtract the per-slice maximum before exponentiating so that exp()
    # never overflows; add it back on the log scale afterwards.
    s = np.log(np.exp(a - a_max.reshape(shp)).sum(axis=axis))
    lse = a_max + s
    return lse
def _isproperdist(X):
"""
Checks to see if `X` is a proper probability distribution
"""
X = np.asarray(X)
if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):
return False
else:
return True
def discretize(X, method="ef", nbins=None):
    """
    Discretize `X`

    Parameters
    ----------
    X : array-like
        Observations to discretize.
    method : string
        "ef" is equal-frequency binning
        "ew" is equal-width binning
    nbins : int, optional
        Number of bins.  Default is floor(sqrt(N))

    Returns
    -------
    discrete : ndarray
        Bin label (starting at 1) for each observation.

    Examples
    --------
    """
    X = np.asarray(X)
    nobs = len(X)
    if nbins is None:  # identity test, not '== None'
        nbins = np.floor(np.sqrt(nobs))
    if method == "ef":
        # Equal frequency: rank the data and split ranks into nbins groups.
        discrete = np.ceil(nbins * stats.rankdata(X) / nobs)
    if method == "ew":
        width = np.max(X) - np.min(X)
        width = np.floor(width / nbins)
        # scipy.stats.fastsort was removed from scipy; argsort provides the
        # same (sorted values, original indices) pair.
        ivec = np.argsort(X, kind="stable")
        svec = X[ivec]
        discrete = np.zeros(nobs)
        binnum = 1
        base = svec[0]
        discrete[ivec[0]] = binnum
        for i in range(1, nobs):
            if svec[i] < base + width:
                discrete[ivec[i]] = binnum
            else:
                # Start a new bin anchored at the current value.
                base = svec[i]
                binnum += 1
                discrete[ivec[i]] = binnum
    return discrete
#TODO: looks okay but needs more robust tests for corner cases
def logbasechange(a, b):
    """
    There is a one-to-one transformation of the entropy value from
    a log base b to a log base a :

    H_{b}(X)=log_{b}(a)[H_{a}(X)]

    Returns
    -------
    log_{b}(a)
    """
    # Change-of-base identity: log_a(b) = ln(b) / ln(a).
    numerator = np.log(b)
    denominator = np.log(a)
    return numerator / denominator

def natstobits(X):
    """
    Converts from nats to bits
    """
    return X * logbasechange(np.e, 2)

def bitstonats(X):
    """
    Converts from bits to nats
    """
    return X * logbasechange(2, np.e)
#TODO: make this entropy, and then have different measures as
#a method
def shannonentropy(px, logbase=2):
    """
    This is Shannon's entropy

    Parameters
    -----------
    px : 1d or 2d array_like
        Can be a discrete probability distribution, a 2d joint distribution,
        or a sequence of probabilities.
    logbase, int or np.e
        The base of the log

    Returns
    -----
    For log base 2 (bits) given a discrete distribution
        H(p) = sum(px * log2(1/px) = -sum(pk*log2(px)) = E[log2(1/p(X))]

    For log base 2 (bits) given a joint distribution
        H(px,py) = -sum_{k,j}*w_{kj}log2(w_{kj})

    Notes
    -----
    shannonentropy(0) is defined as 0
    """
    #TODO: haven't defined the px,py case?
    px = np.asarray(px)
    if not (np.all(px <= 1) and np.all(px >= 0)):
        raise ValueError("px does not define proper distribution")
    # nan_to_num maps the 0*log(0) = nan terms to 0 per the convention above.
    H = -np.sum(np.nan_to_num(px * np.log2(px)))
    if logbase == 2:
        return H
    return logbasechange(2, logbase) * H
# Shannon's information content
def shannoninfo(px, logbase=2):
    """
    Shannon's information

    Parameters
    ----------
    px : float or array-like
        `px` is a discrete probability distribution

    Returns
    -------
    For logbase = 2
    np.log2(px)
    """
    px = np.asarray(px)
    if not (np.all(px <= 1) and np.all(px >= 0)):
        raise ValueError("px does not define proper distribution")
    # Surprisal: -log2 of the probability.
    surprisal = -np.log2(px)
    if logbase == 2:
        return surprisal
    return logbasechange(2, logbase) * surprisal
def condentropy(px, py, pxpy=None, logbase=2):
    """
    Return the conditional entropy of X given Y.

    Parameters
    ----------
    px : array-like
    py : array-like
    pxpy : array-like, optional
        If pxpy is None, the distributions are assumed to be independent
        and conendtropy(px,py) = shannonentropy(px)
    logbase : int or np.e

    Returns
    -------
    sum_{kj}log(q_{j}/w_{kj})

    where q_{j} = Y[j]
    and w_kj = X[k,j]
    """
    if not _isproperdist(px) or not _isproperdist(py):
        raise ValueError("px or py is not a proper probability distribution")
    # Identity tests, not '== None' / '!= None': comparing an ndarray to None
    # with == broadcasts elementwise and does not yield a single truth value.
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribtion")
    if pxpy is None:
        pxpy = np.outer(py, px)
    condent = np.sum(pxpy * np.nan_to_num(np.log2(py / pxpy)))
    if logbase == 2:
        return condent
    else:
        return logbasechange(2, logbase) * condent
def mutualinfo(px, py, pxpy, logbase=2):
    """
    Returns the mutual information between X and Y.

    Parameters
    ----------
    px : array-like
        Discrete probability distribution of random variable X
    py : array-like
        Discrete probability distribution of random variable Y
    pxpy : 2d array-like
        The joint probability distribution of random variables X and Y.
        Note that if X and Y are independent then the mutual information
        is zero.
    logbase : int or np.e, optional
        Default is 2 (bits)

    Returns
    -------
    shannonentropy(px) - condentropy(px,py,pxpy)
    """
    if not _isproperdist(px) or not _isproperdist(py):
        raise ValueError("px or py is not a proper probability distribution")
    # Identity tests, not '== None' / '!= None': equality against an ndarray
    # broadcasts elementwise instead of producing a single bool.
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribtion")
    if pxpy is None:
        pxpy = np.outer(py, px)
    return shannonentropy(px, logbase=logbase) - condentropy(px, py, pxpy,
                                                             logbase=logbase)
def corrent(px, py, pxpy, logbase=2):
    """
    An information theoretic correlation measure.

    Reflects linear and nonlinear correlation between two random variables
    X and Y, characterized by the discrete probability distributions px and py
    respectively.

    Parameters
    ----------
    px : array-like
        Discrete probability distribution of random variable X
    py : array-like
        Discrete probability distribution of random variable Y
    pxpy : 2d array-like, optional
        Joint probability distribution of X and Y.  If pxpy is None, X and Y
        are assumed to be independent.
    logbase : int or np.e, optional
        Default is 2 (bits)

    Returns
    -------
    mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,logbase=logbase)

    Notes
    -----
    This is also equivalent to

    corrent(px,py,pxpy) = 1 - condent(px,py,pxpy)/shannonentropy(py)
    """
    if not _isproperdist(px) or not _isproperdist(py):
        raise ValueError("px or py is not a proper probability distribution")
    # Identity tests, not '== None' / '!= None' (ndarray comparison pitfall).
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribtion")
    if pxpy is None:
        pxpy = np.outer(py, px)
    return mutualinfo(px, py, pxpy, logbase=logbase) / shannonentropy(
        py, logbase=logbase)
def covent(px, py, pxpy, logbase=2):
    """
    An information theoretic covariance measure.

    Reflects linear and nonlinear correlation between two random variables
    X and Y, characterized by the discrete probability distributions px and py
    respectively.

    Parameters
    ----------
    px : array-like
        Discrete probability distribution of random variable X
    py : array-like
        Discrete probability distribution of random variable Y
    pxpy : 2d array-like, optional
        Joint probability distribution of X and Y.  If pxpy is None, X and Y
        are assumed to be independent.
    logbase : int or np.e, optional
        Default is 2 (bits)

    Returns
    -------
    condent(px,py,pxpy,logbase=logbase) + condent(py,px,pxpy,
            logbase=logbase)

    Notes
    -----
    This is also equivalent to

    covent(px,py,pxpy) = condent(px,py,pxpy) + condent(py,px,pxpy)
    """
    if not _isproperdist(px) or not _isproperdist(py):
        raise ValueError("px or py is not a proper probability distribution")
    # Identity tests, not '== None' / '!= None' (ndarray comparison pitfall).
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribtion")
    if pxpy is None:
        pxpy = np.outer(py, px)
    # The original called the undefined name `condent`, raising NameError on
    # every invocation; `condentropy` is the function defined in this module.
    return condentropy(px, py, pxpy, logbase=logbase) + condentropy(
        py, px, pxpy, logbase=logbase)
#### Generalized Entropies ####
def renyientropy(px, alpha=1, logbase=2, measure='R'):
    """
    Renyi's generalized entropy

    Parameters
    ----------
    px : array-like
        Discrete probability distribution of random variable X.  Note that
        px is assumed to be a proper probability distribution.
    logbase : int or np.e, optional
        Default is 2 (bits)
    alpha : float or inf
        The order of the entropy.  The default is 1, which in the limit
        is just Shannon's entropy.  2 is Renyi (Collision) entropy.  If
        the string "inf" or numpy.inf is specified the min-entropy is
        returned.
    measure : str, optional
        The type of entropy measure desired.  'R' returns Renyi entropy
        measure.  'T' returns the Tsallis entropy measure.

    Returns
    -------
    1/(1-alpha)*log(sum(px**alpha))

    In the limit as alpha -> 1, Shannon's entropy is returned.

    In the limit as alpha -> inf, min-entropy is returned.
    """
    #TODO:finish returns
    #TODO:add checks for measure
    if not _isproperdist(px):
        raise ValueError("px is not a proper probability distribution")
    alpha = float(alpha)
    if alpha == 1:
        genent = shannonentropy(px)
        if logbase != 2:
            return logbasechange(2, logbase) * genent
        return genent
    elif 'inf' in str(alpha).lower() or alpha == np.inf:
        # str(alpha), not the undefined name `string(alpha)`, which raised
        # NameError whenever the infinite-order branch was reached.
        return -np.log(np.max(px))

    # gets here if alpha != (1 or inf)
    px = px ** alpha
    genent = np.log(px.sum())
    if logbase == 2:
        return 1 / (1 - alpha) * genent
    else:
        return 1 / (1 - alpha) * logbasechange(2, logbase) * genent
#TODO: before completing this, need to rethink the organization of
# (relative) entropy measures, ie., all put into one function
# and have kwdargs, etc.?
def gencrossentropy(px,py,pxpy,alpha=1,logbase=2, measure='T'):
    """
    Generalized cross-entropy measures.

    Parameters
    ----------
    px : array-like
        Discrete probability distribution of random variable X
    py : array-like
        Discrete probability distribution of random variable Y
    pxpy : 2d array-like, optional
        Joint probability distribution of X and Y.  If pxpy is None, X and Y
        are assumed to be independent.
    logbase : int or np.e, optional
        Default is 2 (bits)
    measure : str, optional
        The measure is the type of generalized cross-entropy desired. 'T' is
        the cross-entropy version of the Tsallis measure.  'CR' is Cressie-Read
        measure.
    """
    # NOTE(review): no body was ever written -- the function consists solely
    # of its docstring and therefore returns None for every input.
if __name__ == "__main__":
    # Demo/worked examples reproducing tables from Golan (2008).
    print("From Golan (2008) \"Information and Entropy Econometrics -- A Review \
and Synthesis")
    print("Table 3.1")
    # Examples from Golan (2008)

    X = [.2,.2,.2,.2,.2]
    Y = [.322,.072,.511,.091,.004]

    for i in X:
        print(shannoninfo(i))
    for i in Y:
        print(shannoninfo(i))
    print(shannonentropy(X))
    print(shannonentropy(Y))

    p = [1e-5,1e-4,.001,.01,.1,.15,.2,.25,.3,.35,.4,.45,.5]

    # Information content as a function of probability.
    plt.subplot(111)
    plt.ylabel("Information")
    plt.xlabel("Probability")
    x = np.linspace(0,1,100001)
    plt.plot(x, shannoninfo(x))
#    plt.show()

    # Entropy of a Bernoulli(p) distribution.
    plt.subplot(111)
    plt.ylabel("Entropy")
    plt.xlabel("Probability")
    x = np.linspace(0,1,101)
    plt.plot(x, lmap(shannonentropy, lzip(x,1-x)))
#    plt.show()

    # define a joint probability distribution
    # from Golan (2008) table 3.3
    w = np.array([[0,0,1./3],[1/9.,1/9.,1/9.],[1/18.,1/9.,1/6.]])

    # table 3.4: marginals and the derived entropy quantities.
    px = w.sum(0)
    py = w.sum(1)
    H_X = shannonentropy(px)
    H_Y = shannonentropy(py)
    H_XY = shannonentropy(w)
    H_XgivenY = condentropy(px,py,w)
    H_YgivenX = condentropy(py,px,w)
# note that cross-entropy is not a distance measure as the following shows
    D_YX = logbasechange(2,np.e)*stats.entropy(px, py)
    D_XY = logbasechange(2,np.e)*stats.entropy(py, px)
    I_XY = mutualinfo(px,py,w)
    print("Table 3.3")
    print(H_X,H_Y, H_XY, H_XgivenY, H_YgivenX, D_YX, D_XY, I_XY)

    print("discretize functions")
    X=np.array([21.2,44.5,31.0,19.5,40.6,38.7,11.1,15.8,31.9,25.8,20.2,14.2,
        24.0,21.0,11.3,18.0,16.3,22.2,7.8,27.8,16.3,35.1,14.9,17.1,28.2,16.4,
        16.5,46.0,9.5,18.8,32.1,26.1,16.1,7.3,21.4,20.0,29.3,14.9,8.3,22.5,
        12.8,26.9,25.5,22.9,11.2,20.7,26.2,9.3,10.8,15.6])
    discX = discretize(X)
    #CF: R's infotheo
#TODO: compare to pyentropy quantize?
    # NOTE(review): bare `print` below is a no-op expression on Python 3
    # (it only printed a blank line under Python 2's print statement).
    print
    print("Example in section 3.6 of Golan, using table 3.3")
    print("Bounding errors using Fano's inequality")
    print("H(P_{e}) + P_{e}log(K-1) >= H(X|Y)")
    print("or, a weaker inequality")
    print("P_{e} >= [H(X|Y) - 1]/log(K)")
    print("P(x) = %s" % px)
    print("X = 3 has the highest probability, so this is the estimate Xhat")
    pe = 1 - px[2]
    print("The probability of error Pe is 1 - p(X=3) = %0.4g" % pe)
    H_pe = shannonentropy([pe,1-pe])
    print("H(Pe) = %0.4g and K=3" % H_pe)
    print("H(Pe) + Pe*log(K-1) = %0.4g >= H(X|Y) = %0.4g" % \
        (H_pe+pe*np.log2(2), H_XgivenY))
    print("or using the weaker inequality")
    print("Pe = %0.4g >= [H(X) - 1]/log(K) = %0.4g" % (pe, (H_X - 1)/np.log2(3)))
    print("Consider now, table 3.5, where there is additional information")
    print("The conditional probabilities of P(X|Y=y) are ")
    w2 = np.array([[0.,0.,1.],[1/3.,1/3.,1/3.],[1/6.,1/3.,1/2.]])
    print(w2)
# not a proper distribution?
    print("The probability of error given this information is")
    print("Pe = [H(X|Y) -1]/log(K) = %0.4g" % ((np.mean([0,shannonentropy(w2[1]),shannonentropy(w2[2])])-1)/np.log2(3)))
    print("such that more information lowers the error")

### Stochastic processes
    markovchain = np.array([[.553,.284,.163],[.465,.312,.223],[.420,.322,.258]])
| bsd-3-clause |
eg-zhang/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 215 | 11427 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
    """Exercise PLSCanonical/PLSRegression on linnerud: algorithm agreement,
    mathematical properties, and non-regression against R's plspm/pls."""
    d = load_linnerud()
    X = d.data
    Y = d.target
    # 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
    # ===========================================================
    # Compare 2 algo.: nipals vs. svd
    # ------------------------------
    pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
    pls_bynipals.fit(X, Y)
    pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
    pls_bysvd.fit(X, Y)
    # check equalities of loading (up to the sign of the second column)
    assert_array_almost_equal(
        pls_bynipals.x_loadings_,
        np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
        err_msg="nipals and svd implementation lead to different x loadings")

    assert_array_almost_equal(
        pls_bynipals.y_loadings_,
        np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
        err_msg="nipals and svd implementation lead to different y loadings")

    # Check PLS properties (with n_components=X.shape[1])
    # ---------------------------------------------------
    plsca = pls_.PLSCanonical(n_components=X.shape[1])
    plsca.fit(X, Y)
    T = plsca.x_scores_
    P = plsca.x_loadings_
    Wx = plsca.x_weights_
    U = plsca.y_scores_
    Q = plsca.y_loadings_
    Wy = plsca.y_weights_

    def check_ortho(M, err_msg):
        # M'M must be diagonal when M's columns are mutually orthogonal.
        K = np.dot(M.T, M)
        assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)

    # Orthogonality of weights
    # ~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(Wx, "x weights are not orthogonal")
    check_ortho(Wy, "y weights are not orthogonal")

    # Orthogonality of latent scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(T, "x scores are not orthogonal")
    check_ortho(U, "y scores are not orthogonal")

    # Check X = TP' and Y = UQ' (with (p == q) components)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # center scale X, Y
    Xc, Yc, x_mean, y_mean, x_std, y_std =\
        pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
    assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
    assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")

    # Check that rotations on training data lead to scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Xr = plsca.transform(X)
    assert_array_almost_equal(Xr, plsca.x_scores_,
                              err_msg="rotation on X failed")
    Xr, Yr = plsca.transform(X, Y)
    assert_array_almost_equal(Xr, plsca.x_scores_,
                              err_msg="rotation on X failed")
    assert_array_almost_equal(Yr, plsca.y_scores_,
                              err_msg="rotation on Y failed")

    # "Non regression test" on canonical PLS
    # --------------------------------------
    # The results were checked against the R-package plspm
    pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
    pls_ca.fit(X, Y)

    x_weights = np.array(
        [[-0.61330704,  0.25616119, -0.74715187],
         [-0.74697144,  0.11930791,  0.65406368],
         [-0.25668686, -0.95924297, -0.11817271]])
    assert_array_almost_equal(pls_ca.x_weights_, x_weights)

    x_rotations = np.array(
        [[-0.61330704,  0.41591889, -0.62297525],
         [-0.74697144,  0.31388326,  0.77368233],
         [-0.25668686, -0.89237972, -0.24121788]])
    assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)

    y_weights = np.array(
        [[+0.58989127,  0.7890047,   0.1717553],
         [+0.77134053, -0.61351791,  0.16920272],
         [-0.23887670, -0.03267062,  0.97050016]])
    assert_array_almost_equal(pls_ca.y_weights_, y_weights)

    y_rotations = np.array(
        [[+0.58989127,  0.7168115,   0.30665872],
         [+0.77134053, -0.70791757,  0.19786539],
         [-0.23887670, -0.00343595,  0.94162826]])
    assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)

    # 2) Regression PLS (PLS2): "Non regression test"
    # ===============================================
    # The results were checked against the R-packages plspm, misOmics and pls
    pls_2 = pls_.PLSRegression(n_components=X.shape[1])
    pls_2.fit(X, Y)

    x_weights = np.array(
        [[-0.61330704, -0.00443647,  0.78983213],
         [-0.74697144, -0.32172099, -0.58183269],
         [-0.25668686,  0.94682413, -0.19399983]])
    assert_array_almost_equal(pls_2.x_weights_, x_weights)

    x_loadings = np.array(
        [[-0.61470416, -0.24574278,  0.78983213],
         [-0.65625755, -0.14396183, -0.58183269],
         [-0.51733059,  1.00609417, -0.19399983]])
    assert_array_almost_equal(pls_2.x_loadings_, x_loadings)

    y_weights = np.array(
        [[+0.32456184,  0.29892183,  0.20316322],
         [+0.42439636,  0.61970543,  0.19320542],
         [-0.13143144, -0.26348971, -0.17092916]])
    assert_array_almost_equal(pls_2.y_weights_, y_weights)

    y_loadings = np.array(
        [[+0.32456184,  0.29892183,  0.20316322],
         [+0.42439636,  0.61970543,  0.19320542],
         [-0.13143144, -0.26348971, -0.17092916]])
    assert_array_almost_equal(pls_2.y_loadings_, y_loadings)

    # 3) Another non-regression test of Canonical PLS on random dataset
    # =================================================================
    # The results were checked against the R-package plspm
    n = 500
    p_noise = 10
    q_noise = 5
    # 2 latents vars:
    np.random.seed(11)
    l1 = np.random.normal(size=n)
    l2 = np.random.normal(size=n)
    latents = np.array([l1, l1, l2, l2]).T
    X = latents + np.random.normal(size=4 * n).reshape((n, 4))
    Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
    X = np.concatenate(
        (X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
    Y = np.concatenate(
        (Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
    # Reset the global RNG state so later tests are unaffected by the seed.
    np.random.seed(None)
    pls_ca = pls_.PLSCanonical(n_components=3)
    pls_ca.fit(X, Y)

    x_weights = np.array(
        [[0.65803719,  0.19197924,  0.21769083],
         [0.7009113,   0.13303969, -0.15376699],
         [0.13528197, -0.68636408,  0.13856546],
         [0.16854574, -0.66788088, -0.12485304],
         [-0.03232333, -0.04189855,  0.40690153],
         [0.1148816, -0.09643158,  0.1613305],
         [0.04792138, -0.02384992,  0.17175319],
         [-0.06781,   -0.01666137, -0.18556747],
         [-0.00266945, -0.00160224,  0.11893098],
         [-0.00849528, -0.07706095,  0.1570547],
         [-0.00949471, -0.02964127,  0.34657036],
         [-0.03572177,  0.0945091,   0.3414855],
         [0.05584937, -0.02028961, -0.57682568],
         [0.05744254, -0.01482333, -0.17431274]])
    assert_array_almost_equal(pls_ca.x_weights_, x_weights)

    x_loadings = np.array(
        [[0.65649254,  0.1847647,   0.15270699],
         [0.67554234,  0.15237508, -0.09182247],
         [0.19219925, -0.67750975,  0.08673128],
         [0.2133631,  -0.67034809, -0.08835483],
         [-0.03178912, -0.06668336,  0.43395268],
         [0.15684588, -0.13350241,  0.20578984],
         [0.03337736, -0.03807306,  0.09871553],
         [-0.06199844,  0.01559854, -0.1881785],
         [0.00406146, -0.00587025,  0.16413253],
         [-0.00374239, -0.05848466,  0.19140336],
         [0.00139214, -0.01033161,  0.32239136],
         [-0.05292828,  0.0953533,   0.31916881],
         [0.04031924, -0.01961045, -0.65174036],
         [0.06172484, -0.06597366, -0.1244497]])
    assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)

    y_weights = np.array(
        [[0.66101097,  0.18672553,  0.22826092],
         [0.69347861,  0.18463471, -0.23995597],
         [0.14462724, -0.66504085,  0.17082434],
         [0.22247955, -0.6932605,  -0.09832993],
         [0.07035859,  0.00714283,  0.67810124],
         [0.07765351, -0.0105204,  -0.44108074],
         [-0.00917056,  0.04322147,  0.10062478],
         [-0.01909512,  0.06182718,  0.28830475],
         [0.01756709,  0.04797666,  0.32225745]])
    assert_array_almost_equal(pls_ca.y_weights_, y_weights)

    y_loadings = np.array(
        [[0.68568625,  0.1674376,   0.0969508],
         [0.68782064,  0.20375837, -0.1164448],
         [0.11712173, -0.68046903,  0.12001505],
         [0.17860457, -0.6798319,  -0.05089681],
         [0.06265739, -0.0277703,   0.74729584],
         [0.0914178,   0.00403751, -0.5135078],
         [-0.02196918, -0.01377169,  0.09564505],
         [-0.03288952,  0.09039729,  0.31858973],
         [0.04287624,  0.05254676,  0.27836841]])
    assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)

    # Orthogonality of weights
    # ~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
    check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")

    # Orthogonality of latent scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
    check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    """PLSSVD and friends must return exactly n_components scores."""
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    n_components = 2
    # The fitted scores must be truncated to the requested number of
    # components rather than returning the full decomposition.
    for estimator_cls in (pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical):
        estimator = estimator_cls(n_components=n_components)
        estimator.fit(X, Y)
        assert_equal(n_components, estimator.y_scores_.shape[1])
def test_univariate_pls_regression():
    """A 1d target must behave exactly like a single-column 2d target."""
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    regressor = pls_.PLSRegression()
    # Fit once with a 1d y and once with the equivalent (n, 1) column.
    coef_from_1d = regressor.fit(X, Y[:, 0]).coef_
    coef_from_column = regressor.fit(X, Y[:, :1]).coef_
    assert_array_almost_equal(coef_from_1d, coef_from_column)
def test_predict_transform_copy():
    """The ``copy`` keyword must not change results nor clobber the inputs."""
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    estimator = pls_.PLSCanonical()
    X_backup = X.copy()
    Y_backup = Y.copy()
    estimator.fit(X, Y)
    # copy=False must produce the same predictions/transforms ...
    assert_array_almost_equal(estimator.predict(X),
                              estimator.predict(X.copy(), copy=False))
    assert_array_almost_equal(estimator.transform(X),
                              estimator.transform(X.copy(), copy=False))
    # ... also when Y is passed along.
    assert_array_almost_equal(estimator.transform(X, Y),
                              estimator.transform(X.copy(), Y.copy(),
                                                  copy=False))
    # The caller's arrays must be untouched (exact equality on purpose).
    assert_array_equal(X_backup, X)
    assert_array_equal(Y_backup, Y)
    # Sanity check: the column means were non-zero to begin with, so the
    # comparison above would have caught an in-place centering.
    assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
    """Fitting with scale=True must cope with a zero-variance column."""
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    X[:, -1] = 1.0  # constant column -> X[:, -1].std() == 0
    for estimator in (pls_.PLSCanonical(), pls_.PLSRegression(),
                      pls_.PLSSVD()):
        estimator.set_params(scale=True)
        estimator.fit(X, Y)
def test_pls_errors():
    """Requesting more components than supported must raise ValueError."""
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    for estimator in (pls_.PLSCanonical(), pls_.PLSRegression(),
                      pls_.PLSSVD()):
        estimator.n_components = 4
        assert_raise_message(ValueError, "Invalid number of components",
                             estimator.fit, X, Y)
| bsd-3-clause |
leggitta/mne-python | mne/evoked.py | 2 | 52124 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Mads Jensen <mje.mads@gmail.com>
#
# License: BSD (3-clause)
from copy import deepcopy
import numpy as np
import warnings
from .baseline import rescale
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin,
equalize_channels)
from .filter import resample, detrend, FilterMixin
from .fixes import in1d
from .utils import check_fname, logger, verbose, object_hash, _time_mask
from .viz import (plot_evoked, plot_evoked_topomap, plot_evoked_field,
plot_evoked_image, plot_evoked_topo)
from .viz.evoked import _plot_evoked_white
from .externals.six import string_types
from .io.constants import FIFF
from .io.open import fiff_open
from .io.tag import read_tag
from .io.tree import dir_tree_find
from .io.pick import channel_type, pick_types
from .io.meas_info import read_meas_info, write_meas_info
from .io.proj import ProjMixin
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_string, write_float_matrix,
write_id)
from .io.base import ToDataFrameMixin
# Mapping from human-readable aspect names to FIF aspect constants.
_aspect_dict = {'average': FIFF.FIFFV_ASPECT_AVERAGE,
                'standard_error': FIFF.FIFFV_ASPECT_STD_ERR}
# Reverse lookup: FIF aspect constant (stringified) -> human-readable name.
_aspect_rev = {str(FIFF.FIFFV_ASPECT_AVERAGE): 'average',
               str(FIFF.FIFFV_ASPECT_STD_ERR): 'standard_error'}
class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin,
             SetChannelsMixin, InterpolationMixin, FilterMixin,
             ToDataFrameMixin):
    """Evoked data
    Parameters
    ----------
    fname : string
        Name of evoked/average FIF file to load.
        If None no data is loaded.
    condition : int, or str
        Dataset ID number (int) or comment/name (str). Optional if there is
        only one data set in file.
    baseline : tuple or list of length 2, or None
        The time interval to apply rescaling / baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used. If None, no correction is applied.
    proj : bool, optional
        Apply SSP projection vectors
    kind : str
        Either 'average' or 'standard_error'. The type of data to read.
        Only used if 'condition' is a str.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Attributes
    ----------
    info : dict
        Measurement info.
    ch_names : list of string
        List of channels' names.
    nave : int
        Number of averaged epochs.
    kind : str
        Type of data, either average or standard_error.
    first : int
        First time sample.
    last : int
        Last time sample.
    comment : string
        Comment on dataset. Can be the condition.
    times : array
        Array of time instants in seconds.
    data : array of shape (n_channels, n_times)
        Evoked response.
    verbose : bool, str, int, or None.
        See above.
    """
    @verbose
    def __init__(self, fname, condition=None, baseline=None, proj=True,
                 kind='average', verbose=None):
        """Read one evoked dataset from a FIF file (see class docstring)."""
        if fname is None:
            raise ValueError('No evoked filename specified')
        self.verbose = verbose
        logger.info('Reading %s ...' % fname)
        f, tree, _ = fiff_open(fname)
        with f as fid:
            if not isinstance(proj, bool):
                raise ValueError(r"'proj' must be 'True' or 'False'")
            # Read the measurement info
            info, meas = read_meas_info(fid, tree)
            info['filename'] = fname
            # Locate the data of interest
            processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
            if len(processed) == 0:
                raise ValueError('Could not find processed data')
            evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
            if len(evoked_node) == 0:
                raise ValueError('Could not find evoked data')
            # find string-based entry
            if isinstance(condition, string_types):
                if kind not in _aspect_dict.keys():
                    raise ValueError('kind must be "average" or '
                                     '"standard_error"')
                comments, aspect_kinds, t = _get_entries(fid, evoked_node)
                # Select the dataset whose comment AND aspect kind both match.
                goods = np.logical_and(in1d(comments, [condition]),
                                       in1d(aspect_kinds,
                                            [_aspect_dict[kind]]))
                found_cond = np.where(goods)[0]
                if len(found_cond) != 1:
                    raise ValueError('condition "%s" (%s) not found, out of '
                                     'found datasets:\n  %s'
                                     % (condition, kind, t))
                condition = found_cond[0]
            if condition >= len(evoked_node) or condition < 0:
                fid.close()
                raise ValueError('Data set selector out of range')
            my_evoked = evoked_node[condition]
            # Identify the aspects
            aspects = dir_tree_find(my_evoked, FIFF.FIFFB_ASPECT)
            if len(aspects) > 1:
                logger.info('Multiple aspects found. Taking first one.')
            my_aspect = aspects[0]
            # Now find the data in the evoked block
            nchan = 0
            sfreq = -1
            chs = []
            comment = None
            for k in range(my_evoked['nent']):
                my_kind = my_evoked['directory'][k].kind
                pos = my_evoked['directory'][k].pos
                if my_kind == FIFF.FIFF_COMMENT:
                    tag = read_tag(fid, pos)
                    comment = tag.data
                elif my_kind == FIFF.FIFF_FIRST_SAMPLE:
                    tag = read_tag(fid, pos)
                    first = int(tag.data)
                elif my_kind == FIFF.FIFF_LAST_SAMPLE:
                    tag = read_tag(fid, pos)
                    last = int(tag.data)
                elif my_kind == FIFF.FIFF_NCHAN:
                    tag = read_tag(fid, pos)
                    nchan = int(tag.data)
                elif my_kind == FIFF.FIFF_SFREQ:
                    tag = read_tag(fid, pos)
                    sfreq = float(tag.data)
                elif my_kind == FIFF.FIFF_CH_INFO:
                    tag = read_tag(fid, pos)
                    chs.append(tag.data)
            if comment is None:
                comment = 'No comment'
            # Local channel information?
            if nchan > 0:
                if chs is None:
                    raise ValueError('Local channel information was not found '
                                     'when it was expected.')
                if len(chs) != nchan:
                    raise ValueError('Number of channels and number of '
                                     'channel definitions are different')
                info['chs'] = chs
                info['nchan'] = nchan
                logger.info('    Found channel information in evoked data. '
                            'nchan = %d' % nchan)
                if sfreq > 0:
                    info['sfreq'] = sfreq
            nsamp = last - first + 1
            logger.info('    Found the data of interest:')
            logger.info('        t = %10.2f ... %10.2f ms (%s)'
                        % (1000 * first / info['sfreq'],
                           1000 * last / info['sfreq'], comment))
            if info['comps'] is not None:
                logger.info('        %d CTF compensation matrices available'
                            % len(info['comps']))
            # Read the data in the aspect block
            nave = 1
            epoch = []
            for k in range(my_aspect['nent']):
                kind = my_aspect['directory'][k].kind
                pos = my_aspect['directory'][k].pos
                if kind == FIFF.FIFF_COMMENT:
                    tag = read_tag(fid, pos)
                    comment = tag.data
                elif kind == FIFF.FIFF_ASPECT_KIND:
                    tag = read_tag(fid, pos)
                    aspect_kind = int(tag.data)
                elif kind == FIFF.FIFF_NAVE:
                    tag = read_tag(fid, pos)
                    nave = int(tag.data)
                elif kind == FIFF.FIFF_EPOCH:
                    tag = read_tag(fid, pos)
                    epoch.append(tag)
            logger.info('        nave = %d - aspect type = %d'
                        % (nave, aspect_kind))
            nepoch = len(epoch)
            # nepoch == nchan corresponds to the old per-channel storage
            # scheme; a single epoch tag holds the full (nchan, nsamp) array.
            if nepoch != 1 and nepoch != info['nchan']:
                raise ValueError('Number of epoch tags is unreasonable '
                                 '(nepoch = %d nchan = %d)'
                                 % (nepoch, info['nchan']))
            if nepoch == 1:
                # Only one epoch
                # NOTE(review): np.float is a deprecated alias of the builtin
                # float (removed in NumPy 1.24); consider np.float64 here.
                all_data = epoch[0].data.astype(np.float)
                # May need a transpose if the number of channels is one
                if all_data.shape[1] == 1 and info['nchan'] == 1:
                    all_data = all_data.T.astype(np.float)
            else:
                # Put the old style epochs together
                all_data = np.concatenate([e.data[None, :] for e in epoch],
                                          axis=0).astype(np.float)
            if all_data.shape[1] != nsamp:
                raise ValueError('Incorrect number of samples (%d instead of '
                                 ' %d)' % (all_data.shape[1], nsamp))
            # Calibrate
            cals = np.array([info['chs'][k]['cal'] *
                             info['chs'][k].get('scale', 1.0)
                             for k in range(info['nchan'])])
            all_data *= cals[:, np.newaxis]
            times = np.arange(first, last + 1, dtype=np.float) / info['sfreq']
            self.info = info
            # Put the rest together all together
            self.nave = nave
            self._aspect_kind = aspect_kind
            self.kind = _aspect_rev.get(str(self._aspect_kind), 'Unknown')
            self.first = first
            self.last = last
            self.comment = comment
            self.times = times
            self.data = all_data
            # bind info, proj, data to self so apply_proj can be used
            # NOTE(review): redundant re-assignment; self.data was set just
            # above.
            self.data = all_data
            if proj:
                self.apply_proj()
            # Run baseline correction
            self.data = rescale(self.data, times, baseline, 'mean', copy=False)
    def save(self, fname):
        """Save dataset to file.
        Parameters
        ----------
        fname : string
            Name of the file where to save the data.
        """
        write_evokeds(fname, self)
    def __repr__(self):
        s = "comment : '%s'" % self.comment
        s += ", time : [%f, %f]" % (self.times[0], self.times[-1])
        s += ", n_epochs : %d" % self.nave
        s += ", n_channels x n_times : %s x %s" % self.data.shape
        return "<Evoked  |  %s>" % s
    @property
    def ch_names(self):
        """Channel names"""
        return self.info['ch_names']
    def crop(self, tmin=None, tmax=None, copy=False):
        """Crop data to a given time interval
        Parameters
        ----------
        tmin : float | None
            Start time of selection in seconds.
        tmax : float | None
            End time of selection in seconds.
        copy : bool
            If False epochs is cropped in place.
        """
        inst = self if not copy else self.copy()
        mask = _time_mask(inst.times, tmin, tmax)
        inst.times = inst.times[mask]
        # Recompute first/last sample indices so they stay consistent with
        # the cropped times array.
        inst.first = int(inst.times[0] * inst.info['sfreq'])
        inst.last = len(inst.times) + inst.first - 1
        inst.data = inst.data[:, mask]
        return inst
    def shift_time(self, tshift, relative=True):
        """Shift time scale in evoked data
        Parameters
        ----------
        tshift : float
            The amount of time shift to be applied if relative is True
            else the first time point. When relative is True, positive value
            of tshift moves the data forward while negative tshift moves it
            backward.
        relative : bool
            If true, move the time backwards or forwards by specified amount.
            Else, set the starting time point to the value of tshift.
        Notes
        -----
        Maximum accuracy of time shift is 1 / evoked.info['sfreq']
        """
        times = self.times
        sfreq = self.info['sfreq']
        # relative=False rebases self.first to tshift instead of offsetting.
        offset = self.first if relative else 0
        self.first = int(tshift * sfreq) + offset
        self.last = self.first + len(times) - 1
        self.times = np.arange(self.first, self.last + 1,
                               dtype=np.float) / sfreq
    def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None,
             xlim='tight', proj=False, hline=None, units=None, scalings=None,
             titles=None, axes=None):
        """Plot evoked data as butterfly plots
        Note: If bad channels are not excluded they are shown in red.
        Parameters
        ----------
        picks : array-like of int | None
            The indices of channels to plot. If None show all.
        exclude : list of str | 'bads'
            Channels names to exclude from being shown. If 'bads', the
            bad channels are excluded.
        unit : bool
            Scale plot with channel (SI) unit.
        show : bool
            Call pyplot.show() at the end or not.
        ylim : dict
            ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6])
            Valid keys are eeg, mag, grad
        xlim : 'tight' | tuple | None
            xlim for plots.
        proj : bool | 'interactive'
            If true SSP projections are applied before display. If
            'interactive', a check box for reversible selection of SSP
            projection vectors will be shown.
        hline : list of floats | None
            The values at which show an horizontal line.
        units : dict | None
            The units of the channel types used for axes labels. If None,
            defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
        scalings : dict | None
            The scalings of the channel types to be applied for plotting.
            If None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
        titles : dict | None
            The titles associated with the channels. If None, defaults to
            `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
        axes : instance of Axes | list | None
            The axes to plot to. If list, the list must be a list of Axes of
            the same length as the number of channel types. If instance of
            Axes, there must be only one channel type plotted.
        """
        # Thin wrapper; all plotting logic lives in mne.viz.plot_evoked.
        return plot_evoked(self, picks=picks, exclude=exclude, unit=unit,
                           show=show, ylim=ylim, proj=proj, xlim=xlim,
                           hline=hline, units=units, scalings=scalings,
                           titles=titles, axes=axes)
    def plot_image(self, picks=None, exclude='bads', unit=True, show=True,
                   clim=None, xlim='tight', proj=False, units=None,
                   scalings=None, titles=None, axes=None, cmap='RdBu_r'):
        """Plot evoked data as images
        Parameters
        ----------
        picks : array-like of int | None
            The indices of channels to plot. If None show all.
        exclude : list of str | 'bads'
            Channels names to exclude from being shown. If 'bads', the
            bad channels are excluded.
        unit : bool
            Scale plot with channel (SI) unit.
        show : bool
            Call pyplot.show() at the end or not.
        clim : dict
            clim for images. e.g. clim = dict(eeg=[-200e-6, 200e6])
            Valid keys are eeg, mag, grad
        xlim : 'tight' | tuple | None
            xlim for plots.
        proj : bool | 'interactive'
            If true SSP projections are applied before display. If
            'interactive', a check box for reversible selection of SSP
            projection vectors will be shown.
        units : dict | None
            The units of the channel types used for axes labels. If None,
            defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
        scalings : dict | None
            The scalings of the channel types to be applied for plotting.
            If None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
        titles : dict | None
            The titles associated with the channels. If None, defaults to
            `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
        axes : instance of Axes | list | None
            The axes to plot to. If list, the list must be a list of Axes of
            the same length as the number of channel types. If instance of
            Axes, there must be only one channel type plotted.
        cmap : matplotlib colormap
            Colormap.
        """
        # Thin wrapper around mne.viz.plot_evoked_image.
        return plot_evoked_image(self, picks=picks, exclude=exclude, unit=unit,
                                 show=show, clim=clim, proj=proj, xlim=xlim,
                                 units=units, scalings=scalings,
                                 titles=titles, axes=axes, cmap=cmap)
    def plot_topo(self, layout=None, layout_scale=0.945, color=None,
                  border='none', ylim=None, scalings=None, title=None,
                  proj=False, vline=[0.0], fig_facecolor='k',
                  fig_background=None, axis_facecolor='k', font_color='w',
                  show=True):
        """Plot 2D topography of evoked responses.
        Clicking on the plot of an individual sensor opens a new figure showing
        the evoked response for the selected sensor.
        Parameters
        ----------
        layout : instance of Layout | None
            Layout instance specifying sensor positions (does not need to
            be specified for Neuromag data). If possible, the correct layout is
            inferred from the data.
        layout_scale: float
            Scaling factor for adjusting the relative size of the layout
            on the canvas
        color : list of color objects | color object | None
            Everything matplotlib accepts to specify colors. If not list-like,
            the color specified will be repeated. If None, colors are
            automatically drawn.
        border : str
            matplotlib borders style to be used for each sensor plot.
        ylim : dict | None
            ylim for plots. The value determines the upper and lower subplot
            limits. e.g. ylim = dict(eeg=[-200e-6, 200e6]). Valid keys are eeg,
            mag, grad, misc. If None, the ylim parameter for each channel is
            determined by the maximum absolute peak.
        scalings : dict | None
            The scalings of the channel types to be applied for plotting. If
            None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
        title : str
            Title of the figure.
        proj : bool | 'interactive'
            If true SSP projections are applied before display. If
            'interactive', a check box for reversible selection of SSP
            projection vectors will be shown.
        vline : list of floats | None
            The values at which to show a vertical line.
        fig_facecolor : str | obj
            The figure face color. Defaults to black.
        fig_background : None | numpy ndarray
            A background image for the figure. This must work with a call to
            plt.imshow. Defaults to None.
        axis_facecolor : str | obj
            The face color to be used for each sensor plot. Defaults to black.
        font_color : str | obj
            The color of text in the colorbar and title. Defaults to white.
        show : bool
            Show figure if True.
        Returns
        -------
        fig : Instance of matplotlib.figure.Figure
            Images of evoked responses at sensor locations
        .. versionadded:: 0.10.0
        """
        # Thin wrapper around mne.viz.plot_evoked_topo.
        return plot_evoked_topo(self, layout=layout, layout_scale=layout_scale,
                                color=color, border=border, ylim=ylim,
                                scalings=scalings, title=title, proj=proj,
                                vline=vline, fig_facecolor=fig_facecolor,
                                fig_background=fig_background,
                                axis_facecolor=axis_facecolor,
                                font_color=font_color, show=show)
    def plot_topomap(self, times=None, ch_type=None, layout=None, vmin=None,
                     vmax=None, cmap='RdBu_r', sensors=True, colorbar=True,
                     scale=None, scale_time=1e3, unit=None, res=64, size=1,
                     cbar_fmt="%3.1f", time_format='%01d ms', proj=False,
                     show=True, show_names=False, title=None, mask=None,
                     mask_params=None, outlines='head', contours=6,
                     image_interp='bilinear', average=None, head_pos=None,
                     axes=None):
        """Plot topographic maps of specific time points
        Parameters
        ----------
        times : float | array of floats | None.
            The time point(s) to plot. If None, the number of ``axes``
            determines the amount of time point(s). If ``axes`` is also None,
            10 topographies will be shown with a regular time spacing between
            the first and last time instant.
        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
            The channel type to plot. For 'grad', the gradiometers are collec-
            ted in pairs and the RMS for each pair is plotted.
            If None, then channels are chosen in the order given above.
        layout : None | Layout
            Layout instance specifying sensor positions (does not need to
            be specified for Neuromag data). If possible, the correct
            layout file is inferred from the data; if no appropriate layout
            file was found, the layout is automatically generated from the
            sensor locations.
        vmin : float | callable
            The value specifying the lower bound of the color range.
            If None, and vmax is None, -vmax is used. Else np.min(data).
            If callable, the output equals vmin(data).
        vmax : float | callable
            The value specifying the upper bound of the color range.
            If None, the maximum absolute value is used. If vmin is None,
            but vmax is not, defaults to np.max(data).
            If callable, the output equals vmax(data).
        cmap : matplotlib colormap
            Colormap. Defaults to 'RdBu_r'.
        sensors : bool | str
            Add markers for sensor locations to the plot. Accepts matplotlib
            plot format string (e.g., 'r+' for red plusses). If True, a circle
            will be used (via .add_artist). Defaults to True.
        colorbar : bool
            Plot a colorbar.
        scale : dict | float | None
            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
            for grad and 1e15 for mag.
        scale_time : float | None
            Scale the time labels. Defaults to 1e3 (ms).
        unit : dict | str | None
            The unit of the channel type used for colorbar label. If
            scale is None the unit is automatically determined.
        res : int
            The resolution of the topomap image (n pixels along each side).
        size : scalar
            Side length of the topomaps in inches (only applies when plotting
            multiple topomaps at a time).
        cbar_fmt : str
            String format for colorbar values.
        time_format : str
            String format for topomap values. Defaults to ``"%01d ms"``.
        proj : bool | 'interactive'
            If true SSP projections are applied before display. If
            'interactive', a check box for reversible selection of SSP
            projection vectors will be shown.
        show : bool
            Call pyplot.show() at the end.
        show_names : bool | callable
            If True, show channel names on top of the map. If a callable is
            passed, channel names will be formatted using the callable; e.g.,
            to delete the prefix 'MEG ' from all channel names, pass the
            function
            lambda x: x.replace('MEG ', ''). If `mask` is not None, only
            significant sensors will be shown.
        title : str | None
            Title. If None (default), no title is displayed.
        mask : ndarray of bool, shape (n_channels, n_times) | None
            The channels to be marked as significant at a given time point.
            Indicies set to `True` will be considered. Defaults to None.
        mask_params : dict | None
            Additional plotting parameters for plotting significant sensors.
            Default (None) equals:
            ``dict(marker='o', markerfacecolor='w', markeredgecolor='k',
            linewidth=0, markersize=4)``.
        outlines : 'head' | 'skirt' | dict | None
            The outlines to be drawn. If 'head', the default head scheme will
            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
            allowed to be plotted outside of the head circle. If dict, each key
            refers to a tuple of x and y positions, the values in 'mask_pos'
            will serve as image mask, and the 'autoshrink' (bool) field will
            trigger automated shrinking of the positions due to points outside
            the outline. Alternatively, a matplotlib patch object can be passed
            for advanced masking options, either directly or as a function that
            returns patches (required for multi-axis plots). If None, nothing
            will be drawn. Defaults to 'head'.
        contours : int | False | None
            The number of contour lines to draw. If 0, no contours will be
            drawn.
        image_interp : str
            The image interpolation to be used. All matplotlib options are
            accepted.
        average : float | None
            The time window around a given time to be used for averaging
            (seconds). For example, 0.01 would translate into window that
            starts 5 ms before and ends 5 ms after a given time point.
            Defaults to None, which means no averaging.
        head_pos : dict | None
            If None (default), the sensors are positioned such that they span
            the head circle. If dict, can have entries 'center' (tuple) and
            'scale' (tuple) for what the center and scale of the head should be
            relative to the electrode locations.
        axes : instance of Axes | list | None
            The axes to plot to. If list, the list must be a list of Axes of
            the same length as ``times`` (unless ``times`` is None). If
            instance of Axes, ``times`` must be a float or a list of one float.
            Defaults to None.
        """
        # Thin wrapper around mne.viz.plot_evoked_topomap.
        return plot_evoked_topomap(self, times=times, ch_type=ch_type,
                                   layout=layout, vmin=vmin,
                                   vmax=vmax, cmap=cmap, sensors=sensors,
                                   colorbar=colorbar, scale=scale,
                                   scale_time=scale_time,
                                   unit=unit, res=res, proj=proj, size=size,
                                   cbar_fmt=cbar_fmt, time_format=time_format,
                                   show=show, show_names=show_names,
                                   title=title, mask=mask,
                                   mask_params=mask_params,
                                   outlines=outlines, contours=contours,
                                   image_interp=image_interp,
                                   average=average, head_pos=head_pos,
                                   axes=axes)
    def plot_field(self, surf_maps, time=None, time_label='t = %0.0f ms',
                   n_jobs=1):
        """Plot MEG/EEG fields on head surface and helmet in 3D
        Parameters
        ----------
        surf_maps : list
            The surface mapping information obtained with make_field_map.
        time : float | None
            The time point at which the field map shall be displayed. If None,
            the average peak latency (across sensor types) is used.
        time_label : str
            How to print info about the time instant visualized.
        n_jobs : int
            Number of jobs to run in parallel.
        Returns
        -------
        fig : instance of mlab.Figure
            The mayavi figure.
        """
        # Thin wrapper around mne.viz.plot_evoked_field.
        return plot_evoked_field(self, surf_maps, time=time,
                                 time_label=time_label, n_jobs=n_jobs)
    def plot_white(self, noise_cov, show=True):
        """Plot whitened evoked response
        Plots the whitened evoked response and the whitened GFP as described in
        [1]_. If one single covariance object is passed, the GFP panel (bottom)
        will depict different sensor types. If multiple covariance objects are
        passed as a list, the left column will display the whitened evoked
        responses for each channel based on the whitener from the noise
        covariance that has the highest log-likelihood. The left column will
        depict the whitened GFPs based on each estimator separately for each
        sensor type. Instead of numbers of channels the GFP display shows the
        estimated rank. The rank estimation will be printed by the logger for
        each noise covariance estimator that is passed.
        Parameters
        ----------
        noise_cov : list | instance of Covariance | str
            The noise covariance as computed by ``mne.cov.compute_covariance``.
        show : bool
            Whether to show the figure or not. Defaults to True.
        Returns
        -------
        fig : instance of matplotlib.figure.Figure
            The figure object containing the plot.
        References
        ----------
        .. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
               covariance estimation and spatial whitening of MEG and EEG
               signals, vol. 108, 328-342, NeuroImage.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        # Thin wrapper around the private viz helper.
        return _plot_evoked_white(self, noise_cov=noise_cov, scalings=None,
                                  rank=None, show=show)
    def as_type(self, ch_type='grad', mode='fast'):
        """Compute virtual evoked using interpolated fields in mag/grad channels.
        .. Warning:: Using virtual evoked to compute inverse can yield
            unexpected results. The virtual channels have `'_virtual'` appended
            at the end of the names to emphasize that the data contained in
            them are interpolated.
        Parameters
        ----------
        ch_type : str
            The destination channel type. It can be 'mag' or 'grad'.
        mode : str
            Either `'accurate'` or `'fast'`, determines the quality of the
            Legendre polynomial expansion used. `'fast'` should be sufficient
            for most applications.
        Returns
        -------
        evoked : instance of mne.Evoked
            The transformed evoked object containing only virtual channels.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        # Imported here to avoid a circular import with mne.forward.
        from .forward import _as_meg_type_evoked
        return _as_meg_type_evoked(self, ch_type=ch_type, mode=mode)
    def resample(self, sfreq, npad=100, window='boxcar'):
        """Resample data
        This function operates in-place.
        Parameters
        ----------
        sfreq : float
            New sample rate to use
        npad : int
            Amount to pad the start and end of the data.
        window : string or tuple
            Window to use in resampling. See scipy.signal.resample.
        """
        o_sfreq = self.info['sfreq']
        self.data = resample(self.data, sfreq, o_sfreq, npad, -1, window)
        # adjust indirectly affected variables
        self.info['sfreq'] = sfreq
        self.times = (np.arange(self.data.shape[1], dtype=np.float) / sfreq +
                      self.times[0])
        self.first = int(self.times[0] * self.info['sfreq'])
        self.last = len(self.times) + self.first - 1
    def detrend(self, order=1, picks=None):
        """Detrend data
        This function operates in-place.
        Parameters
        ----------
        order : int
            Either 0 or 1, the order of the detrending. 0 is a constant
            (DC) detrend, 1 is a linear detrend.
        picks : array-like of int | None
            If None only MEG and EEG channels are detrended.
        """
        if picks is None:
            picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,
                               stim=False, eog=False, ecg=False, emg=False,
                               exclude='bads')
        self.data[picks] = detrend(self.data[picks], order, axis=-1)
    def copy(self):
        """Copy the instance of evoked
        Returns
        -------
        evoked : instance of Evoked
        """
        evoked = deepcopy(self)
        return evoked
    def __add__(self, evoked):
        """Add evoked taking into account number of epochs"""
        out = combine_evoked([self, evoked])
        out.comment = self.comment + " + " + evoked.comment
        return out
    def __sub__(self, evoked):
        """Subtract evoked taking into account number of epochs"""
        # Subtraction is implemented as a weighted combination with the
        # right-hand side's data negated.
        this_evoked = deepcopy(evoked)
        this_evoked.data *= -1.
        out = combine_evoked([self, this_evoked])
        if self.comment is None or this_evoked.comment is None:
            warnings.warn('evoked.comment expects a string but is None')
            out.comment = 'unknown'
        else:
            out.comment = self.comment + " - " + this_evoked.comment
        return out
    def __hash__(self):
        return object_hash(dict(info=self.info, data=self.data))
    def get_peak(self, ch_type=None, tmin=None, tmax=None, mode='abs',
                 time_as_index=False):
        """Get location and latency of peak amplitude
        Parameters
        ----------
        ch_type : {'mag', 'grad', 'eeg', 'misc', None}
            The channel type to use. Defaults to None. If more than one sensor
            Type is present in the data the channel type has to be explicitly
            set.
        tmin : float | None
            The minimum point in time to be considered for peak getting.
        tmax : float | None
            The maximum point in time to be considered for peak getting.
        mode : {'pos', 'neg', 'abs'}
            How to deal with the sign of the data. If 'pos' only positive
            values will be considered. If 'neg' only negative values will
            be considered. If 'abs' absolute values will be considered.
            Defaults to 'abs'.
        time_as_index : bool
            Whether to return the time index instead of the latency in seconds.
        Returns
        -------
        ch_name : str
            The channel exhibiting the maximum response.
        latency : float | int
            The time point of the maximum response, either latency in seconds
            or index.
        """
        # The string 'None' is deliberate: ch_type is stringified below so
        # that ch_type=None passes the membership check.
        supported = ('mag', 'grad', 'eeg', 'misc', 'None')
        data_picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False)
        types_used = set([channel_type(self.info, idx) for idx in data_picks])
        if str(ch_type) not in supported:
            raise ValueError('Channel type must be `{supported}`. You gave me '
                             '`{ch_type}` instead.'
                             .format(ch_type=ch_type,
                                     supported='` or `'.join(supported)))
        elif ch_type is not None and ch_type not in types_used:
            raise ValueError('Channel type `{ch_type}` not found in this '
                             'evoked object.'.format(ch_type=ch_type))
        elif len(types_used) > 1 and ch_type is None:
            raise RuntimeError('More than one sensor type found. `ch_type` '
                               'must not be `None`, pass a sensor type '
                               'value instead')
        # Translate the requested channel type into pick_types arguments.
        meg, eeg, misc, picks = False, False, False, None
        if ch_type == 'mag':
            meg = ch_type
        elif ch_type == 'grad':
            meg = ch_type
        elif ch_type == 'eeg':
            eeg = True
        elif ch_type == 'misc':
            misc = True
        if ch_type is not None:
            picks = pick_types(self.info, meg=meg, eeg=eeg, misc=misc,
                               ref_meg=False)
        data = self.data if picks is None else self.data[picks]
        ch_idx, time_idx = _get_peak(data, self.times, tmin,
                                     tmax, mode)
        return (self.ch_names[ch_idx],
                time_idx if time_as_index else self.times[time_idx])
class EvokedArray(Evoked):
    """Evoked object from numpy array
    Parameters
    ----------
    data : array of shape (n_channels, n_times)
        The channels' evoked response.
    info : instance of Info
        Info dictionary. Consider using ``create_info`` to populate
        this structure.
    tmin : float
        Start time before event.
    comment : string
        Comment on dataset. Can be the condition. Defaults to ''.
    nave : int
        Number of averaged epochs. Defaults to 1.
    kind : str
        Type of data, either average or standard_error. Defaults to 'average'.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to raw.verbose.
    See Also
    --------
    EpochsArray, io.RawArray, create_info
    """
    @verbose
    def __init__(self, data, info, tmin, comment='', nave=1, kind='average',
                 verbose=None):
        # Preserve complex input; otherwise store as float64. Use np.float64
        # explicitly: the bare np.float alias is deprecated and removed in
        # NumPy >= 1.24.
        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
        data = np.asanyarray(data, dtype=dtype)
        if data.ndim != 2:
            raise ValueError('Data must be a 2D array of shape (n_channels, '
                             'n_samples)')
        if len(info['ch_names']) != np.shape(data)[0]:
            raise ValueError('Info (%s) and data (%s) must have same number '
                             'of channels.' % (len(info['ch_names']),
                                               np.shape(data)[0]))
        self.data = data
        # Round to the nearest sample instead of truncating towards zero, so
        # e.g. tmin=-0.2 with a non-integer sfreq maps to the closest sample.
        self.first = int(round(tmin * info['sfreq']))
        self.last = self.first + np.shape(data)[-1] - 1
        self.times = np.arange(self.first, self.last + 1,
                               dtype=np.float64) / info['sfreq']
        self.info = info
        self.nave = nave
        self.kind = kind
        self.comment = comment
        self.picks = None
        self.verbose = verbose
        self._projector = None
        # Map the human-readable kind onto the FIF aspect constant.
        if self.kind == 'average':
            self._aspect_kind = _aspect_dict['average']
        else:
            self._aspect_kind = _aspect_dict['standard_error']
def _get_entries(fid, evoked_node):
    """Helper to get all evoked entries.

    Scans every FIFFB_EVOKED node for its comment (condition name) and
    its aspect kind, and builds a human-readable listing of the datasets.

    Parameters
    ----------
    fid : file-like
        The open FIF file; closed here only on error.
    evoked_node : list of dict
        The FIFFB_EVOKED nodes from the measurement tree.

    Returns
    -------
    comments : ndarray
        One comment string per evoked dataset.
    aspect_kinds : ndarray
        One FIFF aspect-kind code per evoked dataset.
    t : str
        Indented, human-readable summary of all datasets.
    """
    comments = list()
    aspect_kinds = list()
    for ev in evoked_node:
        # The comment tag holds the condition name of this dataset.
        for k in range(ev['nent']):
            my_kind = ev['directory'][k].kind
            pos = ev['directory'][k].pos
            if my_kind == FIFF.FIFF_COMMENT:
                tag = read_tag(fid, pos)
                comments.append(tag.data)
        # The aspect kind (average vs. standard error) lives in a sub-block.
        my_aspect = dir_tree_find(ev, FIFF.FIFFB_ASPECT)[0]
        for k in range(my_aspect['nent']):
            my_kind = my_aspect['directory'][k].kind
            pos = my_aspect['directory'][k].pos
            if my_kind == FIFF.FIFF_ASPECT_KIND:
                tag = read_tag(fid, pos)
                aspect_kinds.append(int(tag.data))
    comments = np.atleast_1d(comments)
    aspect_kinds = np.atleast_1d(aspect_kinds)
    # A usable file has one comment per aspect kind and at least one entry.
    if len(comments) != len(aspect_kinds) or len(comments) == 0:
        fid.close()
        raise ValueError('Dataset names in FIF file '
                         'could not be found.')
    t = [_aspect_rev.get(str(a), 'Unknown') for a in aspect_kinds]
    t = ['"' + c + '" (' + tt + ')' for tt, c in zip(t, comments)]
    t = ' ' + '\n '.join(t)
    return comments, aspect_kinds, t
def _get_evoked_node(fname):
    """Return the FIFFB_EVOKED nodes contained in an evoked FIF file."""
    fobj, tree, _ = fiff_open(fname)
    with fobj as fid:
        _, meas = read_meas_info(fid, tree)
        return dir_tree_find(meas, FIFF.FIFFB_EVOKED)
def grand_average(all_evoked, interpolate_bads=True):
    """Make grand average of a list evoked data

    When `interpolate_bads` is True, bad channels are interpolated from
    the good MEG/EEG channels before averaging, so the grand average
    contains good channels plus interpolated bads.

    The grand_average.nave attribute is set to the number of evoked
    datasets used to compute the grand average.

    Note: Grand average evoked shall not be used for source localization.

    Parameters
    ----------
    all_evoked : list of Evoked data
        The evoked datasets.
    interpolate_bads : bool
        If True, bad MEG and EEG channels are interpolated.

    Returns
    -------
    grand_average : Evoked
        The grand average data.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # Every element must be an Evoked instance.
    if not all(isinstance(e, Evoked) for e in all_evoked):
        raise ValueError("Not all the elements in list are evoked data")
    # Work on copies so the caller's datasets are left untouched.
    copies = [e.copy() for e in all_evoked]
    if interpolate_bads:
        copies = [e.interpolate_bads() if len(e.info['bads']) > 0 else e
                  for e in copies]
    equalize_channels(copies)  # apply equalize_channels
    # Equal weighting of all datasets, then record how many went in.
    grand_average = combine_evoked(copies, weights='equal')
    grand_average.nave = len(copies)
    grand_average.comment = "Grand average (n = %d)" % grand_average.nave
    return grand_average
def combine_evoked(all_evoked, weights='nave'):
    """Merge evoked data by weighted addition

    Data should have the same channels and the same time instants.
    Subtraction can be performed by passing negative weights (e.g., [1, -1]).

    Parameters
    ----------
    all_evoked : list of Evoked
        The evoked datasets.
    weights : list of float | str
        The weights to apply to the data of each evoked instance.
        Can also be ``'nave'`` to weight according to evoked.nave,
        or ``"equal"`` to use equal weighting (each weighted as ``1/N``).

    Returns
    -------
    evoked : Evoked
        The new evoked data.

    Raises
    ------
    ValueError
        If ``weights`` is an unknown string or the wrong size, or if the
        datasets do not share channels / time instants.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    evoked = all_evoked[0].copy()
    if isinstance(weights, string_types):
        if weights not in ('nave', 'equal'):
            raise ValueError('weights must be a list of float, or "nave" or '
                             '"equal"')
        if weights == 'nave':
            weights = np.array([e.nave for e in all_evoked], float)
            weights /= weights.sum()
        else:  # == 'equal'
            weights = [1. / len(all_evoked)] * len(all_evoked)
    weights = np.array(weights, float)
    if weights.ndim != 1 or weights.size != len(all_evoked):
        raise ValueError('weights must be the same size as all_evoked')
    ch_names = evoked.ch_names
    for e in all_evoked[1:]:
        # These used to be ``assert cond, ValueError(...)``: asserts are
        # stripped under ``python -O`` and the ValueError was only used as
        # the assertion message, never raised. Raise explicitly instead so
        # the sanity checks always run.
        if e.ch_names != ch_names:
            raise ValueError("%s and %s do not contain "
                             "the same channels"
                             % (evoked, e))
        if not np.max(np.abs(e.times - evoked.times)) < 1e-7:
            raise ValueError("%s and %s do not contain the same time instants"
                             % (evoked, e))
    # use union of bad channels
    bads = list(set(evoked.info['bads']).union(*(ev.info['bads']
                                                 for ev in all_evoked[1:])))
    evoked.info['bads'] = bads
    evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked))
    # Effective number of averages of the weighted sum; clip at 1 so the
    # result stays a valid nave.
    evoked.nave = max(int(1. / sum(w ** 2 / e.nave
                                   for w, e in zip(weights, all_evoked))), 1)
    return evoked
@verbose
def read_evokeds(fname, condition=None, baseline=None, kind='average',
                 proj=True, verbose=None):
    """Read evoked dataset(s)

    Parameters
    ----------
    fname : string
        The file name, which should end with -ave.fif or -ave.fif.gz.
    condition : int or str | list of int or str | None
        The index or list of indices of the evoked dataset to read. FIF
        files can contain multiple datasets. If None, all datasets are
        returned as a list.
    baseline : None (default) or tuple of length 2
        The time interval to apply baseline correction. If None do not
        apply it. If baseline is (a, b) the interval is between "a (s)"
        and "b (s)". If a is None the beginning of the data is used and if
        b is None then b is set to the end of the interval. If baseline is
        equal to (None, None) all the time interval is used.
    kind : str
        Either 'average' or 'standard_error', the type of data to read.
    proj : bool
        If False, available projectors won't be applied to the data.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    evoked : Evoked (if condition is int or str) or list of Evoked (if
        condition is None or list)
        The evoked dataset(s).

    See Also
    --------
    write_evokeds
    """
    check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
    # Normalize ``condition`` to a list, remembering whether the caller
    # asked for a single dataset rather than a list.
    single = False
    if condition is None:
        evoked_node = _get_evoked_node(fname)
        condition = range(len(evoked_node))
    elif not isinstance(condition, list):
        condition = [condition]
        single = True
    evokeds = [Evoked(fname, c, baseline=baseline, kind=kind, proj=proj,
                      verbose=verbose) for c in condition]
    return evokeds[0] if single else evokeds
def write_evokeds(fname, evoked):
    """Write an evoked dataset to a file

    Parameters
    ----------
    fname : string
        The file name, which should end with -ave.fif or -ave.fif.gz.
    evoked : Evoked instance, or list of Evoked instances
        The evoked dataset, or list of evoked datasets, to save in one file.
        Note that the measurement info from the first evoked instance is used,
        so be sure that information matches.

    See Also
    --------
    read_evokeds
    """
    check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
    # Accept a single Evoked by wrapping it in a one-element list.
    if not isinstance(evoked, list):
        evoked = [evoked]
    # Create the file and save the essentials
    with start_file(fname) as fid:
        start_block(fid, FIFF.FIFFB_MEAS)
        write_id(fid, FIFF.FIFF_BLOCK_ID)
        if evoked[0].info['meas_id'] is not None:
            write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info['meas_id'])
        # Write measurement info
        write_meas_info(fid, evoked[0].info)
        # One or more evoked data sets
        start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
        for e in evoked:
            start_block(fid, FIFF.FIFFB_EVOKED)
            # Comment is optional
            if e.comment is not None and len(e.comment) > 0:
                write_string(fid, FIFF.FIFF_COMMENT, e.comment)
            # First and last sample
            write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first)
            write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last)
            # The epoch itself
            start_block(fid, FIFF.FIFFB_ASPECT)
            write_int(fid, FIFF.FIFF_ASPECT_KIND, e._aspect_kind)
            write_int(fid, FIFF.FIFF_NAVE, e.nave)
            # Divide out the per-channel calibration (cal * scale) so the
            # raw values are stored; readers re-apply it on load.
            decal = np.zeros((e.info['nchan'], 1))
            for k in range(e.info['nchan']):
                decal[k] = 1.0 / (e.info['chs'][k]['cal'] *
                                  e.info['chs'][k].get('scale', 1.0))
            write_float_matrix(fid, FIFF.FIFF_EPOCH, decal * e.data)
            end_block(fid, FIFF.FIFFB_ASPECT)
            end_block(fid, FIFF.FIFFB_EVOKED)
        end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
        end_block(fid, FIFF.FIFFB_MEAS)
        end_file(fid)
def _get_peak(data, times, tmin=None, tmax=None, mode='abs'):
"""Get feature-index and time of maximum signal from 2D array
Note. This is a 'getter', not a 'finder'. For non-evoked type
data and continuous signals, please use proper peak detection algorithms.
Parameters
----------
data : instance of numpy.ndarray (n_locations, n_times)
The data, either evoked in sensor or source space.
times : instance of numpy.ndarray (n_times)
The times in seconds.
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
Returns
-------
max_loc : int
The index of the feature with the maximum value.
max_time : int
The time point of the maximum response, index.
"""
modes = ('abs', 'neg', 'pos')
if mode not in modes:
raise ValueError('The `mode` parameter must be `{modes}`. You gave '
'me `{mode}`'.format(modes='` or `'.join(modes),
mode=mode))
if tmin is None:
tmin = times[0]
if tmax is None:
tmax = times[-1]
if tmin < times.min():
raise ValueError('The tmin value is out of bounds. It must be '
'within {0} and {1}'.format(times.min(), times.max()))
if tmax > times.max():
raise ValueError('The tmin value is out of bounds. It must be '
'within {0} and {1}'.format(times.min(), times.max()))
if tmin >= tmax:
raise ValueError('The tmin must be smaller than tmax')
time_win = (times >= tmin) & (times <= tmax)
mask = np.ones_like(data).astype(np.bool)
mask[:, time_win] = False
maxfun = np.argmax
if mode == 'pos':
if not np.any(data > 0):
raise ValueError('No positive values encountered. Cannot '
'operate in pos mode.')
elif mode == 'neg':
if not np.any(data < 0):
raise ValueError('No negative values encountered. Cannot '
'operate in neg mode.')
maxfun = np.argmin
masked_index = np.ma.array(np.abs(data) if mode == 'abs' else data,
mask=mask)
max_loc, max_time = np.unravel_index(maxfun(masked_index), data.shape)
return max_loc, max_time
| bsd-3-clause |
ARudiuk/mne-python | tutorials/plot_mne_dspm_source_localization.py | 3 | 4738 | """
.. _tut_inverse_mne_dspm:
Source localization with MNE/dSPM/sLORETA
=========================================
The aim of this tutorials is to teach you how to compute and apply a linear
inverse method such as MNE/dSPM/sLORETA on evoked/raw/epochs data.
"""
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import (make_inverse_operator, apply_inverse,
write_inverse_operator)
###############################################################################
# Process MEG data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_r=1)  # event trigger and conditions
tmin = -0.2  # start of each epoch (200ms before the trigger)
tmax = 0.5  # end of each epoch (500ms after the trigger)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
                       exclude='bads')
baseline = (None, 0)  # means from the first instant to t = 0
# Rejection thresholds per channel type (gradiometer, magnetometer, EOG).
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=picks, baseline=baseline, reject=reject)
###############################################################################
# Compute regularized noise covariance
# ------------------------------------
#
# For more details see :ref:`tut_compute_covariance`.
noise_cov = mne.compute_covariance(
    epochs, tmax=0., method=['shrunk', 'empirical'])
fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)
###############################################################################
# Compute the evoked response
# ---------------------------
evoked = epochs.average()
evoked.plot()
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag')
# Show whitening
evoked.plot_white(noise_cov)
###############################################################################
# Inverse modeling: MNE/dSPM on evoked and raw data
# -------------------------------------------------
# Read the forward solution and compute the inverse operator
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Restrict forward solution as necessary for MEG
fwd = mne.pick_types_forward(fwd, meg=True, eeg=False)
# make an MEG inverse operator
info = evoked.info
inverse_operator = make_inverse_operator(info, fwd, noise_cov,
                                         loose=0.2, depth=0.8)
write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
                       inverse_operator)
###############################################################################
# Compute inverse solution
# ------------------------
method = "dSPM"
snr = 3.
# Regularization parameter: lambda^2 = 1 / SNR^2.
lambda2 = 1. / snr ** 2
stc = apply_inverse(evoked, inverse_operator, lambda2,
                    method=method, pick_ori=None)
del fwd, inverse_operator, epochs  # to save memory
###############################################################################
# Visualization
# -------------
# View activation time-series
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
###############################################################################
# Here we use peak getter to move visualization to the time point of the peak
# and draw a marker at the maximum peak vertex.
vertno_max, time_idx = stc.get_peak(hemi='rh', time_as_index=True)
subjects_dir = data_path + '/subjects'
brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir)
brain.set_data_time_index(time_idx)
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
               scale_factor=0.6)
brain.scale_data_colormap(fmin=8, fmid=12, fmax=15, transparent=True)
brain.show_view('lateral')
###############################################################################
# Morph data to average brain
# ---------------------------
stc_fsaverage = stc.morph(subject_to='fsaverage', subjects_dir=subjects_dir)
brain_fsaverage = stc_fsaverage.plot(surface='inflated', hemi='rh',
                                     subjects_dir=subjects_dir)
brain_fsaverage.set_data_time_index(time_idx)
brain_fsaverage.scale_data_colormap(fmin=8, fmid=12, fmax=15, transparent=True)
brain_fsaverage.show_view('lateral')
###############################################################################
# Exercise
# --------
# - By changing the method parameter to 'sloreta' recompute the source
#   estimates using the sLORETA method.
| bsd-3-clause |
matternet/ardupilot | Tools/mavproxy_modules/lib/magcal_graph_ui.py | 108 | 8248 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
    """wx panel showing magnetometer-calibration progress for one compass.

    Renders a 3D geodesic sphere whose sections are recolored as the
    corresponding orientations are covered, plus status text lines.
    """
    # Pango markup shown for each MAV_CAL status code.
    _status_markup_strings = {
        mavlink.MAG_CAL_NOT_STARTED: 'Not started',
        mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
        mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
        mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
        mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
        mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
    }
    # Section colors: orientation not yet covered vs. covered.
    _empty_color = '#7ea6ce'
    _filled_color = '#4680b9'

    def __init__(self, *k, **kw):
        super(MagcalPanel, self).__init__(*k, **kw)
        # Match the matplotlib figure background to the native panel color.
        facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
        fig = plt.figure(facecolor=facecolor, figsize=(1,1))
        self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
        self._canvas.SetMinSize((300,300))
        self._id_text = wx.StaticText(self, wx.ID_ANY)
        self._status_text = wx.StaticText(self, wx.ID_ANY)
        self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self._id_text)
        sizer.Add(self._status_text)
        sizer.Add(self._completion_pct_text)
        sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
        self.SetSizer(sizer)
        # NOTE(review): ``axis_bgcolor`` was removed in matplotlib 2.2
        # (replaced by ``facecolor``) -- this code targets older matplotlib.
        ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
        self.configure_plot(ax)

    def configure_plot(self, ax):
        """Configure the 3D axes and add the geodesic-grid polygons."""
        extra = .5
        lim = grid.radius + extra
        ax.set_xlim3d(-lim, lim)
        ax.set_ylim3d(-lim, lim)
        ax.set_zlim3d(-lim, lim)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        # Axes are inverted, presumably to match the vehicle body frame --
        # TODO(review) confirm intended orientation.
        ax.invert_zaxis()
        ax.invert_xaxis()
        ax.set_aspect('equal')
        self._polygons_collection = Poly3DCollection(
            grid.sections_triangles,
            edgecolors='#386694',
        )
        ax.add_collection3d(self._polygons_collection)

    def update_status_from_mavlink(self, m):
        """Refresh the status line from a message's ``cal_status`` field."""
        status_string = self._status_markup_strings.get(m.cal_status, '???')
        self._status_text.SetLabelMarkup(
            '<b>Status:</b> %s' % status_string,
        )

    def mavlink_magcal_report(self, m):
        # Final report: show the final status, clear the completion line.
        self.update_status_from_mavlink(m)
        self._completion_pct_text.SetLabel('')

    def mavlink_magcal_progress(self, m):
        # completion_mask is a bitmask: bit j of byte i covers section i*8+j.
        facecolors = []
        for i, mask in enumerate(m.completion_mask):
            for j in range(8):
                section = i * 8 + j
                if mask & 1 << j:
                    facecolor = self._filled_color
                else:
                    facecolor = self._empty_color
                facecolors.append(facecolor)
        self._polygons_collection.set_facecolors(facecolors)
        self._canvas.draw()
        self._id_text.SetLabelMarkup(
            '<b>Compass id:</b> %d' % m.compass_id
        )
        self._completion_pct_text.SetLabelMarkup(
            '<b>Completion:</b> %d%%' % m.completion_pct
        )
        self.update_status_from_mavlink(m)

    # Class-level singleton: one shared legend panel for all compasses.
    _legend_panel = None

    @staticmethod
    def legend_panel(*k, **kw):
        """Create (on first call) and return the shared color legend panel."""
        if MagcalPanel._legend_panel:
            return MagcalPanel._legend_panel
        p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        p.SetSizer(sizer)
        # Swatch + label for sections not yet hit.
        marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
        marker.SetBackgroundColour(MagcalPanel._empty_color)
        sizer.Add(marker, flag=wx.ALIGN_CENTER)
        text = wx.StaticText(p, wx.ID_ANY)
        text.SetLabel('Sections not hit')
        sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
        # Swatch + label for sections already hit.
        marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
        marker.SetBackgroundColour(MagcalPanel._filled_color)
        sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
        text = wx.StaticText(p, wx.ID_ANY)
        text.SetLabel('Sections hit')
        sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
        return p
class MagcalFrame(wx.Frame):
    """Top-level window that polls ``conn`` for MAG_CAL messages and shows
    one :class:`MagcalPanel` per detected compass."""

    def __init__(self, conn):
        super(MagcalFrame, self).__init__(
            None,
            wx.ID_ANY,
            title='Magcal Graph',
        )
        self.SetMinSize((300, 300))
        # ``conn`` is a pipe-like connection (has poll()/recv()) delivering
        # mavlink messages or the string 'close'.
        self._conn = conn
        self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
        self._main_panel.SetScrollbars(1, 1, 1, 1)
        self._magcal_panels = {}
        self._sizer = wx.BoxSizer(wx.VERTICAL)
        self._main_panel.SetSizer(self._sizer)
        # Placeholder shown until the first calibration message arrives.
        idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
        idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
        idle_text.SetForegroundColour('#444444')
        self._sizer.AddStretchSpacer()
        self._sizer.Add(
            idle_text,
            proportion=0,
            flag=wx.ALIGN_CENTER | wx.ALL,
            border=10,
        )
        self._sizer.AddStretchSpacer()
        # Poll the connection five times per second.
        self._timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
        self._timer.Start(200)

    def add_compass(self, id):
        """Create the panel (and, on first use, the legend) for compass ``id``."""
        if not self._magcal_panels:
            # First compass seen: drop the idle placeholder, build the row
            # of per-compass panels plus the shared legend.
            self._sizer.Clear(deleteWindows=True)
            self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
            self._sizer.Add(
                self._magcal_panels_sizer,
                proportion=1,
                flag=wx.EXPAND,
            )
            legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
            self._sizer.Add(
                legend,
                proportion=0,
                flag=wx.ALIGN_CENTER,
            )
        self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
        self._magcal_panels_sizer.Add(
            self._magcal_panels[id],
            proportion=1,
            border=10,
            flag=wx.EXPAND | wx.ALL,
        )

    def timer_callback(self, evt):
        """Drain the connection, then update/create panels per compass."""
        close_requested = False
        mavlink_msgs = {}
        while self._conn.poll():
            m = self._conn.recv()
            if isinstance(m, str) and m == 'close':
                close_requested = True
                continue
            if m.compass_id not in mavlink_msgs:
                # Keep the last two messages so that we get the last progress
                # if the last message is the calibration report.
                mavlink_msgs[m.compass_id] = [None, m]
            else:
                l = mavlink_msgs[m.compass_id]
                l[0] = l[1]
                l[1] = m
        if close_requested:
            self._timer.Stop()
            self.Destroy()
            return
        if not mavlink_msgs:
            return
        # Create panels for compasses seen for the first time.
        needs_fit = False
        for k in mavlink_msgs:
            if k not in self._magcal_panels:
                self.add_compass(k)
                needs_fit = True
        if needs_fit:
            self._sizer.Fit(self)
        # Replay the kept messages, oldest first, onto each panel.
        for k, l in mavlink_msgs.items():
            for m in l:
                if not m:
                    continue
                panel = self._magcal_panels[k]
                if m.get_type() == 'MAG_CAL_PROGRESS':
                    panel.mavlink_magcal_progress(m)
                elif m.get_type() == 'MAG_CAL_REPORT':
                    panel.mavlink_magcal_report(m)
| gpl-3.0 |
jaeddy/bripipetools | scripts/plot_gene_coverage.py | 1 | 7695 | import logging
logger = logging.getLogger(__name__)
import os
import sys
import zipfile
import math
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import lxml.html as lh
from datetime import datetime as dt
def read_rnaseq_metrics(path):
    """Return the lines of a Picard RNA-seq metrics report.

    ``path`` may be a zip archive produced by the pipeline (containing
    either ``CollectRnaSeqMetrics.metrics.txt`` or
    ``RNA_Seq_Metrics_html.html``) or a plain text/HTML file, which is
    then read directly.
    """
    try:
        logger.debug("unzipping contents for {}".format(path))
        zfile = zipfile.ZipFile(path)
        try:
            metrics_file = zfile.open('CollectRnaSeqMetrics.metrics.txt')
        except KeyError:
            metrics_file = zfile.open('RNA_Seq_Metrics_html.html')
        return metrics_file.readlines()
    # The original bare ``except:`` silently swallowed every error
    # (including missing files and KeyboardInterrupt). Catch only the
    # expected cases: not-a-zip, or a zip missing both known members.
    except (zipfile.BadZipfile, KeyError):
        logger.warn("not a zip file; reading lines directly")
        with open(path) as f:
            return f.readlines()
def get_norm_cov(rnaseq_metrics_lines):
    """Extract the 101-bin normalized-coverage histogram from report lines.

    Tries the three layouts seen in practice: the plain metrics text
    (fixed lines 11-112) and two HTML variants at shifted offsets.
    Returns an empty list when no histogram can be parsed.
    """
    logger.debug("parsing normalized coverage histogram")
    try:
        # Layout 1: plain CollectRnaSeqMetrics text; last tab field is
        # the normalized coverage value.
        logger.debug("attempting to parse histogram from "
                     "expected location (lines 11-112)")
        cov_hist_lines = rnaseq_metrics_lines[11:112]
        norm_cov = [float(line.rstrip('\n').split('\t')[-1])
                    for line in cov_hist_lines]
    except ValueError:
        try:
            # Layout 2: HTML report; pull the "<pos>\t<value>" pair out of
            # each line by regex.
            logger.warn("parsing failed; attempting to parse histogram from "
                        "alternative location (lines 31-132)")
            cov_hist_lines = rnaseq_metrics_lines[31:132]
            norm_cov = [float(re.search('[0-9]*\t[0-9]+(\.[0-9]+)*', line)
                        .group()
                        .split('\t')[-1])
                        for line in cov_hist_lines]
        # THIS IS A HACK-Y WORKAROUND. NEED TO PARSE TABLE BETTER
        except AttributeError:
            try:
                # Layout 3: same as layout 2 but shifted up by one line.
                logger.warn("parsing failed; attempting to parse histogram from "
                            "alternative location (lines 30-131)")
                cov_hist_lines = rnaseq_metrics_lines[30:131]
                norm_cov = [float(re.search('[0-9]*\t[0-9]+(\.[0-9]+)*', line)
                            .group()
                            .split('\t')[-1])
                            for line in cov_hist_lines]
            except AttributeError:
                logger.warn("no coverage histogram found, returning empty list")
                norm_cov = []
    return norm_cov
# def scrape_norm_cov_table(path):
# lh.parse(path)
def build_norm_cov_df(metrics_path):
    """Collect per-library normalized coverage into one DataFrame.

    Columns are library IDs (first two underscore-separated fields of the
    file name); rows are the 101 normalized transcript positions. A
    library whose report yields no histogram gets a column of zeros.
    """
    metrics_files = [os.path.join(metrics_path, fname)
                     for fname in os.listdir(metrics_path)
                     if re.search('_al.zip', fname)
                     or re.search('rnaseq_metrics.html', fname)]
    logger.info("found Picard RNA-seq metrics files for {} samples"
                .format(len(metrics_files)))
    coverage_by_lib = {}
    for metrics_file in metrics_files:
        logger.debug("reading RNA-seq metrics from {}".format(metrics_file))
        report_lines = read_rnaseq_metrics(metrics_file)
        coverage = get_norm_cov(report_lines)
        if not len(coverage):
            coverage = [0] * 101
        logger.debug("parsing filename to get sample ID")
        lib_id = '_'.join(os.path.basename(metrics_file).split('_')[0:2])
        coverage_by_lib[lib_id] = coverage
    return pd.DataFrame(data=coverage_by_lib)
def build_metrics_df(metrics_path):
    """Load the first combined-metrics CSV found in ``metrics_path``."""
    logger.info("reading combined metrics file")
    candidates = [f for f in os.listdir(metrics_path)
                  if 'combined_metrics.csv' in f]
    return pd.read_csv(os.path.join(metrics_path, candidates[0]))
def build_figure(ncDf, metDf, project, fc, outFolder):
    """Plot one normalized-coverage subplot per library and save a PDF.

    ncDf : DataFrame
        Normalized coverage; one column per library, rows are positions.
    metDf : DataFrame
        Combined metrics, one row per library.
    project, fc : str
        Project label and full flowcell run string (used in titles and
        the output file name).
    outFolder : str
        Directory/prefix the PDF is written under.
    """
    logger.info("building combined figure")
    with plt.style.context(('ggplot')):
        fig = plt.figure()
        plt.rcParams.update({'axes.titlesize':'medium'})
        # Three subplots per row; figure height grows with library count,
        # reserving one inch at the top for the title text.
        numRows = int(math.ceil(float(len(ncDf.columns)) / 3))
        figHeight = numRows*2 + 1
        plotMargin = float(figHeight - 1) / float(figHeight)
        textHeight = (1 - plotMargin) / 2 + plotMargin
        fig.suptitle(('Normalized coverage vs. normalized transcript position\n'
                      'Project %s\n'
                      'Flowcell: %s' % (project, fc)),
                     x=0.02, y=textHeight, fontsize=11,
                     horizontalalignment='left', verticalalignment='center')
        fig.text(0.93, textHeight, '*libID_fcID [median_cv_coverage]',
                 fontsize=10, fontstyle='italic',
                 horizontalalignment='right', verticalalignment='center')
        # Line color encodes median CV of coverage on a 200-step hot scale.
        colorList = plt.cm.hot(np.linspace(0, 0.5, 200))
        for idx, lib in enumerate(ncDf):
            logger.debug("creating subplot for {}".format(lib))
            # Metrics tables differ in the sample-ID column name.
            try:
                logger.debug("locating sample metrics by field 'libId'")
                libIdx = metDf.libId == lib
            except AttributeError:
                logger.warn("failed; locating sample metrics by field 'libID'")
                libIdx = metDf.libID == lib
            mcc = float(metDf[libIdx]['median_cv_coverage'])
            fqReads = int(metDf[libIdx]['fastq_total_reads'])
            # Handle read pairs and unpaired reads, in case of PE alignment.
            percAligned = (float(metDf[libIdx]['unpaired_reads_examined']) + \
                           float(metDf[libIdx]['read_pairs_examined'])) / \
                           float(metDf[libIdx]['fastq_total_reads'])* 100
            mccColorIdx = min(int(mcc * 100), 200)
            ymax = max(ncDf[lib].max() + 0.1, 1.0)
            ax_i = fig.add_subplot(numRows, 3, idx + 1)
            ncDf[lib].plot(color=[colorList[mccColorIdx - 1],0,0],
                           ax=ax_i, xlim=(-5,105), ylim=(0,ymax))
            ax_i.text(100, 0.1, ('FASTQ reads: %s\n'
                                 '%% aligned: %4.1f' % (fqReads, percAligned)),
                      bbox={'facecolor':'white', 'edgecolor':'grey',
                            'alpha':0.75, 'pad':10},
                      fontsize=9, horizontalalignment='right')
            ax_i.get_xaxis().tick_bottom()
            ax_i.get_yaxis().tick_left()
            ax_i.set_title('%s [%1.2f]' % (lib, mcc))
            ax_i.set_facecolor('white')
        # pull out fc id from full run string
        # (NOTE: the doubled assignment below is redundant but harmless)
        fcid_regex = fcid_regex = re.compile('(A|B|D)([A-Z]|[0-9])*X(X|Y|2)')
        fcid = fcid_regex.search(fc).group()
        fig.set_size_inches(7.5, figHeight)
        fig.tight_layout(rect=(0, 0, 1, plotMargin))
        fig.savefig(outFolder + project + '_' + fcid + '_' + dt.now().strftime('%y%m%d') + '_' + 'geneModelCoverage.pdf',
                    format = "pdf")
def main(argv):
    """Parse project/flowcell from the metrics path and build the figure.

    argv : list of str
        Command-line arguments (without the program name); ``argv[0]`` is
        the metrics directory.
    """
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    metrics_path = argv[0]
    out_path = metrics_path
    path_parts = re.split("/", metrics_path)
    try:
        # Preferred: pull project and flowcell IDs out of the path itself.
        project_regex = re.compile('P+[0-9]+(-[0-9]+){,1}')
        project = project_regex.search(metrics_path).group()
        flowcell_regex = re.compile('[0-9]{6}_[A-Z0-9]+_[0-9]+_(A|B|D)([A-Z]|[0-9])*X(X|Y|2)')
        flowcell_id = flowcell_regex.search(metrics_path).group()
    except AttributeError:
        # Fallback for legacy path layouts: derive both from fixed path
        # components.
        project = re.sub('Processed.*', '', path_parts[4].lstrip('Project_'))
        flowcell_id = path_parts[3]
    # print() calls (not Python-2-only print statements) so the script
    # parses under both Python 2 and 3.
    print("Metrics path: " + metrics_path)
    print("Project: " + project)
    print("Flow cell ID: " + flowcell_id)
    norm_cov_df = build_norm_cov_df(metrics_path)
    metrics_df = build_metrics_df(metrics_path)
    build_figure(norm_cov_df, metrics_df, project, flowcell_id, out_path)


if __name__ == "__main__":
    main(sys.argv[1:])
| mit |
llmercury/Kaggle_Mercedes-Benz_Greener_Manufacturing | Stratified_train_validation_split.py | 1 | 1836 | """
Split train data into development and validation data sets by stratifying the y variable. Here y is a continuous variable.
"""
import numpy as np
import pandas as pd
import random
import pickle
def stratified_kfold(data, nsplit = 5):
    """Split ``data`` into ``nsplit`` folds stratified on a continuous y.

    ``data`` is expected to be sorted by y beforehand, with the original
    row index stored in its third column (``data.iloc[:, 2]``). Rows are
    dealt round-robin in groups of ``nsplit`` consecutive (similar-y)
    rows, shuffled within each group so fold assignment is random.

    Returns
    -------
    dev_ind, val_ind : list of list of int
        For each of the ``nsplit`` folds, the development (all other
        folds) and validation (this fold) row indices.
    """
    num_per_fold = data.shape[0]//nsplit
    # Rows left over when the length is not a multiple of nsplit.
    num_folds_1less = data.shape[0]%nsplit
    rand_delta_lst = list(range(nsplit))
    lst_kfold = [None]*nsplit
    for i in range(nsplit):
        lst_kfold[i] = []
    for i in range(num_per_fold):
        # Shuffle which fold gets which of the next nsplit rows.
        random.shuffle(rand_delta_lst)
        for j in range(nsplit):
            lst_kfold[j].append(int(data.iloc[(i*nsplit + rand_delta_lst[j]), 2]))
    # Distribute the remainder rows over a random subset of folds.
    rand_delta_lst = list(range(num_folds_1less))
    random.shuffle(rand_delta_lst)
    for k in range(num_folds_1less):
        lst_kfold[k].append(int(data.iloc[(num_per_fold*nsplit + rand_delta_lst[k]), 2]))
    val_ind = [None]*nsplit
    dev_ind = [None]*nsplit
    for i in range(nsplit):
        val_ind[i] = []
        dev_ind[i] = []
    # Fold l validates on lst_kfold[l] and develops on all other folds.
    for l in range(nsplit):
        val_ind[l] = lst_kfold[l]
        for p in range(nsplit):
            if p != l:
                dev_ind[l] += lst_kfold[p]
    return dev_ind, val_ind
def n_stratified_k_fold(data, nsplit, ntimes):
    """Repeat the stratified k-fold split ``ntimes``.

    Returns a list of ``[dev_ind, val_ind]`` pairs, one per repetition.
    """
    return [list(stratified_kfold(data, nsplit)) for _ in range(ntimes)]
if __name__ == "__main__":
    # Stratify on the continuous target: sort rows by y so consecutive
    # rows have similar values, then deal them round-robin into folds.
    train = pd.read_csv('train.csv')
    IDny = train.loc[:, ['ID', 'y']]
    # Keep the original row index in a third column; stratified_kfold
    # reads it back via data.iloc[:, 2].
    IDny['ind'] = train.index.values
    IDny.sort_values('y', axis=0, inplace=True)
    skf_result = n_stratified_k_fold(IDny.iloc[:, :], 10, 20)
    with open('dev_val_ind_10f_20_times.pickle', 'wb') as handle:
        pickle.dump(skf_result, handle, protocol=pickle.HIGHEST_PROTOCOL)
4tikhonov/eurogis | maps/usecases/maprender.py | 3 | 4513 | #!/usr/bin/python
# Perfect use case to get boundaries of the city in time
# (C) Vyacheslav Tykhonov vty@iisg.nl
# International Institute of Social History
# http://socialhistory.org
#get_ipython().magic(u'matplotlib inline')
import os
os.environ['MPLCONFIGDIR'] = "/tmp"
import matplotlib
matplotlib.use("Agg")
import urllib2
import simplejson
import json
import sys
from shapely.geometry import shape, Polygon, MultiPolygon
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from collections import defaultdict
from random import randint
# CLI-overridable settings (see argument parsing below).
varyear = None
varcode = None
savefile = None
varcode = 10426  # default municipality (Amsterdam) code
max = 0  # NOTE(review): shadows the builtin max(); appears unused below
x = {}
y = {}
# Optional CLI arguments: municipality code, year, output file basename.
# The original code indexed sys.argv[1] unconditionally (IndexError when
# run with no arguments) and used off-by-one length checks for the year
# (">= 2" guarding argv[2]) and save file ("> 2" guarding argv[3]).
# NOTE(review): argv values are strings while the default varcode is an
# int; downstream comparisons against JSON codes may need int() — confirm.
if len(sys.argv) > 1:
    varcode = sys.argv[1]
if len(sys.argv) > 2:
    varyear = sys.argv[2]
if len(sys.argv) > 3:
    savefile = sys.argv[3]
#varyear = 1812
#savefile = "lastmap.png"
# In[ ]:
# In[2]:
# Default
debug = 0  # set to 1 for verbose tracing of URLs and codes
varname = "Utrecht"
apiurl = "http://node-128.dev.socialhistoryservices.org/api/maps"
# Fill colors cycled over for the non-selected municipalities.
colors = ['red', 'green', 'orange', 'brown', 'purple', 'blue', 'cyan']
#colors = ['#334433', '#6699aa', '#88aaaa', '#aacccc', '#447799']
#colors = ['#bbaa66','#ffaa55','#ffcc77','#eecc77','#bbcc99']
def drawmap(x,y):
    # Render a single polygon outline from parallel x/y coordinate lists.
    # Relies on the module-level ``savefile`` global for optional output.
    fig, ax = subplots(figsize=(5,5))
    ax = fig.gca()
    ax.plot(x,y)
    ax.axis('scaled')
    if savefile:
        fig.savefig(savefile + '.png')
    plt.show()
    return
def coordinates(polygons, amscode, cityname):
    """Extract boundary coordinates from a GeoJSON FeatureCollection.

    Parameters
    ----------
    polygons : dict
        GeoJSON-style mapping with a 'features' list; each feature has
        'properties' (with 'amsterdamcode' and 'name') and 'geometry'.
    amscode : str
        Amsterdam code of the city of interest (ignored when ``cityname``
        is non-empty).
    cityname : str
        City name of interest; takes precedence over ``amscode``.

    Returns
    -------
    co : list or dict
        The matched city's raw geometry coordinates ({} when no match).
    fullmappolygon : defaultdict(list)
        Per-code [x-list, y-list] of the first ring of every feature.
    """
    fullmappolygon = defaultdict(list)
    co = {}
    # A non-empty city name takes precedence over the Amsterdam code.
    if cityname:
        amscode = ''
    for key in polygons:
        if key == 'features':
            for feature in polygons[key]:
                # Round-trip through JSON to normalize the feature to
                # plain dict/list types (and avoid shadowing builtin
                # ``dict``, as the original code did).
                feat = json.loads(json.dumps(feature))
                for prop in feat:
                    if prop == 'properties':
                        maincode = str(feat[prop]['amsterdamcode'])
                        intcode = feat[prop]['amsterdamcode']
                        mainname = feat[prop]['name']
                        # First ring of the (multi)polygon: split into
                        # parallel x and y lists for plotting.
                        ring = feat['geometry']['coordinates'][0][0]
                        xs = [i for i, j in ring]
                        ys = [j for i, j in ring]
                        fullmappolygon[intcode].append(xs)
                        fullmappolygon[intcode].append(ys)
                        # (A dead Python-2 debug branch guarded by a
                        # counter that could never be zero was removed.)
                        if maincode == amscode:
                            co = feat['geometry']['coordinates']
                        if mainname.encode('utf-8') == cityname:
                            co = feat['geometry']['coordinates']
    return (co, fullmappolygon)
def load_api_map(apiurl, code, year):
    # Fetch the GeoJSON of all municipality boundaries for ``year`` from
    # the maps API. NOTE(review): Python 2 only (urllib2 and a print
    # statement); ``amscode`` is computed but unused — confirm intent.
    amscode = str(code)
    jsondataurl = apiurl + "?year=" + str(year) + "&format=geojson"
    if debug:
        print jsondataurl
    req = urllib2.Request(jsondataurl)
    opener = urllib2.build_opener()
    f = opener.open(req)
    datapolygons = simplejson.load(f)
    return datapolygons
def getcoords(datapolygons, amscode, cityname):
    """Return the {amsterdamcode: [xs, ys]} mapping for every municipality.

    The matched city's own coordinates (first value returned by
    ``coordinates``) are intentionally discarded here. The dead local
    lists and commented-out unpacking from the original were removed.
    """
    _, fullmap = coordinates(datapolygons, amscode, cityname)
    return fullmap
fullmappoly = load_api_map(apiurl, varcode, varyear)
varcity = ''
# Build {amsterdamcode: [xs, ys]} for every municipality in this year.
# NOTE(review): ``map`` shadows the builtin map() from here on.
(map) = getcoords(fullmappoly, varcode, varcity)
# In[3]:
count = 0
#fig, ax = subplots(figsize=(8,8), dpi=300)
fig = plt.figure(figsize=(14,10),dpi=100,frameon=False)
ax = fig.gca()
#ax.axes.get_xaxis().set_visible(False)
#ax.axes.get_yaxis().set_visible(False)
ax.set_axis_off()
def plot_polygon(ax, poly, color='red'):
    """Draw a shapely polygon's exterior ring as a translucent patch with a
    black outline on the given axes."""
    ring = np.asarray(poly.exterior)
    ax.add_patch(Polygon(ring, facecolor=color, alpha=0.3))
    ax.plot(ring[:, 0], ring[:, 1], color='black')
# Draw every municipality polygon; the selected code is highlighted in
# green, everything else gets a random colour from the module-level palette.
for code in map:
    x = map[code][0]
    y = map[code][1]
    thiscolor = 'black'
    if code == varcode:
        if debug:
            print varcode
        thiscolor = 'green'
        ax.add_patch(Polygon(zip(x,y), facecolor=thiscolor, alpha=0.3))
    else:
        colorID = randint(0,4)  # index into the (commented-out) ``colors`` palette
        ax.add_patch(Polygon(zip(x,y), facecolor=colors[colorID], alpha=0.3))
    ax.plot(x, y, color=thiscolor)  # outline on top of the filled patch
    count = count + 1
    if count == 0:
        # Dead debug branch: count is incremented just before the test.
        coords = zip(x,y)
        print coords
ax.axis('scaled')  # preserve aspect ratio of the map
if savefile:
    filename = savefile
    fig.savefig(filename)
plt.show()
| gpl-3.0 |
mwcraig/aplpy | aplpy/tests/test_axis_labels.py | 5 | 1670 | import matplotlib
matplotlib.use('Agg')
import numpy as np
from astropy.tests.helper import pytest
from .. import FITSFigure
def test_axis_labels_show_hide():
    # Toggling label visibility in every combination must not raise.
    f = FITSFigure(np.zeros((16, 16)))
    for toggle in ('hide', 'show', 'hide_x', 'show_x', 'hide_y', 'show_y'):
        getattr(f.axis_labels, toggle)()
    f.close()
def test_axis_labels_text():
    # Setting custom label strings must not raise.
    fig = FITSFigure(np.zeros((16, 16)))
    fig.axis_labels.set_xtext('x')
    fig.axis_labels.set_ytext('y')
    fig.close()
def test_axis_labels_pad():
    # Negative and fractional padding values are both accepted.
    fig = FITSFigure(np.zeros((16, 16)))
    fig.axis_labels.set_xpad(-1.)
    fig.axis_labels.set_ypad(0.5)
    fig.close()
def test_axis_labels_position():
    # Each of the four valid placements is accepted without error.
    fig = FITSFigure(np.zeros((16, 16)))
    for setter, where in (('set_xposition', 'top'),
                          ('set_xposition', 'bottom'),
                          ('set_yposition', 'right'),
                          ('set_yposition', 'left')):
        getattr(fig.axis_labels, setter)(where)
    fig.close()
def test_axis_labels_position_invalid():
    # x labels may only go top/bottom and y labels only left/right;
    # anything else raises ValueError.
    fig = FITSFigure(np.zeros((16, 16)))
    for setter, bad in (('set_xposition', 'right'),
                        ('set_xposition', 'left'),
                        ('set_yposition', 'top'),
                        ('set_yposition', 'bottom')):
        with pytest.raises(ValueError):
            getattr(fig.axis_labels, setter)(bad)
    fig.close()
def test_axis_labels_font():
    # A full set of font properties is accepted in a single call.
    fig = FITSFigure(np.zeros((16, 16)))
    fig.axis_labels.set_font(size='small', weight='bold', stretch='normal',
                             family='serif', style='normal', variant='normal')
    fig.close()
| mit |
maheshakya/scikit-learn | sklearn/utils/testing.py | 2 | 22085 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
# nose >= 1.0 ships assert_in/assert_not_in; provide fallbacks otherwise.
try:
    from nose.tools import assert_in, assert_not_in
except ImportError:
    # Nose < 1.0.0
    def assert_in(x, container):
        # Fail unless ``x`` is a member of ``container``.
        assert_true(x in container, msg="%r in %r" % (x, container))

    def assert_not_in(x, container):
        # Fail if ``x`` is a member of ``container``.
        assert_false(x in container, msg="%r in %r" % (x, container))
# nose provides assert_raises_regex on recent Pythons; fall back otherwise.
try:
    from nose.tools import assert_raises_regex
except ImportError:
    # for Py 2.6
    def assert_raises_regex(expected_exception, expected_regexp,
                            callable_obj=None, *args, **kwargs):
        """Helper function to check for message patterns in exceptions"""
        not_raised = False
        try:
            callable_obj(*args, **kwargs)
            # Only reached when no exception escaped the call.
            not_raised = True
        except Exception as e:
            error_message = str(e)
            if not re.compile(expected_regexp).search(error_message):
                raise AssertionError("Error message should match pattern "
                                     "%r. %r does not." %
                                     (expected_regexp, error_message))
        if not_raised:
            raise AssertionError("Should have raised %r" %
                                 expected_exception(expected_regexp))

# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
    """Fail unless ``a <= b``; ``msg`` is appended to the failure text."""
    suffix = "" if msg is None else ": " + msg
    assert a <= b, "%r is not lower than or equal to %r" % (a, b) + suffix
def assert_greater_equal(a, b, msg=None):
    """Fail unless ``a >= b``; ``msg`` is appended to the failure text."""
    suffix = "" if msg is None else ": " + msg
    assert a >= b, "%r is not greater than or equal to %r" % (a, b) + suffix
def assert_warns(warning_class, func, *args, **kw):
    """Test that a certain warning occurs.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    func : callable
        Callable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`

    Returns
    -------
    result : the return value of `func`
    """
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]

        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)

        # An exact class match is required; subclasses do not count.
        found = any(warning.category is warning_class for warning in w)
        if not found:
            raise AssertionError("%s did not give warning: %s( is %s)"
                                 % (func.__name__, warning_class, w))
    return result
def assert_warns_message(warning_class, message, func, *args, **kw):
    # very important to avoid uncontrolled state propagation
    """Test that a certain warning occurs and with a certain message.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    message : str | callable
        The entire message or a substring to  test for. If callable,
        it takes a string as argument and will trigger an assertion error
        if it returns `False`.

    func : callable
        Callable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`.

    Returns
    -------
    result : the return value of `func`
    """
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Let's not catch the numpy internal DeprecationWarnings
            warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
        # Trigger a warning.
        result = func(*args, **kw)
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)

        # Boolean mask of the recorded warnings matching the wanted class.
        found = [warning.category is warning_class for warning in w]
        if not any(found):
            raise AssertionError("No warning raised for %s with class "
                                 "%s"
                                 % (func.__name__, warning_class))

        message_found = False
        # Checks the message of all warnings belong to warning_class
        for index in [i for i, x in enumerate(found) if x]:
            # substring will match, the entire message with typo won't
            msg = w[index].message  # For Python 3 compatibility
            msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
            if callable(message):  # add support for certain tests
                check_in_message = message
            else:
                check_in_message = lambda msg: message in msg

            if check_in_message(msg):
                message_found = True
                break

        if not message_found:
            raise AssertionError("Did not receive the message you expected "
                                 "('%s') for <%s>."
                                 % (message, func.__name__))

    return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
    # Call ``func`` and fail if it emits any warning (numpy's internal
    # VisibleDeprecationWarning excluded).  Returns ``func``'s result.
    # XXX: once we may depend on python >= 2.6, this can be replaced by the
    # warnings module context manager.
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')

        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]

        if len(w) > 0:
            raise AssertionError("Got warnings when calling %s: %s"
                                 % (func.__name__, w))
    return result
def ignore_warnings(obj=None):
    """Context manager and decorator that silences warnings.

    When used as ``@ignore_warnings`` on a callable, returns a wrapper that
    swallows any warnings raised during the call.  When used bare
    (``with ignore_warnings(): ...``), returns a fresh ``_IgnoreWarnings``
    context manager.

    Note: in both variants this clears the warning registries of all loaded
    python modules, so it is unsuitable for testing cross-module
    warning-logging.
    """
    return _ignore_warnings(obj) if callable(obj) else _IgnoreWarnings()
def _ignore_warnings(fn):
    """Decorator to catch and hide warnings without visual nesting.

    Returns ``fn``'s result; warnings raised during the call are recorded
    (and discarded) instead of being displayed.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # very important to avoid uncontrolled state propagation
        clean_warning_registry()
        # The previous version bound the record list (``as w``) and reset it
        # after the ``return`` -- that statement was unreachable dead code
        # and has been removed along with the unused binding.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            return fn(*args, **kwargs)
    return wrapper
class _IgnoreWarnings(object):
    """Improved and simplified Python warnings context manager

    Copied from Python 2.7.5 and modified as required.
    """

    def __init__(self):
        """
        Parameters
        ==========
        category : warning class
            The category to filter. Defaults to Warning. If None,
            all categories will be muted.
        """
        self._record = True                      # always record warnings
        self._module = sys.modules['warnings']   # module to monkey-patch
        self._entered = False                    # guards re-entrancy
        self.log = []                            # recorded WarningMessage objects

    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        name = type(self).__name__
        return "%s(%s)" % (name, ", ".join(args))

    def __enter__(self):
        clean_warning_registry()  # be safe and not propagate state + chaos
        warnings.simplefilter('always')
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save the module's filter list and showwarning hook so they can be
        # restored verbatim on exit.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            self.log = []

            def showwarning(*args, **kwargs):
                # Replacement hook: capture instead of printing.
                self.log.append(warnings.WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return self.log
        else:
            return None

    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        # Restore the original filters and hook, drop the captured log.
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
        self.log[:] = []
        clean_warning_registry()  # be safe and not propagate state + chaos
# Prefer nose's assert_less/assert_greater; fall back to local shims on
# older nose releases.
try:
    from nose.tools import assert_less
except ImportError:
    assert_less = _assert_less

try:
    from nose.tools import assert_greater
except ImportError:
    assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
# numpy >= 1.5 ships assert_allclose; otherwise use the local shim above.
if hasattr(np.testing, 'assert_allclose'):
    assert_allclose = np.testing.assert_allclose
else:
    assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
    """Assert ``function(*args, **kwargs)`` raises ``exception`` whose text
    contains ``message``."""
    try:
        function(*args, **kwargs)
        # Only reached when ``function`` did not raise at all.
        raise AssertionError("Should have raised %r" % exception(message))
    except exception as e:
        assert_in(message, str(e))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.

    Parameters
    ----------
    columns_dict: contains data as
                  columns_dict[column_name] = array of data
    dataname: name of data set
    matfile: file-like object or file name
    ordering: list of column_names, determines the ordering in the data set

    Note: this function transposes all arrays, while fetch_mldata only
    transposes 'data', keep that into account in the tests.
    """
    datasets = dict(columns_dict)

    # transpose all variables
    for name in datasets:
        datasets[name] = datasets[name].T

    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays
    # ``numpy.empty`` is used here: ``scipy.empty`` was only ever an alias
    # for it and has been removed from modern SciPy releases.
    datasets['mldata_descr_ordering'] = np.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name

    scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
    # Lower-case class name kept deliberately: instances replace the
    # ``urlopen`` *function* in sklearn.datasets.mldata.

    def __init__(self, mock_datasets):
        """Object that mocks the urlopen function to fake requests to mldata.

        `mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering).
        `data_dict` itself is a dictionary of {column_name: data_array},
        and `ordering` is a list of column_names to determine the ordering
        in the data set (see `fake_mldata` for details).

        When requesting a dataset with a name that is in mock_datasets,
        this object creates a fake dataset in a StringIO object and
        returns it. Otherwise, it raises an HTTPError.
        """
        self.mock_datasets = mock_datasets

    def __call__(self, urlname):
        # The dataset name is the last path component of the URL.
        dataset_name = urlname.split('/')[-1]
        if dataset_name in self.mock_datasets:
            resource_name = '_' + dataset_name
            from io import BytesIO
            matfile = BytesIO()

            dataset = self.mock_datasets[dataset_name]
            ordering = None
            if isinstance(dataset, tuple):
                dataset, ordering = dataset
            # Serialize the fake .mat payload into the in-memory buffer and
            # rewind it so the caller can read from the start.
            fake_mldata(dataset, resource_name, matfile, ordering)

            matfile.seek(0)
            return matfile
        else:
            # Mirror the real service's behaviour for unknown datasets.
            raise HTTPError(urlname, 404, dataset_name + " is not available",
                            [], None)
def install_mldata_mock(mock_datasets):
    # Monkey-patch sklearn's mldata fetcher so tests never hit the network.
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
    # Restore the real ``urlopen`` after ``install_mldata_mock``.
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
                   "OutputCodeClassifier", "OneVsRestClassifier", "RFE",
                   "RFECV", "BaseEnsemble"]

# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]

# some strange ones, excluded from the generic estimator checks by default
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
             'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer',
             'TfidfTransformer', 'IsotonicRegression', 'OneHotEncoder',
             'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier',
             'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures',
             'GaussianRandomProjectionHash']
def all_estimators(include_meta_estimators=False, include_other=False,
                   type_filter=None, include_dont_test=False):
    """Get a list of all estimators from sklearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.

    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that can be constructed using
        an estimator as their first argument. These are currently
        BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
        OneVsRestClassifier, RFE, RFECV.

    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV

    include_dont_test : boolean, default=False
        Whether to include "special" label estimator or test processors.

    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned.  Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    def is_abstract(c):
        # A class counts as abstract iff it declares abstract methods.
        if not(hasattr(c, '__abstractmethods__')):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    # get parent folder
    path = sklearn.__path__
    # Import every sklearn submodule (silencing import errors, skipping test
    # modules) and collect the classes each one defines.
    for importer, modname, ispkg in pkgutil.walk_packages(
            path=path, prefix='sklearn.', onerror=lambda x: None):
        if ".tests." in modname:
            continue
        module = __import__(modname, fromlist="dummy")
        classes = inspect.getmembers(module, inspect.isclass)
        all_classes.extend(classes)

    all_classes = set(all_classes)

    estimators = [c for c in all_classes
                  if (issubclass(c[1], BaseEstimator)
                      and c[0] != 'BaseEstimator')]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    if not include_dont_test:
        estimators = [c for c in estimators if not c[0] in DONT_TEST]

    if not include_other:
        estimators = [c for c in estimators if not c[0] in OTHER]
    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]

    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {'classifier': ClassifierMixin,
                   'regressor': RegressorMixin,
                   'transformer': TransformerMixin,
                   'cluster': ClusterMixin}
        for name, mixin in filters.items():
            if name in type_filter:
                # Consume the recognized filter name; whatever remains in
                # type_filter afterwards must be invalid.
                type_filter.remove(name)
                filtered_estimators.extend([est for est in estimators
                                            if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        if type_filter:
            raise ValueError("Parameter type_filter must be 'classifier', "
                             "'regressor', 'transformer', 'cluster' or None, got"
                             " %s." % repr(type_filter))

    # drop duplicates, sort for reproducibility
    return sorted(set(estimators))
def set_random_state(estimator, random_state=0):
    """Set ``estimator.random_state`` via ``set_params`` when the estimator
    exposes such a parameter; otherwise do nothing."""
    params = estimator.get_params()
    if "random_state" in params:
        estimator.set_params(random_state=random_state)
def if_matplotlib(func):
    """Test decorator that skips test if matplotlib not installed.

    The wrapped test runs only when matplotlib can be imported, switched to
    the Agg backend, and a figure can be created (the figure call fails when
    no $DISPLAY is available with an interactive backend).
    """
    @wraps(func)
    def run_test(*args, **kwargs):
        try:
            import matplotlib
            matplotlib.use('Agg', warn=False)
            # this fails if no $DISPLAY specified
            matplotlib.pylab.figure()
        except Exception:
            # Previously a bare ``except:``, which also swallowed
            # KeyboardInterrupt and SystemExit; those now propagate.
            raise SkipTest('Matplotlib not available.')
        else:
            return func(*args, **kwargs)
    return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
                  message='Multi-process bug in Mac OS X >= 10.7 '
                          '(see issue #636)'):
    """Test decorator that skips test if OS is Mac OS X and its
    major version is one of ``versions``.
    """
    # ``mac_ver`` returns ('', ..., ...) on non-Mac platforms, so ``skip``
    # is False everywhere except the listed OS X releases.
    mac_version, _, _ = platform.mac_ver()
    skip = '.'.join(mac_version.split('.')[:2]) in versions

    def decorator(func):
        if skip:
            # Replace the test body with an unconditional skip.
            @wraps(func)
            def func(*args, **kwargs):
                raise SkipTest(message)
        return func
    return decorator
def clean_warning_registry():
    """Reset the warning filters and wipe every loaded module's
    ``__warningregistry__`` so previously-seen warnings fire again."""
    warnings.resetwarnings()
    registry_name = "__warningregistry__"
    for module_name, module in list(sys.modules.items()):
        if 'six.moves' in module_name:
            # six.moves lazy modules misbehave on attribute access; skip.
            continue
        if hasattr(module, registry_name):
            getattr(module, registry_name).clear()
def check_skip_network():
    """Raise SkipTest when the SKLEARN_SKIP_NETWORK_TESTS env var is a
    non-zero integer; otherwise return None."""
    flag = os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)
    if int(flag):
        raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
    """Raise SkipTest when running under Travis CI (TRAVIS == "true")."""
    on_travis = os.environ.get('TRAVIS')
    if on_travis == "true":
        raise SkipTest("This test needs to be skipped on Travis")
# Decorator aliases: run the skip checks as nose ``with_setup`` fixtures.
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
ycaihua/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implement compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
# One clusterer instance reused for every cell of the benchmark grid.
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')

# Log-spaced sample-count and feature-count grids.
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
                                    n_features)

scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)

# Time scikit-learn's Ward versus scipy's on identical random data.
# NOTE(review): ``size=(n, p)`` receives floats from logspace; recent numpy
# rejects non-integer sizes -- confirm against the targeted numpy version.
for i, n in enumerate(n_samples):
    for j, p in enumerate(n_features):
        X = np.random.normal(size=(n, p))
        t0 = time.time()
        ward.fit(X)
        scikits_time[j, i] = time.time() - t0
        t0 = time.time()
        hierarchy.ward(X)
        scipy_time[j, i] = time.time() - t0

# ratio > 1 means scikit-learn was slower than scipy for that grid cell.
ratio = scikits_time / scipy_time

pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')  # break-even contour
# NOTE(review): ``np.int`` was removed in numpy >= 1.24; plain ``int`` works.
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
eg-zhang/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
# Dictionary size, signal length, and the true sparsity of the coefficients.
n_components, n_features = 512, 100
n_nonzero_coefs = 17

# generate the data
###################

# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
                                   n_components=n_components,
                                   n_features=n_features,
                                   n_nonzero_coefs=n_nonzero_coefs,
                                   random_state=0)

idx, = w.nonzero()  # indices of the true non-zero coefficients

# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))

# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])

# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])

plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
             fontsize=16)
plt.show()
| bsd-3-clause |
JeffHeard/terrapyn | geocms/drivers/shapefile.py | 1 | 11063 | # from ga_ows.views import wms, wfs
import shutil
import json
from zipfile import ZipFile
import pandas
from django.contrib.gis.geos import Polygon
import os
import sh
from osgeo import osr, ogr
from . import Driver
from pandas import DataFrame
from shapely import wkb
from django.template.defaultfilters import slugify
import re
def ogrfield(elt):
    # Build a DBF-safe attribute name: slugify, swap '-' for '_', truncate to
    # the 10-character shapefile field-name limit.
    # NOTE(review): ``.encode('ascii')`` yields bytes on Python 3, which
    # ``re.sub`` with a str pattern rejects -- this module is Python 2 only.
    return re.sub('-', '_', slugify(elt).encode('ascii'))[0:10]
def identity(x):
    # Render a value as an OGR SQL literal: double-quote strings, stringify
    # everything else.  ``basestring`` makes this Python 2 only.
    return '"' + x + '"' if isinstance(x, basestring) else str(x)
# pandas dtype name -> OGR field type, used when exporting attribute columns.
dtypes = {
    'int64': ogr.OFTInteger,
    'float64': ogr.OFTReal,
    'object': ogr.OFTString,
    'datetime64[ns]': ogr.OFTDateTime
}

# shapely geometry type name -> OGR geometry type constant.
geomTypes = {
    'GeometryCollection': ogr.wkbGeometryCollection,
    'LinearRing': ogr.wkbLinearRing,
    'LineString': ogr.wkbLineString,
    'MultiLineString': ogr.wkbMultiLineString,
    'MultiPoint': ogr.wkbMultiPoint,
    'MultiPolygon': ogr.wkbMultiPolygon,
    'Point': ogr.wkbPoint,
    'Polygon': ogr.wkbPolygon
}
def transform(geom, crx):
    """Apply the coordinate transformation ``crx`` to ``geom`` in place and
    return the geometry; a falsy ``crx`` (e.g. None) is a no-op."""
    if not crx:
        return geom
    geom.Transform(crx)
    return geom
class ShapefileDriver(Driver):
    """Driver that serves zipped ESRI shapefiles as data resources.

    Python 2 only: relies on ``basestring``, ``print`` statements and the
    old ``.next()`` iterator protocol.
    """

    @classmethod
    def supports_multiple_layers(cls):
        # Only the first layer of the shapefile is exposed.
        return False

    @classmethod
    def supports_configuration(cls):
        return False

    def ready_data_resource(self, **kwargs):
        """Other keyword args get passed in as a matter of course, like BBOX, time, and elevation, but this basic driver
        ignores them"""
        slug, srs = super(ShapefileDriver, self).ready_data_resource(**kwargs)
        # Renderers only need the path of the cached .shp component.
        return slug, srs, {
            'type': 'shape',
            "file": self.cached_basename + '.shp'
        }

    def clear_cached_files(self):
        # Remove every cached shapefile component (geometry, index,
        # attributes, projection) through the ``sh`` shell wrapper.
        sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.shp')))
        sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.shx')))
        sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.dbf')))
        sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.prj')))

    def compute_spatial_metadata(self, **kwargs):
        """Other keyword args get passed in as a matter of course, like BBOX, time, and elevation, but this basic driver
        ignores them"""
        super(ShapefileDriver, self).compute_spatial_metadata(**kwargs)
        self.clear_cached_files()

        # Extract the four shapefile components from the uploaded zip into
        # the cache, streaming 64 KiB at a time.
        archive = ZipFile(self.cached_basename + self.src_ext)
        projection_found = False
        for name in archive.namelist():
            xtn = name.split('.')[-1].lower()
            if xtn in {'shp', 'shx', 'dbf', 'prj'} and "__MACOSX" not in name:
                projection_found = projection_found or xtn == 'prj'
                with open(self.cached_basename + '.' + xtn, 'wb') as fout:
                    with archive.open(name) as fin:
                        chunk = fin.read(65536)
                        while chunk:
                            fout.write(chunk)
                            chunk = fin.read(65536)

        if not projection_found:
            # No .prj in the archive: assume WGS84 (EPSG:4326).
            with open(self.cached_basename + '.prj', 'w') as f:
                srs = osr.SpatialReference()
                srs.ImportFromEPSG(4326)
                f.write(srs.ExportToWkt())

        ds = ogr.Open(self.cached_basename + '.shp')
        lyr = ds.GetLayerByIndex(0) if 'sublayer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
        xmin, xmax, ymin, ymax = lyr.GetExtent()
        crs = lyr.GetSpatialRef()

        self.resource.spatial_metadata.native_srs = crs.ExportToProj4()
        # Record both the WGS84 and the native-CRS bounding boxes.
        e4326 = osr.SpatialReference()
        e4326.ImportFromEPSG(4326)
        crx = osr.CoordinateTransformation(crs, e4326)
        x04326, y04326, _ = crx.TransformPoint(xmin, ymin)
        x14326, y14326, _ = crx.TransformPoint(xmax, ymax)
        self.resource.spatial_metadata.bounding_box = Polygon.from_bbox((x04326, y04326, x14326, y14326))
        self.resource.spatial_metadata.native_bounding_box = Polygon.from_bbox((xmin, ymin, xmax, ymax))
        self.resource.spatial_metadata.three_d = False

        self.resource.spatial_metadata.save()
        self.resource.save()

    def get_data_fields(self, **kwargs):
        # Return (name, type-name, width) for every attribute field.
        # NOTE(review): the condition tests ``'layer' not in kwargs`` but then
        # reads ``kwargs['sublayer']`` -- the two keyword names look
        # inconsistent; confirm which one callers actually pass.
        _, _, result = self.ready_data_resource(**kwargs)
        ds = ogr.Open(result['file'])
        lyr = ds.GetLayerByIndex(0) if 'layer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
        return [(field.name, field.GetTypeName(), field.width) for field in lyr.schema]

    def get_data_for_point(self, wherex, wherey, srs, **kwargs):
        # Return attribute dicts for the features at (epsilon == 0) or within
        # epsilon of the requested point.
        result, x1, y1, epsilon = super(ShapefileDriver, self).get_data_for_point(wherex, wherey, srs, **kwargs)
        ds = ogr.Open(result['file'])
        lyr = ds.GetLayerByIndex(0) if 'sublayer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])

        if epsilon == 0:
            lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt("POINT({x1} {y1})".format(**locals())))
        else:
            from django.contrib.gis import geos
            wkt = geos.Point(x1, y1).buffer(epsilon).wkt
            print wkt  # NOTE(review): leftover Python 2 debug print
            lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt(wkt))
        return [f.items() for f in lyr]

    def attrquery(self, key, value):
        # Translate a Django-style ``field__op`` filter into an OGR SQL
        # fragment "field OP literal".
        # NOTE(review): ``op`` is overwritten with the SQL symbol (e.g. ">")
        # by the first lookup, so the second dict below -- keyed by the
        # original suffixes ('gt', 'startswith', ...) -- always raises
        # KeyError.  The value-formatting lookup needs the *original* op.
        key, op = key.split('__')
        op = {
            'gt': ">",
            'gte': ">=",
            'lt': "<",
            'lte': '<=',
            'startswith': 'LIKE',
            'endswith': 'LIKE',
            'istartswith': 'ILIKE',
            'iendswith': 'ILIKE',
            'icontains': "ILIKE",
            'contains': "LIKE",
            'in': 'IN',
            'ne': "<>"
        }[op]

        value = {
            'gt': identity,
            'gte': identity,
            'lt': identity,
            'lte': identity,
            'startswith': lambda x: '%' + x,
            'endswith': lambda x: x + '%',
            'istartswith': lambda x: '%' + x,
            'iendswith': lambda x: x + '%',
            'icontains': lambda x: '%' + x + '%',
            'contains': lambda x: '%' + x + '%',
            'in': lambda x: x if isinstance(x, basestring) else '(' + ','.join(identity(a) for a in x) + ')',
            'ne': identity
        }[op](value)

        return ' '.join([key, op, value])

    def as_dataframe(self, **kwargs):
        """
        Creates a dataframe object for a shapefile's main layer using layer_as_dataframe. This object is cached on disk for
        layer use, but the cached copy will only be picked up if the shapefile's mtime is older than the dataframe's mtime.

        :param shp: The shapefile
        :return:
        """
        dfx_path = self.get_filename('dfx')
        shp_path = self.get_filename('shp')

        if len(kwargs) != 0:
            # Filtered read: bypass the on-disk cache and query OGR directly.
            ds = ogr.Open(shp_path)
            lyr = ds.GetLayerByIndex(0)
            crx = xrc = None  # forward / reverse coordinate transformations

            if 'bbox' in kwargs:
                minx, miny, maxx, maxy = kwargs['bbox']

                if 'srs' in kwargs:
                    # The bbox may be in a foreign SRS: build transformations
                    # into (crx) and back out of (xrc) the native CRS.
                    if isinstance(kwargs['srs'], basestring):
                        s_srs = osr.SpatialReference()
                        if kwargs['srs'].lower().startswith('epsg:'):
                            s_srs.ImportFromEPSG(int(kwargs['srs'].split(':')[1]))
                        else:
                            s_srs.ImportFromProj4(kwargs['srs'])
                    else:
                        s_srs = kwargs['srs']

                    t_srs = self.resource.srs

                    if s_srs.ExportToProj4() != t_srs.ExportToProj4():
                        crx = osr.CoordinateTransformation(s_srs, t_srs)
                        minx, miny, _ = crx.TransformPoint(minx, miny)
                        maxx, maxy, _ = crx.TransformPoint(maxx, maxy)
                        xrc = osr.CoordinateTransformation(t_srs, s_srs)

                lyr.SetSpatialFilterRect(minx, miny, maxx, maxy)
            elif 'boundary' in kwargs:
                boundary = ogr.Geometry(geomTypes[kwargs['boundary_type']], kwargs["boundary"])
                lyr.SetSpatialFilter(boundary)

            if 'query' in kwargs:
                if isinstance(kwargs['query'], basestring):
                    query = json.loads(kwargs['query'])
                else:
                    query = kwargs['query']

                for key, value in query.items():
                    # NOTE(review): operator precedence makes ``attrq`` the
                    # *tuple* ``(<ternary result>, '=')``, which
                    # SetAttributeFilter cannot use; parentheses are missing
                    # around the intended expression.
                    attrq = self.attrquery(key, value) if '__' in key else key, '='
                    lyr.SetAttributeFilter(attrq)

            start = kwargs['start'] if 'start' in kwargs else 0
            count = kwargs['count'] if 'count' in kwargs else len(lyr) - start

            records = []
            for i in range(start):
                lyr.next()  # skip to the requested offset (Py2 iterator protocol)
            for i in range(count):
                f = lyr.next()
                if f.geometry():
                    # Reproject back to the request SRS before decoding WKB
                    # into a shapely geometry.
                    records.append(dict(fid=i, geometry=wkb.loads(transform(f.geometry(), xrc).ExportToWkb()), **f.items()))

            df = DataFrame.from_records(
                data=records,
                index='fid'
            )
            if 'sort_by' in kwargs:
                # NOTE(review): ``sort_index(by=...)`` was removed from
                # modern pandas; ``sort_values`` is the replacement.
                df = df.sort_index(by=kwargs['sort_by'])
            return df

        elif hasattr(self, '_df'):
            # Unfiltered read, already materialized this process.
            return self._df

        elif os.path.exists(dfx_path) and os.stat(dfx_path).st_mtime >= os.stat(shp_path).st_mtime:
            # The on-disk cache is fresh: load it (HDF for big resources).
            if self.resource.big:
                self._df = pandas.read_hdf(dfx_path, 'df')
            else:
                self._df = pandas.read_pickle(dfx_path)
            return self._df

        else:
            # Cache stale or absent: build the frame from OGR and persist it.
            ds = ogr.Open(shp_path)
            lyr = ds.GetLayerByIndex(0)
            df = DataFrame.from_records(
                data=[dict(fid=f.GetFID(), geometry=wkb.loads(f.geometry().ExportToWkb()), **f.items()) for f in lyr if f.geometry()],
                index='fid'
            )
            if self.resource.big:
                df.to_hdf(dfx_path, 'df')
            else:
                df.to_pickle(dfx_path)
            self._df = df
            return self._df

    @classmethod
    def from_dataframe(cls, df, shp, srs):
        """Write an dataframe object out as a shapefile"""
        drv = ogr.GetDriverByName('ESRI Shapefile')

        # Recreate the output directory from scratch.
        if os.path.exists(shp):
            shutil.rmtree(shp)
        os.mkdir(shp)

        ds = drv.CreateDataSource(shp)
        keys = df.keys()
        fieldDefns = [ogr.FieldDefn(ogrfield(name), dtypes[df[name].dtype.name]) for name in keys if name != 'geometry']
        # Layer geometry type is inferred from the first geometry in the
        # frame (Python 2 generator ``.next()``).
        geomType = geomTypes[(f for f in df['geometry']).next().type]
        l = ds.CreateLayer(
            name=os.path.split(shp)[-1],
            srs=srs,
            geom_type=geomType
        )
        for f in fieldDefns:
            l.CreateField(f)

        for i, record in df.iterrows():
            feature = ogr.Feature(l.GetLayerDefn())
            for field, value in ((k, v) for k, v in record.to_dict().items() if k != 'geometry'):
                if isinstance(value, basestring):
                    value = value.encode('ascii')
                feature.SetField(ogrfield(field), value)
            feature.SetGeometry(ogr.CreateGeometryFromWkb(record['geometry'].wkb))
            l.CreateFeature(feature)
        del ds  # force the datasource to flush and close
# Module-level entry point looked up by the driver registry.
driver = ShapefileDriver
| apache-2.0 |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/text.py | 4 | 79856 | """
Classes for including text in a figure.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
import math
import warnings
import contextlib
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import matplotlib.artist as artist
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, maxdict
from matplotlib import docstring
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch
from matplotlib.patches import FancyArrowPatch, Rectangle
import matplotlib.transforms as mtransforms
from matplotlib.transforms import Affine2D, Bbox, Transform
from matplotlib.transforms import BboxBase, BboxTransformTo
from matplotlib.lines import Line2D
from matplotlib.path import Path
from matplotlib.artist import allow_rasterization
from matplotlib.backend_bases import RendererBase
from matplotlib.textpath import TextPath
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
@contextlib.contextmanager
def _wrap_text(textobj):
"""
Temporarily inserts newlines to the text if the wrap option is enabled.
"""
if textobj.get_wrap():
old_text = textobj.get_text()
try:
textobj.set_text(textobj._get_wrapped_text())
yield textobj
finally:
textobj.set_text(old_text)
else:
yield textobj
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
    """
    Return the text angle as float between 0 and 360 degrees.

    *rotation* may be 'horizontal' (0 degrees), 'vertical' (90 degrees),
    None (same as 'horizontal'), or a numeric value in degrees.

    Raises
    ------
    ValueError
        For any other input.
    """
    try:
        angle = float(rotation)
    except (ValueError, TypeError):
        isString = isinstance(rotation, str)
        if (isString and rotation == 'horizontal') or rotation is None:
            angle = 0.
        elif isString and rotation == 'vertical':
            angle = 90.
        else:
            # Fixed message: the former string concatenation rendered as
            # "...numeric value orNone" (missing separators).
            raise ValueError("rotation is {0!r} expected either"
                             " 'horizontal', 'vertical', a numeric value"
                             " or None".format(rotation))
    return angle % 360
# these are not available for the object inspector until after the
# class is build so we define an initial set here for the init
# function and they will be overridden after object defn
docstring.interpd.update(Text="""
========================== ================================================
Property Value
========================== ================================================
alpha float or None
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a
pad in points; if a boxstyle is supplied as
a string, then pad is instead a fraction
of the font size
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family ['serif' | 'sans-serif' | 'cursive' |
'fantasy' | 'monospace']
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties
instance
horizontalalignment or ha ['center' | 'right' | 'left']
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string e.g.,
['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
rotation [ angle in degrees 'vertical' | 'horizontal'
rotation_mode [ None | 'anchor']
size or fontsize [size in points | relative size e.g., 'smaller',
'x-large']
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
usetex [True | False | None]
variant ['normal' | 'small-caps']
verticalalignment or va ['center' | 'top' | 'bottom' | 'baseline']
visible [True | False]
weight or fontweight ['normal' | 'bold' | 'heavy' | 'light' |
'ultrabold' | 'ultralight']
wrap [True | False]
x float
y float
zorder any number
========================== ===============================================
""")
# TODO : This function may move into the Text class as a method. As a
# matter of fact, The information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within the _get_textbox. So, it would better to move this
# function as a method with some refactoring of _get_layout method.
def _get_textbox(text, renderer):
    """
    Calculate the bounding box of *text*.

    Unlike :meth:`matplotlib.text.Text.get_extents`, the box size is
    computed for the text *before* rotation; the returned origin is
    expressed back in rotated (display) coordinates as
    ``(x, y, width, height)``.
    """
    theta = np.deg2rad(text.get_rotation())
    unrotate = mtransforms.Affine2D().rotate(-theta)
    _, lines_info, descent = text._get_layout(renderer)
    xs = []
    ys = []
    # Project each line's two opposite corners into the unrotated frame.
    for _line, (w, h), x, y in lines_info:
        x0, y0 = unrotate.transform_point((x, y))
        y0 -= descent
        xs.extend((x0, x0 + w))
        ys.extend((y0, y0 + h))
    left, bottom = min(xs), min(ys)
    width, height = max(xs) - left, max(ys) - bottom
    # Rotate the box origin back into display coordinates.
    rotate = mtransforms.Affine2D().rotate(theta)
    x_box, y_box = rotate.transform_point((left, bottom))
    return x_box, y_box, width, height
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
_cached = maxdict(50)
def __str__(self):
return "Text(%g,%g,%s)" % (self._x, self._y, repr(self._text))
    def __init__(self,
                 x=0, y=0, text='',
                 color=None,            # defaults to rc params
                 verticalalignment='baseline',
                 horizontalalignment='left',
                 multialignment=None,
                 fontproperties=None,   # defaults to FontProperties()
                 rotation=None,
                 linespacing=None,
                 rotation_mode=None,
                 usetex=None,           # defaults to rcParams['text.usetex']
                 wrap=False,
                 **kwargs
                 ):
        """
        Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
        with string *text*.

        Valid kwargs are
        %(Text)s
        """
        Artist.__init__(self)
        self._x, self._y = x, y
        if color is None:
            color = rcParams['text.color']
        if fontproperties is None:
            fontproperties = FontProperties()
        elif is_string_like(fontproperties):
            # A plain string is treated as a fontconfig pattern.
            fontproperties = FontProperties(fontproperties)
        self.set_text(text)
        self.set_color(color)
        self.set_usetex(usetex)
        self.set_wrap(wrap)
        self._verticalalignment = verticalalignment
        self._horizontalalignment = horizontalalignment
        self._multialignment = multialignment
        self._rotation = rotation
        self._fontproperties = fontproperties
        self._bbox_patch = None  # a FancyBboxPatch instance
        self._renderer = None
        if linespacing is None:
            linespacing = 1.2   # Maybe use rcParam later.
        self._linespacing = linespacing
        self.set_rotation_mode(rotation_mode)
        # Remaining kwargs (including 'bbox') are applied via update().
        self.update(kwargs)
def update(self, kwargs):
"""
Update properties from a dictionary.
"""
bbox = kwargs.pop('bbox', None)
super(Text, self).update(kwargs)
if bbox:
self.set_bbox(bbox) # depends on font properties
def __getstate__(self):
d = super(Text, self).__getstate__()
# remove the cached _renderer (if it exists)
d['_renderer'] = None
return d
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
if not self.get_visible() or self._renderer is None:
return False, {}
l, b, w, h = self.get_window_extent().bounds
r, t = l + w, b + h
x, y = mouseevent.x, mouseevent.y
inside = (l <= x <= r and b <= y <= t)
cattr = {}
# if the text has a surrounding patch, also check containment for it,
# and merge the results with the results for the text.
if self._bbox_patch:
patch_inside, patch_cattr = self._bbox_patch.contains(mouseevent)
inside = inside or patch_inside
cattr["bbox_patch"] = patch_cattr
return inside, cattr
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_unitless_position()
return self.get_transform().transform_point((x, y))
def _get_multialignment(self):
if self._multialignment is not None:
return self._multialignment
else:
return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def set_rotation_mode(self, m):
"""
set text rotation mode. If "anchor", the un-rotated text
will first aligned according to their *ha* and
*va*, and then will be rotated with the alignement
reference point as a origin. If None (default), the text will be
rotated first then will be aligned.
"""
if m is None or m in ["anchor", "default"]:
self._rotation_mode = m
else:
raise ValueError("Unknown rotation_mode : %s" % repr(m))
self.stale = True
def get_rotation_mode(self):
"get text rotation mode"
return self._rotation_mode
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
self.stale = True
    def _get_layout(self, renderer):
        """
        Return the extent (bbox) of the text together with
        multiple-alignment information.  Note that it returns an extent
        of a rotated text when necessary.

        Returns ``(bbox, [(line, (w, h), x, y), ...], descent)`` where
        the per-line positions are relative to the alignment anchor.
        """
        # Layout is memoized on the full property tuple; any relevant
        # property change yields a new key.
        key = self.get_prop_tup()
        if key in self._cached:
            return self._cached[key]
        horizLayout = []
        thisx, thisy = 0.0, 0.0
        xmin, ymin = 0.0, 0.0
        width, height = 0.0, 0.0
        lines = self.get_text().split('\n')
        # Per-line (w, h) and (x, y, w, h) in the unrotated frame.
        whs = np.zeros((len(lines), 2))
        horizLayout = np.zeros((len(lines), 4))
        # Find full vertical extent of font,
        # including ascenders and descenders:
        tmp, lp_h, lp_bl = renderer.get_text_width_height_descent(
            'lp', self._fontproperties, ismath=False)
        offsety = (lp_h - lp_bl) * self._linespacing
        baseline = 0
        for i, line in enumerate(lines):
            clean_line, ismath = self.is_math_text(line)
            if clean_line:
                w, h, d = renderer.get_text_width_height_descent(
                    clean_line, self._fontproperties, ismath=ismath)
            else:
                w, h, d = 0, 0, 0
            # For multiline text, increase the line spacing when the
            # text net-height (excluding baseline) is larger than that
            # of a "l" (e.g., use of superscripts), which seems
            # what TeX does.
            h = max(h, lp_h)
            d = max(d, lp_bl)
            whs[i] = w, h
            baseline = (h - d) - thisy
            thisy -= max(offsety, (h - d) * self._linespacing)
            horizLayout[i] = thisx, thisy, w, h
            thisy -= d
            width = max(width, w)
            descent = d
        # Overall unrotated extent from first/last line records.
        ymin = horizLayout[-1][1]
        ymax = horizLayout[0][1] + horizLayout[0][3]
        height = ymax - ymin
        xmax = xmin + width
        # get the rotation matrix
        M = Affine2D().rotate_deg(self.get_rotation())
        offsetLayout = np.zeros((len(lines), 2))
        offsetLayout[:] = horizLayout[:, 0:2]
        # now offset the individual text lines within the box
        if len(lines) > 1:  # do the multiline alignment
            malign = self._get_multialignment()
            if malign == 'center':
                offsetLayout[:, 0] += width / 2.0 - horizLayout[:, 2] / 2.0
            elif malign == 'right':
                offsetLayout[:, 0] += width - horizLayout[:, 2]
        # the corners of the unrotated bounding box
        cornersHoriz = np.array(
            [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
            np.float_)
        cornersHoriz[:, 1] -= descent
        # now rotate the bbox
        cornersRotated = M.transform(cornersHoriz)
        txs = cornersRotated[:, 0]
        tys = cornersRotated[:, 1]
        # compute the bounds of the rotated box
        xmin, xmax = txs.min(), txs.max()
        ymin, ymax = tys.min(), tys.max()
        width = xmax - xmin
        height = ymax - ymin
        # Now move the box to the target position offset the display
        # bbox by alignment
        halign = self._horizontalalignment
        valign = self._verticalalignment
        rotation_mode = self.get_rotation_mode()
        if rotation_mode != "anchor":
            # compute the text location in display coords and the offsets
            # necessary to align the bbox with that location
            if halign == 'center':
                offsetx = (xmin + width / 2.0)
            elif halign == 'right':
                offsetx = (xmin + width)
            else:
                offsetx = xmin
            if valign == 'center':
                offsety = (ymin + height / 2.0)
            elif valign == 'top':
                offsety = (ymin + height)
            elif valign == 'baseline':
                offsety = (ymin + height) - baseline
            else:
                offsety = ymin
        else:
            # "anchor" mode: align using the unrotated corners, then
            # rotate the anchor point itself.
            xmin1, ymin1 = cornersHoriz[0]
            xmax1, ymax1 = cornersHoriz[2]
            if halign == 'center':
                offsetx = (xmin1 + xmax1) / 2.0
            elif halign == 'right':
                offsetx = xmax1
            else:
                offsetx = xmin1
            if valign == 'center':
                offsety = (ymin1 + ymax1) / 2.0
            elif valign == 'top':
                offsety = ymax1
            elif valign == 'baseline':
                offsety = ymax1 - baseline
            else:
                offsety = ymin1
            offsetx, offsety = M.transform_point((offsetx, offsety))
        xmin -= offsetx
        ymin -= offsety
        bbox = Bbox.from_bounds(xmin, ymin, width, height)
        # now rotate the positions around the first x,y position
        xys = M.transform(offsetLayout)
        xys -= (offsetx, offsety)
        xs, ys = xys[:, 0], xys[:, 1]
        ret = bbox, list(zip(lines, whs, xs, ys)), descent
        self._cached[key] = ret
        return ret
    def set_bbox(self, rectprops):
        """
        Draw a bounding box around self.  rectprops are any settable
        properties for a FancyBboxPatch, e.g., facecolor='red', alpha=0.5.

          t.set_bbox(dict(facecolor='red', alpha=0.5))

        The default boxstyle is 'square'.  The mutation scale of the
        FancyBboxPatch is set to the fontsize.  Passing None removes an
        existing box.

        ACCEPTS: FancyBboxPatch prop dict
        """
        if rectprops is not None:
            # Work on a copy so the caller's dict is left untouched.
            props = rectprops.copy()
            boxstyle = props.pop("boxstyle", None)
            pad = props.pop("pad", None)
            if boxstyle is None:
                boxstyle = "square"
                if pad is None:
                    pad = 4  # points
                # FancyBboxPatch expects pad as a fraction of font size.
                pad /= self.get_size()  # to fraction of font size
            else:
                if pad is None:
                    pad = 0.3
            # boxstyle could be a callable or a string
            if is_string_like(boxstyle) and "pad" not in boxstyle:
                boxstyle += ",pad=%0.2f" % pad
            bbox_transmuter = props.pop("bbox_transmuter", None)
            # Placeholder geometry; the real bounds/transform are set in
            # update_bbox_position_size at draw time.
            self._bbox_patch = FancyBboxPatch(
                (0., 0.),
                1., 1.,
                boxstyle=boxstyle,
                bbox_transmuter=bbox_transmuter,
                transform=mtransforms.IdentityTransform(),
                **props)
        else:
            self._bbox_patch = None
        self._update_clip_properties()
def get_bbox_patch(self):
"""
Return the bbox Patch object. Returns None if the
FancyBboxPatch is not made.
"""
return self._bbox_patch
    def update_bbox_position_size(self, renderer):
        """
        Update the location and the size of the bbox.  This method
        should be used when the position and size of the bbox needs to
        be updated before actually drawing the bbox.
        """
        if self._bbox_patch:
            trans = self.get_transform()
            # don't use self.get_unitless_position here, which refers to text
            # position in Text, and dash position in TextWithDash:
            posx = float(self.convert_xunits(self._x))
            posy = float(self.convert_yunits(self._y))
            posx, posy = trans.transform_point((posx, posy))
            x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
            self._bbox_patch.set_bounds(0., 0., w_box, h_box)
            # Rotate about the origin, then shift to the anchor position.
            theta = np.deg2rad(self.get_rotation())
            tr = mtransforms.Affine2D().rotate(theta)
            tr = tr.translate(posx + x_box, posy + y_box)
            self._bbox_patch.set_transform(tr)
            # Box padding etc. scale with the font size, in pixels.
            fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
            self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBboxPatch), and draw
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0., w_box, h_box)
theta = np.deg2rad(self.get_rotation())
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx + x_box, posy + y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def _update_clip_properties(self):
clipprops = dict(clip_box=self.clipbox,
clip_path=self._clippath,
clip_on=self._clipon)
if self._bbox_patch:
bbox = self._bbox_patch.update(clipprops)
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
super(Text, self).set_clip_box(clipbox)
self._update_clip_properties()
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
super(Text, self).set_clip_path(path, transform)
self._update_clip_properties()
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
When False artists will be visible out side of the axes which
can lead to unexpected results.
ACCEPTS: [True | False]
"""
super(Text, self).set_clip_on(b)
self._update_clip_properties()
def get_wrap(self):
"""
Returns the wrapping state for the text.
"""
return self._wrap
def set_wrap(self, wrap):
"""
Sets the wrapping state for the text.
"""
self._wrap = wrap
    def _get_wrap_line_width(self):
        """
        Returns the maximum line width for wrapping text based on the
        current orientation, in pixels.
        """
        x0, y0 = self.get_transform().transform(self.get_position())
        figure_box = self.get_figure().get_window_extent()
        # Calculate available width based on text alignment
        alignment = self.get_horizontalalignment()
        # NOTE(review): this mutates the rotation mode as a side effect
        # of a width query and never restores it -- confirm intentional.
        self.set_rotation_mode('anchor')
        rotation = self.get_rotation()
        # Distance from the anchor to the figure edge along the baseline
        # direction (and the opposite direction for 'right'/centred).
        left = self._get_dist_to_box(rotation, x0, y0, figure_box)
        right = self._get_dist_to_box(
            (180 + rotation) % 360,
            x0,
            y0,
            figure_box)
        if alignment == 'left':
            line_width = left
        elif alignment == 'right':
            line_width = right
        else:
            # Centred text can grow symmetrically, so twice the smaller
            # of the two distances.
            line_width = 2 * min(left, right)
        return line_width
    def _get_dist_to_box(self, rotation, x0, y0, figure_box):
        """
        Returns the distance from the given point (*x0*, *y0*) to the
        boundaries of *figure_box*, measured along the direction given
        by *rotation* (degrees), in pixels.
        """
        # Each quadrant of the rotation selects which pair of figure
        # edges the ray can hit first; project onto both candidate
        # edges and keep the nearer intersection.
        if rotation > 270:
            quad = rotation - 270
            h1 = y0 / math.cos(math.radians(quad))
            h2 = (figure_box.x1 - x0) / math.cos(math.radians(90 - quad))
        elif rotation > 180:
            quad = rotation - 180
            h1 = x0 / math.cos(math.radians(quad))
            h2 = y0 / math.cos(math.radians(90 - quad))
        elif rotation > 90:
            quad = rotation - 90
            h1 = (figure_box.y1 - y0) / math.cos(math.radians(quad))
            h2 = x0 / math.cos(math.radians(90 - quad))
        else:
            h1 = (figure_box.x1 - x0) / math.cos(math.radians(rotation))
            h2 = (figure_box.y1 - y0) / math.cos(math.radians(90 - rotation))
        return min(h1, h2)
def _get_rendered_text_width(self, text):
"""
Returns the width of a given text string, in pixels.
"""
w, h, d = self._renderer.get_text_width_height_descent(
text,
self.get_fontproperties(),
False)
return math.ceil(w)
    def _get_wrapped_text(self):
        """
        Return a copy of the text with new lines added, so that
        the text is wrapped relative to the parent figure.
        """
        # Not fit to handle breaking up latex syntax correctly, so
        # ignore latex for now.
        if self.get_usetex():
            return self.get_text()
        # Build the line incrementally, for a more accurate measure of
        # length.
        line_width = self._get_wrap_line_width()
        wrapped_str = ""
        line = ""
        for word in self.get_text().split(' '):
            # New lines in the user's text need to force a split, so
            # that it's not using the longest current line width in the
            # line being built.
            sub_words = word.split('\n')
            for i in range(len(sub_words)):
                # Measure with the candidate word appended (including
                # the joining space).
                current_width = self._get_rendered_text_width(
                    line + ' ' + sub_words[i])
                # Split long lines, and each newline found in the
                # current word (i > 0 means a '\n' preceded this piece).
                if current_width > line_width or i > 0:
                    wrapped_str += line + '\n'
                    line = ""
                if line == "":
                    line = sub_words[i]
                else:
                    line += ' ' + sub_words[i]
        return wrapped_str + line
    @allow_rasterization
    def draw(self, renderer):
        """
        Draws the :class:`Text` object to the given *renderer*.

        Caches the renderer for later extent queries; does nothing when
        the artist is invisible or the text is effectively empty.
        """
        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible():
            return
        if self.get_text().strip() == '':
            return
        renderer.open_group('text', self.get_gid())
        # Temporarily substitute the wrapped text, if wrapping is on.
        with _wrap_text(self) as textobj:
            bbox, info, descent = textobj._get_layout(renderer)
            trans = textobj.get_transform()
            # don't use textobj.get_position here, which refers to text
            # position in Text, and dash position in TextWithDash:
            posx = float(textobj.convert_xunits(textobj._x))
            posy = float(textobj.convert_yunits(textobj._y))
            if not np.isfinite(posx) or not np.isfinite(posy):
                raise ValueError("posx and posy should be finite values")
            posx, posy = trans.transform_point((posx, posy))
            canvasw, canvash = renderer.get_canvas_width_height()
            # draw the FancyBboxPatch
            if textobj._bbox_patch:
                textobj._draw_bbox(renderer, posx, posy)
            gc = renderer.new_gc()
            gc.set_foreground(textobj.get_color())
            gc.set_alpha(textobj.get_alpha())
            gc.set_url(textobj._url)
            textobj._set_gc_clip(gc)
            angle = textobj.get_rotation()
            for line, wh, x, y in info:
                # The text object itself is only forwarded to the
                # backend for single-line strings.
                mtext = textobj if len(info) == 1 else None
                x = x + posx
                y = y + posy
                if renderer.flipy():
                    # Backend y axis points down; flip to canvas coords.
                    y = canvash - y
                clean_line, ismath = textobj.is_math_text(line)
                if textobj.get_path_effects():
                    from matplotlib.patheffects import PathEffectRenderer
                    textrenderer = PathEffectRenderer(
                        textobj.get_path_effects(), renderer)
                else:
                    textrenderer = renderer
                if textobj.get_usetex():
                    textrenderer.draw_tex(gc, x, y, clean_line,
                                          textobj._fontproperties, angle,
                                          mtext=mtext)
                else:
                    textrenderer.draw_text(gc, x, y, clean_line,
                                           textobj._fontproperties, angle,
                                           ismath=ismath, mtext=mtext)
            gc.restore()
        renderer.close_group('text')
        self.stale = False
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
return self.get_fontproperties()
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_unitless_position(self):
"Return the unitless position of the text as a tuple (*x*, *y*)"
# This will get the position with all unit information stripped away.
# This is here for convienience since it is done in several locations.
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
# This should return the same data (possible unitized) as was
# specified with 'set_x' and 'set_y'.
return self._x, self._y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (e.g., layouts) and
need to know if the text has changed.
"""
x, y = self.get_unitless_position()
return (x, y, self.get_text(), self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties),
self._rotation, self._rotation_mode,
self.figure.dpi, id(self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
'alias for :meth:`getverticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
'''
#return _unit_box
if not self.get_visible():
return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self.get_text().strip() == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx, ty, 0, 0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info, descent = self._get_layout(self._renderer)
x, y = self.get_unitless_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
To change the position of the bounding box.
ACCEPTS: any matplotlib color
"""
if self._bbox_patch is None:
self.set_bbox(dict(facecolor=color, edgecolor=color))
else:
self._bbox_patch.update(dict(facecolor=color))
self._update_clip_properties()
self.stale = True
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
self.stale = True
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
Set the horizontal alignment to one of
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' %
str(legal))
self._horizontalalignment = align
self.stale = True
def set_ma(self, align):
'alias for set_verticalalignment'
self.set_multialignment(align)
def set_multialignment(self, align):
"""
Set the alignment for multiple lines layout. The layout of the
bounding box of all the lines is determined bu the horizontalalignment
and verticalalignment properties, but the multiline text within that
box can be
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' %
str(legal))
self._multialignment = align
self.stale = True
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
self.stale = True
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' |
'monospace' ]
"""
self._fontproperties.set_family(fontname)
self.stale = True
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
self.stale = True
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
self.stale = True
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [size in points | 'xx-small' | 'x-small' | 'small' |
'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
self.stale = True
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [a numeric value in range 0-1000 | 'ultralight' | 'light' |
'normal' | 'regular' | 'book' | 'medium' | 'roman' |
'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' |
'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
self.stale = True
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [a numeric value in range 0-1000 | 'ultra-condensed' |
'extra-condensed' | 'condensed' | 'semi-condensed' |
'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' |
'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
self.stale = True
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
self.stale = True
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of %s' %
str(legal))
self._verticalalignment = align
self.stale = True
    def set_text(self, s):
        """
        Set the text string *s*.

        It may contain newlines (``\\n``) or math in LaTeX syntax.

        ACCEPTS: string or anything printable with '%s' conversion.
        """
        # Coerce to a string exactly as '%s' would (handles non-strings).
        self._text = '%s' % (s,)
        self.stale = True
@staticmethod
def is_math_text(s):
"""
Returns a cleaned string and a boolean flag.
The flag indicates if the given string *s* contains any mathtext,
determined by counting unescaped dollar signs. If no mathtext
is present, the cleaned string has its dollar signs unescaped.
If usetex is on, the flag always has the value "TeX".
"""
# Did we find an even number of non-escaped dollar signs?
# If so, treat is as math text.
if rcParams['text.usetex']:
if s == ' ':
s = r'\ '
return s, 'TeX'
if cbook.is_math_text(s):
return s, True
else:
return s.replace(r'\$', '$'), False
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
self.stale = True
def set_font_properties(self, fp):
    """Alias for :meth:`set_fontproperties`."""
    self.set_fontproperties(fp)
def set_usetex(self, usetex):
    """
    Set this `Text` object to render using TeX (or not).

    If `None` is given, the option will be reset to use the value of
    `rcParams['text.usetex']`.
    """
    # None means "defer to rcParams"; anything else is coerced to bool.
    self._usetex = None if usetex is None else bool(usetex)
    self.stale = True
def get_usetex(self):
    """
    Return whether this `Text` object will render using TeX.

    If the user has not manually set this value, it will default to
    the value of `rcParams['text.usetex']`.
    """
    if self._usetex is not None:
        return self._usetex
    return rcParams['text.usetex']
# Register the Text kwarg table for %(Text)s docstring interpolation and
# dedent the interpolated constructor docstring now that Text is defined.
docstring.interpd.update(Text=artist.kwdoc(Text))
docstring.dedent_interpd(Text.__init__)
class TextWithDash(Text):
    """
    This is basically a :class:`~matplotlib.text.Text` with a dash
    (drawn with a :class:`~matplotlib.lines.Line2D`) before/after
    it. It is intended to be a drop-in replacement for
    :class:`~matplotlib.text.Text`, and should behave identically to
    it when *dashlength* = 0.0.

    The dash always comes between the point specified by
    :meth:`~matplotlib.text.Text.set_position` and the text. When a
    dash exists, the text alignment arguments (*horizontalalignment*,
    *verticalalignment*) are ignored.

    *dashlength* is the length of the dash in canvas units.
    (default = 0.0).

    *dashdirection* is one of 0 or 1, where 0 draws the dash after the
    text and 1 before. (default = 0).

    *dashrotation* specifies the rotation of the dash, and should
    generally stay *None*. In this case
    :meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
    :meth:`~matplotlib.text.Text.get_rotation`. (i.e., the dash takes
    its rotation from the text's rotation). Because the text center is
    projected onto the dash, major deviations in the rotation cause
    what may be considered visually unappealing results.
    (default = *None*)

    *dashpad* is a padding length to add (or subtract) space
    between the text and the dash, in canvas units.
    (default = 3)

    *dashpush* "pushes" the dash and text away from the point
    specified by :meth:`~matplotlib.text.Text.set_position` by the
    amount in canvas units. (default = 0)

    .. note::

        The alignment of the two objects is based on the bounding box
        of the :class:`~matplotlib.text.Text`, as obtained by
        :meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
        turn, appears to depend on the font metrics as given by the
        rendering backend. Hence the quality of the "centering" of the
        label text with respect to the dash varies depending on the
        backend used.

    .. note::

        I'm not sure that I got the
        :meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
        or whether that's sufficient for providing the object bounding
        box.
    """
    __name__ = 'textwithdash'

    def __str__(self):
        return "TextWithDash(%g,%g,%s)" % (self._x, self._y, repr(self._text))

    def __init__(self,
                 x=0, y=0, text='',
                 color=None,    # defaults to rc params
                 verticalalignment='center',
                 horizontalalignment='center',
                 multialignment=None,
                 fontproperties=None,  # defaults to FontProperties()
                 rotation=None,
                 linespacing=None,
                 dashlength=0.0,
                 dashdirection=0,
                 dashrotation=None,
                 dashpad=3,
                 dashpush=0,
                 ):
        Text.__init__(self, x=x, y=y, text=text, color=color,
                      verticalalignment=verticalalignment,
                      horizontalalignment=horizontalalignment,
                      multialignment=multialignment,
                      fontproperties=fontproperties,
                      rotation=rotation,
                      linespacing=linespacing)

        # The position (x,y) values for text and dashline
        # are bogus as given in the instantiation; they will
        # be set correctly by update_coords() in draw()

        self.dashline = Line2D(xdata=(x, x),
                               ydata=(y, y),
                               color='k',
                               linestyle='-')

        # The "dash" position is kept separately from the Text position
        # (self._x/self._y), which is recomputed at draw time.
        self._dashx = float(x)
        self._dashy = float(y)
        self._dashlength = dashlength
        self._dashdirection = dashdirection
        self._dashrotation = dashrotation
        self._dashpad = dashpad
        self._dashpush = dashpush

        #self.set_bbox(dict(pad=0))

    def get_unitless_position(self):
        "Return the unitless position of the text as a tuple (*x*, *y*)"
        # This will get the position with all unit information stripped away.
        # This is here for convienience since it is done in several locations.
        x = float(self.convert_xunits(self._dashx))
        y = float(self.convert_yunits(self._dashy))
        return x, y

    def get_position(self):
        "Return the position of the text as a tuple (*x*, *y*)"
        # This should return the same data (possibly unitized) as was
        # specified with set_x and set_y
        return self._dashx, self._dashy

    def get_prop_tup(self):
        """
        Return a hashable tuple of properties.

        Not intended to be human readable, but useful for backends who
        want to cache derived information about text (e.g., layouts) and
        need to know if the text has changed.
        """
        # Extend the base Text property tuple with the dash parameters so
        # a change to any of them invalidates cached layouts.
        props = [p for p in Text.get_prop_tup(self)]
        props.extend([self._x, self._y, self._dashlength,
                      self._dashdirection, self._dashrotation, self._dashpad,
                      self._dashpush])
        return tuple(props)

    def draw(self, renderer):
        """
        Draw the :class:`TextWithDash` object to the given *renderer*.
        """
        self.update_coords(renderer)
        Text.draw(self, renderer)
        if self.get_dashlength() > 0.0:
            self.dashline.draw(renderer)
        self.stale = False

    def update_coords(self, renderer):
        """
        Computes the actual *x*, *y* coordinates for text based on the
        input *x*, *y* and the *dashlength*. Since the rotation is
        with respect to the actual canvas's coordinates we need to map
        back and forth.
        """
        dashx, dashy = self.get_unitless_position()
        dashlength = self.get_dashlength()
        # Shortcircuit this process if we don't have a dash
        if dashlength == 0.0:
            self._x, self._y = dashx, dashy
            return

        dashrotation = self.get_dashrotation()
        dashdirection = self.get_dashdirection()
        dashpad = self.get_dashpad()
        dashpush = self.get_dashpush()

        angle = get_rotation(dashrotation)
        theta = np.pi * (angle / 180.0 + dashdirection - 1)
        cos_theta, sin_theta = np.cos(theta), np.sin(theta)

        transform = self.get_transform()

        # Compute the dash end points
        # The 'c' prefix is for canvas coordinates
        cxy = transform.transform_point((dashx, dashy))
        cd = np.array([cos_theta, sin_theta])
        c1 = cxy + dashpush * cd
        c2 = cxy + (dashpush + dashlength) * cd

        inverse = transform.inverted()
        (x1, y1) = inverse.transform_point(tuple(c1))
        (x2, y2) = inverse.transform_point(tuple(c2))
        self.dashline.set_data((x1, x2), (y1, y2))

        # We now need to extend this vector out to
        # the center of the text area.
        # The basic problem here is that we're "rotating"
        # two separate objects but want it to appear as
        # if they're rotated together.
        # This is made non-trivial because of the
        # interaction between text rotation and alignment -
        # text alignment is based on the bbox after rotation.
        # We reset/force both alignments to 'center'
        # so we can do something relatively reasonable.
        # There's probably a better way to do this by
        # embedding all this in the object's transformations,
        # but I don't grok the transformation stuff
        # well enough yet.
        we = Text.get_window_extent(self, renderer=renderer)
        w, h = we.width, we.height
        # Watch for zeros
        if sin_theta == 0.0:
            dx = w
            dy = 0.0
        elif cos_theta == 0.0:
            dx = 0.0
            dy = h
        else:
            tan_theta = sin_theta / cos_theta
            dx = w
            dy = w * tan_theta
            if dy > h or dy < -h:
                dy = h
                dx = h / tan_theta
        cwd = np.array([dx, dy]) / 2
        cwd *= 1 + dashpad / np.sqrt(np.dot(cwd, cwd))
        cw = c2 + (dashdirection * 2 - 1) * cwd

        newx, newy = inverse.transform_point(tuple(cw))
        self._x, self._y = newx, newy

        # Now set the window extent
        # I'm not at all sure this is the right way to do this.
        we = Text.get_window_extent(self, renderer=renderer)
        self._twd_window_extent = we.frozen()
        self._twd_window_extent.update_from_data_xy(np.array([c1]), False)

        # Finally, make text align center
        Text.set_horizontalalignment(self, 'center')
        Text.set_verticalalignment(self, 'center')

    def get_window_extent(self, renderer=None):
        '''
        Return a :class:`~matplotlib.transforms.Bbox` object bounding
        the text, in display units.

        In addition to being used internally, this is useful for
        specifying clickable regions in a png file on a web page.

        *renderer* defaults to the _renderer attribute of the text
        object. This is not assigned until the first execution of
        :meth:`draw`, so you must use this kwarg if you want
        to call :meth:`get_window_extent` prior to the first
        :meth:`draw`. For getting web page regions, it is
        simpler to call the method after saving the figure.
        '''
        self.update_coords(renderer)
        if self.get_dashlength() == 0.0:
            return Text.get_window_extent(self, renderer=renderer)
        else:
            # update_coords() computed an extent that also covers the dash.
            return self._twd_window_extent

    def get_dashlength(self):
        """
        Get the length of the dash.
        """
        return self._dashlength

    def set_dashlength(self, dl):
        """
        Set the length of the dash.

        ACCEPTS: float (canvas units)
        """
        self._dashlength = dl
        self.stale = True

    def get_dashdirection(self):
        """
        Get the direction dash.  1 is before the text and 0 is after.
        """
        return self._dashdirection

    def set_dashdirection(self, dd):
        """
        Set the direction of the dash following the text.
        1 is before the text and 0 is after. The default
        is 0, which is what you'd want for the typical
        case of ticks below and on the left of the figure.

        ACCEPTS: int (1 is before, 0 is after)
        """
        self._dashdirection = dd
        self.stale = True

    def get_dashrotation(self):
        """
        Get the rotation of the dash in degrees.
        """
        # Default to the text's own rotation when not explicitly set.
        if self._dashrotation is None:
            return self.get_rotation()
        else:
            return self._dashrotation

    def set_dashrotation(self, dr):
        """
        Set the rotation of the dash, in degrees

        ACCEPTS: float (degrees)
        """
        self._dashrotation = dr
        self.stale = True

    def get_dashpad(self):
        """
        Get the extra spacing between the dash and the text, in canvas units.
        """
        return self._dashpad

    def set_dashpad(self, dp):
        """
        Set the "pad" of the TextWithDash, which is the extra spacing
        between the dash and the text, in canvas units.

        ACCEPTS: float (canvas units)
        """
        self._dashpad = dp
        self.stale = True

    def get_dashpush(self):
        """
        Get the extra spacing between the dash and the specified text
        position, in canvas units.
        """
        return self._dashpush

    def set_dashpush(self, dp):
        """
        Set the "push" of the TextWithDash, which
        is the extra spacing between the beginning
        of the dash and the specified position.

        ACCEPTS: float (canvas units)
        """
        self._dashpush = dp
        self.stale = True

    def set_position(self, xy):
        """
        Set the (*x*, *y*) position of the :class:`TextWithDash`.

        ACCEPTS: (x, y)
        """
        self.set_x(xy[0])
        self.set_y(xy[1])

    def set_x(self, x):
        """
        Set the *x* position of the :class:`TextWithDash`.

        ACCEPTS: float
        """
        self._dashx = float(x)
        self.stale = True

    def set_y(self, y):
        """
        Set the *y* position of the :class:`TextWithDash`.

        ACCEPTS: float
        """
        self._dashy = float(y)
        self.stale = True

    def set_transform(self, t):
        """
        Set the :class:`matplotlib.transforms.Transform` instance used
        by this artist.

        ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
        """
        # Keep the dash line's transform in sync with the text's.
        Text.set_transform(self, t)
        self.dashline.set_transform(t)
        self.stale = True

    def get_figure(self):
        'return the figure instance the artist belongs to'
        return self.figure

    def set_figure(self, fig):
        """
        Set the figure instance the artist belong to.

        ACCEPTS: a :class:`matplotlib.figure.Figure` instance
        """
        Text.set_figure(self, fig)
        self.dashline.set_figure(fig)
docstring.interpd.update(TextWithDash=artist.kwdoc(TextWithDash))
class OffsetFrom(object):
    """
    Callable helper that, given a renderer, returns an
    :class:`~matplotlib.transforms.Affine2D` transform whose origin is a
    reference point derived from an artist, bbox or transform, and whose
    scale converts offsets expressed in *unit* ("points" or "pixels")
    into display pixels.
    """

    def __init__(self, artist, ref_coord, unit="points"):
        # *artist* may be an Artist, a BboxBase or a Transform; see
        # __call__ for how *ref_coord* is interpreted for each case.
        self._artist = artist
        self._ref_coord = ref_coord
        self.set_unit(unit)

    def set_unit(self, unit):
        # Unit in which offsets from the reference point are expressed.
        if unit not in ["points", "pixels"]:
            raise ValueError("'unit' must be one of [ 'points' | 'pixels' ]")
        self._unit = unit

    def get_unit(self):
        # Return the current offset unit ("points" or "pixels").
        return self._unit

    def _get_scale(self, renderer):
        # Pixels per offset-unit for the given renderer.
        unit = self.get_unit()
        if unit == "pixels":
            return 1.
        else:
            return renderer.points_to_pixels(1.)

    def __call__(self, renderer):
        """
        Return the offset transform for *renderer*.
        """
        if isinstance(self._artist, Artist):
            # ref_coord is a fractional (x, y) inside the artist's bbox.
            bbox = self._artist.get_window_extent(renderer)
            l, b, w, h = bbox.bounds
            xf, yf = self._ref_coord
            x, y = l + w * xf, b + h * yf
        elif isinstance(self._artist, BboxBase):
            # Same, but the bbox is given directly.
            l, b, w, h = self._artist.bounds
            xf, yf = self._ref_coord
            x, y = l + w * xf, b + h * yf
        elif isinstance(self._artist, Transform):
            # ref_coord is a point mapped through the transform.
            x, y = self._artist.transform_point(self._ref_coord)
        else:
            raise RuntimeError("unknown type")

        sc = self._get_scale(renderer)
        tr = Affine2D().scale(sc, sc).translate(x, y)

        return tr
class _AnnotationBase(object):
    """
    Mixin holding the annotated point *xy*, its coordinate system
    *xycoords*, the clip behavior, and the machinery that resolves
    coordinate-system specifications into transforms.
    """

    def __init__(self,
                 xy,
                 xycoords='data',
                 annotation_clip=None):
        self.xy = xy
        self.xycoords = xycoords
        self.set_annotation_clip(annotation_clip)

        self._draggable = None

    def _get_xy(self, renderer, x, y, s):
        # Convert (x, y) given in coordinate spec *s* (possibly a pair of
        # specs, one per axis) into display coordinates.
        if isinstance(s, tuple):
            s1, s2 = s
        else:
            s1, s2 = s, s
        if s1 == 'data':
            x = float(self.convert_xunits(x))
        if s2 == 'data':
            y = float(self.convert_yunits(y))

        tr = self._get_xy_transform(renderer, s)
        x1, y1 = tr.transform_point((x, y))
        return x1, y1

    def _get_xy_transform(self, renderer, s):
        # Resolve a coordinate spec *s* into a Transform.  *s* may be a
        # pair of specs (blended per-axis), a callable, an Artist, a
        # BboxBase, a Transform, or one of the documented strings such as
        # "data", "polar", "figure points", "axes fraction", "offset ...".
        if isinstance(s, tuple):
            s1, s2 = s
            from matplotlib.transforms import blended_transform_factory
            tr1 = self._get_xy_transform(renderer, s1)
            tr2 = self._get_xy_transform(renderer, s2)
            tr = blended_transform_factory(tr1, tr2)
            return tr

        if six.callable(s):
            # A callable is expected to return either a bbox or a transform.
            tr = s(renderer)
            if isinstance(tr, BboxBase):
                return BboxTransformTo(tr)
            elif isinstance(tr, Transform):
                return tr
            else:
                raise RuntimeError("unknown return type ...")
        if isinstance(s, Artist):
            bbox = s.get_window_extent(renderer)
            return BboxTransformTo(bbox)
        elif isinstance(s, BboxBase):
            return BboxTransformTo(s)
        elif isinstance(s, Transform):
            return s
        elif not is_string_like(s):
            raise RuntimeError("unknown coordinate type : %s" % (s,))

        if s == 'data':
            return self.axes.transData
        elif s == 'polar':
            from matplotlib.projections import PolarAxes
            tr = PolarAxes.PolarTransform()
            trans = tr + self.axes.transData
            return trans

        # Remaining specs are two-word strings like "figure points".
        s_ = s.split()
        if len(s_) != 2:
            raise ValueError("%s is not a recognized coordinate" % s)

        bbox0, xy0 = None, None

        bbox_name, unit = s_
        # if unit is offset-like
        if bbox_name == "figure":
            bbox0 = self.figure.bbox
        elif bbox_name == "axes":
            bbox0 = self.axes.bbox
        # elif bbox_name == "bbox":
        #     if bbox is None:
        #         raise RuntimeError("bbox is specified as a coordinate but "
        #                            "never set")
        #     bbox0 = self._get_bbox(renderer, bbox)

        if bbox0 is not None:
            xy0 = bbox0.bounds[:2]
        elif bbox_name == "offset":
            xy0 = self._get_ref_xy(renderer)

        if xy0 is not None:
            # reference x, y in display coordinate
            ref_x, ref_y = xy0
            from matplotlib.transforms import Affine2D
            if unit == "points":
                # dots per points
                dpp = self.figure.get_dpi() / 72.
                tr = Affine2D().scale(dpp, dpp)
            elif unit == "pixels":
                tr = Affine2D()
            elif unit == "fontsize":
                fontsize = self.get_size()
                dpp = fontsize * self.figure.get_dpi() / 72.
                tr = Affine2D().scale(dpp,
                                      dpp)
            elif unit == "fraction":
                w, h = bbox0.bounds[2:]
                tr = Affine2D().scale(w, h)
            else:
                raise ValueError("%s is not a recognized coordinate" % s)

            return tr.translate(ref_x, ref_y)

        else:
            raise ValueError("%s is not a recognized coordinate" % s)

    def _get_ref_xy(self, renderer):
        """
        return x, y (in display coordinate) that is to be used for a reference
        of any offset coordinate
        """
        if isinstance(self.xycoords, tuple):
            s1, s2 = self.xycoords
            if ((is_string_like(s1) and s1.split()[0] == "offset") or
                    (is_string_like(s2) and s2.split()[0] == "offset")):
                raise ValueError("xycoords should not be an offset coordinate")
            x, y = self.xy
            # Blend: x from the first spec, y from the second.
            x1, y1 = self._get_xy(renderer, x, y, s1)
            x2, y2 = self._get_xy(renderer, x, y, s2)
            return x1, y2
        elif (is_string_like(self.xycoords) and
              self.xycoords.split()[0] == "offset"):
            raise ValueError("xycoords should not be an offset coordinate")
        else:
            x, y = self.xy
            return self._get_xy(renderer, x, y, self.xycoords)
        #raise RuntimeError("must be defined by the derived class")

    # def _get_bbox(self, renderer):
    #     if hasattr(bbox, "bounds"):
    #         return bbox
    #     elif hasattr(bbox, "get_window_extent"):
    #         bbox = bbox.get_window_extent()
    #         return bbox
    #     else:
    #         raise ValueError("A bbox instance is expected but got %s" %
    #                          str(bbox))

    def set_annotation_clip(self, b):
        """
        set *annotation_clip* attribute.

          * True: the annotation will only be drawn when self.xy is inside
                  the axes.
          * False: the annotation will always be drawn regardless of its
                   position.
          * None: the self.xy will be checked only if *xycoords* is "data"
        """
        self._annotation_clip = b

    def get_annotation_clip(self):
        """
        Return *annotation_clip* attribute.
        See :meth:`set_annotation_clip` for the meaning of return values.
        """
        return self._annotation_clip

    def _get_position_xy(self, renderer):
        "Return the pixel position of the annotated point."
        x, y = self.xy
        return self._get_xy(renderer, x, y, self.xycoords)

    def _check_xy(self, renderer, xy_pixel):
        """
        given the xy pixel coordinate, check if the annotation need to
        be drawn.
        """
        b = self.get_annotation_clip()
        if b or (b is None and self.xycoords == "data"):
            # check if self.xy is inside the axes.
            if not self.axes.contains_point(xy_pixel):
                return False

        return True

    def draggable(self, state=None, use_blit=False):
        """
        Set the draggable state -- if state is

          * None : toggle the current state

          * True : turn draggable on

          * False : turn draggable off

        If draggable is on, you can drag the annotation on the canvas with
        the mouse.  The DraggableAnnotation helper instance is returned if
        draggable is on.
        """
        from matplotlib.offsetbox import DraggableAnnotation
        is_draggable = self._draggable is not None

        # if state is None we'll toggle
        if state is None:
            state = not is_draggable

        if state:
            if self._draggable is None:
                self._draggable = DraggableAnnotation(self, use_blit)
        else:
            if self._draggable is not None:
                self._draggable.disconnect()
            self._draggable = None

        return self._draggable
class Annotation(Text, _AnnotationBase):
    """
    A :class:`~matplotlib.text.Text` class to make annotating things
    in the figure, such as :class:`~matplotlib.figure.Figure`,
    :class:`~matplotlib.axes.Axes`,
    :class:`~matplotlib.patches.Rectangle`, etc., easier.

    Annotate the *x*, *y* point *xy* with text *s* at *x*, *y*
    location *xytext*. (If *xytext* = *None*, defaults to *xy*,
    and if *textcoords* = *None*, defaults to *xycoords*).
    """

    def __str__(self):
        return "Annotation(%g,%g,%s)" % (self.xy[0],
                                         self.xy[1],
                                         repr(self._text))

    @docstring.dedent_interpd
    def __init__(self, s, xy,
                 xytext=None,
                 xycoords='data',
                 textcoords=None,
                 arrowprops=None,
                 annotation_clip=None,
                 **kwargs):
        """
        *arrowprops*, if not *None*, is a dictionary of line properties
        (see :class:`matplotlib.lines.Line2D`) for the arrow that connects
        annotation to the point.

        If the dictionary has a key *arrowstyle*, a
        `~matplotlib.patches.FancyArrowPatch` instance is created with
        the given dictionary and is drawn. Otherwise, a
        `~matplotlib.patches.YAArrow` patch instance is created and
        drawn. Valid keys for `~matplotlib.patches.YAArrow` are:

        =========   ===========================================================
        Key         Description
        =========   ===========================================================
        width       the width of the arrow in points
        frac        the fraction of the arrow length occupied by the head
        headwidth   the width of the base of the arrow head in points
        shrink      oftentimes it is convenient to have the arrowtip
                    and base a bit away from the text and point being
                    annotated.  If *d* is the distance between the text and
                    annotated point, shrink will shorten the arrow so the tip
                    and base are shink percent of the distance *d* away from
                    the endpoints.  i.e., ``shrink=0.05 is 5%%``
        ?           any key for :class:`matplotlib.patches.polygon`
        =========   ===========================================================

        Valid keys for `~matplotlib.patches.FancyArrowPatch` are:

        ===============  ======================================================
        Key              Description
        ===============  ======================================================
        arrowstyle       the arrow style
        connectionstyle  the connection style
        relpos           default is (0.5, 0.5)
        patchA           default is bounding box of the text
        patchB           default is None
        shrinkA          default is 2 points
        shrinkB          default is 2 points
        mutation_scale   default is text size (in points)
        mutation_aspect  default is 1.
        ?                any key for :class:`matplotlib.patches.PathPatch`
        ===============  ======================================================

        *xycoords* and *textcoords* are strings that indicate the
        coordinates of *xy* and *xytext*, and may be one of the
        following values:

        =================   ===================================================
        Property            Description
        =================   ===================================================
        'figure points'     points from the lower left corner of the figure
        'figure pixels'     pixels from the lower left corner of the figure
        'figure fraction'   0,0 is lower left of figure and 1,1 is upper right
        'axes points'       points from lower left corner of axes
        'axes pixels'       pixels from lower left corner of axes
        'axes fraction'     0,0 is lower left of axes and 1,1 is upper right
        'data'              use the coordinate system of the object being
                            annotated (default)
        'offset points'     Specify an offset (in points) from the *xy* value

        'polar'             you can specify *theta*, *r* for the annotation,
                            even in cartesian plots.  Note that if you
                            are using a polar axes, you do not need
                            to specify polar for the coordinate
                            system since that is the native "data" coordinate
                            system.
        =================   ===================================================

        If a 'points' or 'pixels' option is specified, values will be
        added to the bottom-left and if negative, values will be
        subtracted from the top-right.  e.g.::

          # 10 points to the right of the left border of the axes and
          # 5 points below the top border
          xy=(10,-5), xycoords='axes points'

        You may use an instance of
        :class:`~matplotlib.transforms.Transform` or
        :class:`~matplotlib.artist.Artist`. See
        :ref:`plotting-guide-annotation` for more details.

        The *annotation_clip* attribute controls the visibility of the
        annotation when it goes outside the axes area. If `True`, the
        annotation will only be drawn when the *xy* is inside the
        axes. If `False`, the annotation will always be drawn
        regardless of its position. The default is `None`, which
        behave as `True` only if *xycoords* is "data".

        Additional kwargs are `~matplotlib.text.Text` properties:

        %(Text)s
        """
        _AnnotationBase.__init__(self,
                                 xy,
                                 xycoords=xycoords,
                                 annotation_clip=annotation_clip)
        # warn about wonky input data
        if (xytext is None and
                textcoords is not None and
                textcoords != xycoords):
            warnings.warn("You have used the `textcoords` kwarg, but not "
                          "the `xytext` kwarg.  This can lead to surprising "
                          "results.")

        # clean up textcoords and assign default
        if textcoords is None:
            textcoords = self.xycoords
        self._textcoords = textcoords

        # cleanup xytext defaults
        if xytext is None:
            xytext = self.xy
        x, y = xytext

        Text.__init__(self, x, y, s, **kwargs)

        self.arrowprops = arrowprops

        self.arrow = None

        if arrowprops:
            if "arrowstyle" in arrowprops:
                # FancyArrowPatch route: forward everything but relpos.
                arrowprops = self.arrowprops.copy()
                self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
            else:
                # modified YAArrow API to be used with FancyArrowPatch
                shapekeys = ('width', 'headwidth', 'headlength',
                             'shrink', 'frac')
                arrowprops = dict()
                for key, val in self.arrowprops.items():
                    if key not in shapekeys:
                        arrowprops[key] = val  # basic Patch properties
            self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
                                               **arrowprops)
        else:
            self.arrow_patch = None

    def contains(self, event):
        # The annotation is hit if the text, the legacy arrow or the
        # FancyArrowPatch contains the event.
        contains, tinfo = Text.contains(self, event)
        if self.arrow is not None:
            in_arrow, _ = self.arrow.contains(event)
            contains = contains or in_arrow
        if self.arrow_patch is not None:
            in_patch, _ = self.arrow_patch.contains(event)
            contains = contains or in_patch

        return contains, tinfo

    @property
    def xyann(self):
        # Position of the annotation text (i.e. the *xytext* argument).
        return self.get_position()

    @xyann.setter
    def xyann(self, xytext):
        self.set_position(xytext)

    @property
    def anncoords(self):
        # Coordinate system of the annotation text.
        return self._textcoords

    @anncoords.setter
    def anncoords(self, coords):
        self._textcoords = coords

    def set_figure(self, fig):
        # Propagate the figure to the arrow artists as well.
        if self.arrow is not None:
            self.arrow.set_figure(fig)
        if self.arrow_patch is not None:
            self.arrow_patch.set_figure(fig)
        Artist.set_figure(self, fig)

    def update_positions(self, renderer):
        """Update the pixel positions of the annotated point and the
        text.
        """
        xy_pixel = self._get_position_xy(renderer)
        self._update_position_xytext(renderer, xy_pixel)

    def _update_position_xytext(self, renderer, xy_pixel):
        """Update the pixel positions of the annotation text and the arrow
        patch.
        """
        # generate transformation,
        self.set_transform(self._get_xy_transform(renderer, self.anncoords))

        ox0, oy0 = self._get_xy_display()
        ox1, oy1 = xy_pixel

        if self.arrowprops:
            x0, y0 = xy_pixel
            l, b, w, h = Text.get_window_extent(self, renderer).bounds
            r = l + w
            t = b + h
            xc = 0.5 * (l + r)
            yc = 0.5 * (b + t)

            d = self.arrowprops.copy()
            ms = d.pop("mutation_scale", self.get_size())
            ms = renderer.points_to_pixels(ms)
            self.arrow_patch.set_mutation_scale(ms)

            if "arrowstyle" not in d:
                # Approximately simulate the YAArrow.
                # Pop its kwargs:
                shrink = d.pop('shrink', 0.0)
                width = d.pop('width', 4)
                headwidth = d.pop('headwidth', 12)
                # Ignore frac--it is useless.
                frac = d.pop('frac', None)
                if frac is not None:
                    warnings.warn(
                        "'frac' option in 'arrowstyle' is no longer supported;"
                        " use 'headlength' to set the head length in points.")
                headlength = d.pop('headlength', 12)

                to_style = self.figure.dpi / (72 * ms)

                stylekw = dict(head_length=headlength * to_style,
                               head_width=headwidth * to_style,
                               tail_width=width * to_style)

                self.arrow_patch.set_arrowstyle('simple', **stylekw)

                # using YAArrow style:
                # pick the x,y corner of the text bbox closest to point
                # annotated
                xpos = ((l, 0), (xc, 0.5), (r, 1))
                ypos = ((b, 0), (yc, 0.5), (t, 1))

                dsu = [(abs(val[0] - x0), val) for val in xpos]
                dsu.sort()
                _, (x, relposx) = dsu[0]

                dsu = [(abs(val[0] - y0), val) for val in ypos]
                dsu.sort()
                _, (y, relposy) = dsu[0]

                self._arrow_relpos = (relposx, relposy)

                r = np.hypot((y - y0), (x - x0))
                shrink_pts = shrink * r / renderer.points_to_pixels(1)
                self.arrow_patch.shrinkA = shrink_pts
                self.arrow_patch.shrinkB = shrink_pts

            # adjust the starting point of the arrow relative to
            # the textbox.
            # TODO : Rotation needs to be accounted.
            relpos = self._arrow_relpos
            bbox = Text.get_window_extent(self, renderer)
            ox0 = bbox.x0 + bbox.width * relpos[0]
            oy0 = bbox.y0 + bbox.height * relpos[1]

            # The arrow will be drawn from (ox0, oy0) to (ox1,
            # oy1). It will be first clipped by patchA and patchB.
            # Then it will be shrunk by shirnkA and shrinkB
            # (in points). If patch A is not set, self.bbox_patch
            # is used.
            self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))

            if "patchA" in d:
                self.arrow_patch.set_patchA(d.pop("patchA"))
            else:
                if self._bbox_patch:
                    self.arrow_patch.set_patchA(self._bbox_patch)
                else:
                    pad = renderer.points_to_pixels(4)
                    if self.get_text().strip() == "":
                        # No visible text: do not clip the arrow at all.
                        self.arrow_patch.set_patchA(None)
                        return

                    bbox = Text.get_window_extent(self, renderer)
                    l, b, w, h = bbox.bounds
                    l -= pad / 2.
                    b -= pad / 2.
                    w += pad
                    h += pad
                    r = Rectangle(xy=(l, b),
                                  width=w,
                                  height=h,
                                  )
                    r.set_transform(mtransforms.IdentityTransform())
                    r.set_clip_on(False)

                    self.arrow_patch.set_patchA(r)

    @allow_rasterization
    def draw(self, renderer):
        """
        Draw the :class:`Annotation` object to the given *renderer*.
        """
        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible():
            return

        xy_pixel = self._get_position_xy(renderer)
        if not self._check_xy(renderer, xy_pixel):
            return

        self._update_position_xytext(renderer, xy_pixel)
        self.update_bbox_position_size(renderer)

        if self.arrow_patch is not None:   # FancyArrowPatch
            if self.arrow_patch.figure is None and self.figure is not None:
                self.arrow_patch.figure = self.figure
            self.arrow_patch.draw(renderer)

        # Draw text, including FancyBboxPatch, after FancyArrowPatch.
        # Otherwise, a wedge arrowstyle can land partly on top of the Bbox.
        Text.draw(self, renderer)

    def get_window_extent(self, renderer=None):
        '''
        Return a :class:`~matplotlib.transforms.Bbox` object bounding
        the text and arrow annotation, in display units.

        *renderer* defaults to the _renderer attribute of the text
        object.  This is not assigned until the first execution of
        :meth:`draw`, so you must use this kwarg if you want
        to call :meth:`get_window_extent` prior to the first
        :meth:`draw`.  For getting web page regions, it is
        simpler to call the method after saving the figure. The
        *dpi* used defaults to self.figure.dpi; the renderer dpi is
        irrelevant.
        '''
        if not self.get_visible():
            return Bbox.unit()

        arrow = self.arrow
        arrow_patch = self.arrow_patch

        text_bbox = Text.get_window_extent(self, renderer=renderer)
        bboxes = [text_bbox]

        if self.arrow is not None:
            bboxes.append(arrow.get_window_extent(renderer=renderer))
        elif self.arrow_patch is not None:
            bboxes.append(arrow_patch.get_window_extent(renderer=renderer))

        return Bbox.union(bboxes)
docstring.interpd.update(Annotation=Annotation.__init__.__doc__)
| mit |
rtavenar/tslearn | tslearn/metrics.py | 1 | 57713 | """
The :mod:`tslearn.metrics` module gathers time series similarity metrics.
"""
import warnings
import numpy
from joblib import Parallel, delayed
from numba import njit, prange
from scipy.spatial.distance import pdist, cdist
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils import check_random_state
from tslearn.soft_dtw_fast import _soft_dtw, _soft_dtw_grad, \
_jacobian_product_sq_euc
from tslearn.utils import to_time_series, to_time_series_dataset, ts_size, \
check_equal_size
__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'

# Mapping from user-facing global-constraint names to the integer codes
# used by the low-level implementations (0 means "no constraint").
GLOBAL_CONSTRAINT_CODE = {None: 0, "": 0, "itakura": 1, "sakoe_chiba": 2}
# Metrics that accept datasets of variable-length time series.
VARIABLE_LENGTH_METRICS = ["dtw", "gak", "softdtw"]
@njit()
def _local_squared_dist(x, y):
    # Squared Euclidean distance between two feature vectors of equal length.
    total = 0.
    for d in range(x.shape[0]):
        delta = x[d] - y[d]
        total += delta * delta
    return total
@njit()
def njit_accumulated_matrix(s1, s2, mask):
    """Compute the accumulated cost matrix score between two time series.

    Parameters
    ----------
    s1 : array, shape = (sz1,)
        First time series.

    s2 : array, shape = (sz2,)
        Second time series

    mask : array, shape = (sz1, sz2)
        Mask. Unconsidered cells must have infinite values.

    Returns
    -------
    mat : array, shape = (sz1, sz2)
        Accumulated cost matrix.
    """
    l1 = s1.shape[0]
    l2 = s2.shape[0]
    # Extra leading row/column of +inf acts as a sentinel so the recurrence
    # below needs no boundary special-casing.
    cum_sum = numpy.full((l1 + 1, l2 + 1), numpy.inf)
    cum_sum[0, 0] = 0.

    for i in range(l1):
        for j in range(l2):
            # Cells masked with a non-finite value stay unreachable (+inf).
            if numpy.isfinite(mask[i, j]):
                cum_sum[i + 1, j + 1] = _local_squared_dist(s1[i], s2[j])
                # Classical DTW recurrence: add the best of the three
                # predecessor cells (insertion, deletion, match).
                cum_sum[i + 1, j + 1] += min(cum_sum[i, j + 1],
                                             cum_sum[i + 1, j],
                                             cum_sum[i, j])
    return cum_sum[1:, 1:]
@njit(nogil=True)
def njit_dtw(s1, s2, mask):
    """Compute the dynamic time warping score between two time series.

    Parameters
    ----------
    s1 : array, shape = (sz1,)
        First time series.

    s2 : array, shape = (sz2,)
        Second time series

    mask : array, shape = (sz1, sz2)
        Mask. Unconsidered cells must have infinite values.

    Returns
    -------
    dtw_score : float
        Dynamic Time Warping score between both time series.
    """
    cum_sum = njit_accumulated_matrix(s1, s2, mask)
    # The DTW score is the square root of the total squared distance
    # accumulated along the optimal path (bottom-right cell).
    return numpy.sqrt(cum_sum[-1, -1])
@njit()
def _return_path(acc_cost_mat):
    # Backtrack through the accumulated cost matrix from the bottom-right
    # corner to (0, 0), moving at each step to the predecessor cell with
    # the smallest accumulated cost.  Returns the optimal warping path as
    # a list of (index_s1, index_s2) pairs in increasing order.
    sz1, sz2 = acc_cost_mat.shape
    path = [(sz1 - 1, sz2 - 1)]
    while path[-1] != (0, 0):
        i, j = path[-1]
        if i == 0:
            # First row: can only move left.
            path.append((0, j - 1))
        elif j == 0:
            # First column: can only move up.
            path.append((i - 1, 0))
        else:
            # Choose among diagonal, up and left predecessors; ties favor
            # the diagonal, then the vertical move (numpy.argmin order).
            arr = numpy.array([acc_cost_mat[i - 1][j - 1],
                               acc_cost_mat[i - 1][j],
                               acc_cost_mat[i][j - 1]])
            argmin = numpy.argmin(arr)
            if argmin == 0:
                path.append((i - 1, j - 1))
            elif argmin == 1:
                path.append((i - 1, j))
            else:
                path.append((i, j - 1))
    return path[::-1]
def dtw_path(s1, s2, global_constraint=None, sakoe_chiba_radius=None,
             itakura_max_slope=None):
    r"""Compute Dynamic Time Warping (DTW) similarity measure between
    (possibly multidimensional) time series and return both the path and the
    similarity.

    DTW is computed as the Euclidean distance between aligned time series,
    i.e., if :math:`P` is the alignment path:

    .. math::

        DTW(X, Y) = \sqrt{\sum_{(i, j) \in P} (X_{i} - Y_{j})^2}

    Both series may differ in length but must share the same dimension.
    DTW was originally presented in [1]_.

    Parameters
    ----------
    s1
        A time series.
    s2
        Another time series.
    global_constraint : {"itakura", "sakoe_chiba"} or None (default: None)
        Global constraint restricting admissible warping paths.
    sakoe_chiba_radius : int or None (default: None)
        Radius of the Sakoe-Chiba band (defaults to 1 when that constraint
        is selected). When both radius and slope are set, `global_constraint`
        decides which applies; if it selects no constraint, a
        `RuntimeWarning` is raised and none is used.
    itakura_max_slope : float or None (default: None)
        Maximum slope of the Itakura parallelogram (defaults to 2. when that
        constraint is selected); same disambiguation rule as above.

    Returns
    -------
    list of integer pairs
        Matching path as (index in s1, index in s2) pairs.
    float
        Similarity score.

    Examples
    --------
    >>> path, dist = dtw_path([1, 2, 3], [1., 2., 2., 3.])
    >>> path
    [(0, 0), (1, 1), (1, 2), (2, 3)]
    >>> dist
    0.0
    >>> dtw_path([1, 2, 3], [1., 2., 2., 3., 4.])[1]
    1.0

    See Also
    --------
    dtw : Get only the similarity score for DTW
    cdist_dtw : Cross similarity matrix between time series datasets

    References
    ----------
    .. [1] H. Sakoe, S. Chiba, "Dynamic programming algorithm optimization for
           spoken word recognition," IEEE Transactions on Acoustics, Speech and
           Signal Processing, vol. 26(1), pp. 43--49, 1978.
    """
    series1 = to_time_series(s1, remove_nans=True)
    series2 = to_time_series(s2, remove_nans=True)
    # Translate the user-facing constraint name into its integer code and
    # build the corresponding admissibility mask.
    region = compute_mask(
        series1, series2, GLOBAL_CONSTRAINT_CODE[global_constraint],
        sakoe_chiba_radius, itakura_max_slope
    )
    acc = njit_accumulated_matrix(series1, series2, mask=region)
    best_path = _return_path(acc)
    return best_path, numpy.sqrt(acc[-1, -1])
def dtw(s1, s2, global_constraint=None, sakoe_chiba_radius=None,
        itakura_max_slope=None):
    r"""Compute Dynamic Time Warping (DTW) similarity measure between
    (possibly multidimensional) time series and return it.

    DTW is computed as the Euclidean distance between aligned time series,
    i.e., if :math:`P` is the optimal alignment path:

    .. math::

        DTW(X, Y) = \sqrt{\sum_{(i, j) \in P} \|X_{i} - Y_{j}\|^2}

    This formula also holds in the multivariate case. Both series may differ
    in length but must share the same dimension. DTW was originally
    presented in [1]_.

    Parameters
    ----------
    s1
        A time series.
    s2
        Another time series.
    global_constraint : {"itakura", "sakoe_chiba"} or None (default: None)
        Global constraint restricting admissible warping paths.
    sakoe_chiba_radius : int or None (default: None)
        Radius of the Sakoe-Chiba band (defaults to 1 when that constraint
        is selected). When both radius and slope are set, `global_constraint`
        decides which applies; if it selects no constraint, a
        `RuntimeWarning` is raised and none is used.
    itakura_max_slope : float or None (default: None)
        Maximum slope of the Itakura parallelogram (defaults to 2. when that
        constraint is selected); same disambiguation rule as above.

    Returns
    -------
    float
        Similarity score

    Examples
    --------
    >>> dtw([1, 2, 3], [1., 2., 2., 3.])
    0.0
    >>> dtw([1, 2, 3], [1., 2., 2., 3., 4.])
    1.0

    See Also
    --------
    dtw_path : Get both the matching path and the similarity score for DTW
    cdist_dtw : Cross similarity matrix between time series datasets

    References
    ----------
    .. [1] H. Sakoe, S. Chiba, "Dynamic programming algorithm optimization for
           spoken word recognition," IEEE Transactions on Acoustics, Speech and
           Signal Processing, vol. 26(1), pp. 43--49, 1978.
    """
    series1 = to_time_series(s1, remove_nans=True)
    series2 = to_time_series(s2, remove_nans=True)
    # Build the admissibility region from the requested global constraint.
    region = compute_mask(
        series1, series2,
        GLOBAL_CONSTRAINT_CODE[global_constraint],
        sakoe_chiba_radius=sakoe_chiba_radius,
        itakura_max_slope=itakura_max_slope)
    return njit_dtw(series1, series2, mask=region)
def _max_steps(i, j, max_length, length_1, length_2):
"""Maximum number of steps required in a L-DTW process to reach a given
cell.
Parameters
----------
i : int
Cell row index
j : int
Cell column index
max_length : int
Maximum allowed length
length_1 : int
Length of the first time series
length_2 : int
Length of the second time series
Returns
-------
int
Number of steps
"""
candidate_1 = i + j
candidate_2 = max_length - max(length_1 - i - 1, length_2 - j - 1)
return min(candidate_1, candidate_2)
def _limited_warping_length_cost(s1, s2, max_length):
    r"""Compute accumulated scores necessary for L-DTW.

    Parameters
    ----------
    s1
        A time series.
    s2
        Another time series.
    max_length : int
        Maximum allowed warping path length. Should be an integer between
        XXX and YYY.  # TODO

    Returns
    -------
    dict
        Accumulated scores. This dict associates (i, j) pairs (keys) to
        dictionnaries with desired length as key and associated score as
        value.
    """
    dict_costs = {(i, j): {}
                  for i in range(s1.shape[0])
                  for j in range(s2.shape[0])}
    # Origin cell: a single alignment of length 0.
    dict_costs[0, 0][0] = _local_squared_dist(s1[0], s2[0])
    # First column and first row each admit exactly one path, whose length
    # equals the cell's distance from the origin.
    for i in range(1, s1.shape[0]):
        dict_costs[i, 0][i] = (dict_costs[i - 1, 0][i - 1]
                               + _local_squared_dist(s1[i], s2[0]))
    for j in range(1, s2.shape[0]):
        dict_costs[0, j][j] = (dict_costs[0, j - 1][j - 1]
                               + _local_squared_dist(s1[0], s2[j]))
    # Interior cells: enumerate every admissible path length for the cell.
    for i in range(1, s1.shape[0]):
        for j in range(1, s2.shape[0]):
            lo = max(i, j)
            hi = _max_steps(i, j, max_length - 1, s1.shape[0], s2.shape[0])
            for s in range(lo, hi + 1):
                best_pred = min(
                    dict_costs[i, j - 1].get(s - 1, numpy.inf),
                    dict_costs[i - 1, j].get(s - 1, numpy.inf),
                    dict_costs[i - 1, j - 1].get(s - 1, numpy.inf)
                )
                dict_costs[i, j][s] = (
                    _local_squared_dist(s1[i], s2[j]) + best_pred
                )
    return dict_costs
def dtw_limited_warping_length(s1, s2, max_length):
    r"""Compute Dynamic Time Warping (DTW) similarity measure between
    (possibly multidimensional) time series under an upper bound constraint
    on the resulting path length and return the similarity cost.

    DTW is computed as the Euclidean distance between aligned time series,
    i.e., if :math:`P` is the optimal alignment path:

    .. math::

        DTW(X, Y) = \sqrt{\sum_{(i, j) \in P} \|X_{i} - Y_{j}\|^2}

    Both series may differ in length but must share the same dimension.
    DTW was originally presented in [1]_; this constrained-length variant
    was introduced in [2]_.

    Parameters
    ----------
    s1
        A time series.
    s2
        Another time series.
    max_length : int
        Maximum allowed warping path length. Values above
        len(s1) + len(s2) are equivalent to unconstrained DTW; values below
        max(len(s1), len(s2)) admit no path and raise a ValueError.

    Returns
    -------
    float
        Similarity score

    Examples
    --------
    >>> dtw_limited_warping_length([1, 2, 3], [1., 2., 2., 3.], 5)
    0.0
    >>> dtw_limited_warping_length([1, 2, 3], [1., 2., 2., 3., 4.], 5)
    1.0

    See Also
    --------
    dtw : Get the similarity score for DTW
    dtw_path_limited_warping_length : Get both the warping path and the
        similarity score for DTW with limited warping path length

    References
    ----------
    .. [1] H. Sakoe, S. Chiba, "Dynamic programming algorithm optimization for
           spoken word recognition," IEEE Transactions on Acoustics, Speech and
           Signal Processing, vol. 26(1), pp. 43--49, 1978.
    .. [2] Z. Zhang, R. Tavenard, A. Bailly, X. Tang, P. Tang, T. Corpetti
           Dynamic time warping under limited warping path length.
           Information Sciences, vol. 393, pp. 91--107, 2017.
    """
    series1 = to_time_series(s1, remove_nans=True)
    series2 = to_time_series(s2, remove_nans=True)
    if max_length < max(series1.shape[0], series2.shape[0]):
        raise ValueError("Cannot find a path of length {} to align given "
                         "time series.".format(max_length))
    costs = _limited_warping_length_cost(series1, series2, max_length)
    end = (series1.shape[0] - 1, series2.shape[0] - 1)
    # Best cost over all admissible path lengths at the terminal cell.
    return numpy.sqrt(min(costs[end].values()))
def _return_path_limited_warping_length(accum_costs, target_indices,
optimal_length):
path = [target_indices]
cur_length = optimal_length
while path[-1] != (0, 0):
i, j = path[-1]
if i == 0:
path.append((0, j - 1))
elif j == 0:
path.append((i - 1, 0))
else:
arr = numpy.array(
[accum_costs[i - 1, j - 1].get(cur_length - 1, numpy.inf),
accum_costs[i - 1, j].get(cur_length - 1, numpy.inf),
accum_costs[i, j - 1].get(cur_length - 1, numpy.inf)]
)
argmin = numpy.argmin(arr)
if argmin == 0:
path.append((i - 1, j - 1))
elif argmin == 1:
path.append((i - 1, j))
else:
path.append((i, j - 1))
cur_length -= 1
return path[::-1]
def dtw_path_limited_warping_length(s1, s2, max_length):
    r"""Compute Dynamic Time Warping (DTW) similarity measure between
    (possibly multidimensional) time series under an upper bound constraint
    on the resulting path length and return the path as well as the
    similarity cost.

    DTW is computed as the Euclidean distance between aligned time series,
    i.e., if :math:`P` is the optimal alignment path:

    .. math::

        DTW(X, Y) = \sqrt{\sum_{(i, j) \in P} \|X_{i} - Y_{j}\|^2}

    Both series may differ in length but must share the same dimension.
    DTW was originally presented in [1]_; this constrained-length variant
    was introduced in [2]_.

    Parameters
    ----------
    s1
        A time series.
    s2
        Another time series.
    max_length : int
        Maximum allowed warping path length. Values above
        len(s1) + len(s2) are equivalent to unconstrained DTW; values below
        max(len(s1), len(s2)) admit no path and raise a ValueError.

    Returns
    -------
    list of integer pairs
        Optimal path
    float
        Similarity score

    Examples
    --------
    >>> path, cost = dtw_path_limited_warping_length([1, 2, 3],
    ...                                              [1., 2., 2., 3.], 5)
    >>> cost
    0.0
    >>> path
    [(0, 0), (1, 1), (1, 2), (2, 3)]
    >>> path, cost = dtw_path_limited_warping_length([1, 2, 3],
    ...                                              [1., 2., 2., 3., 4.], 5)
    >>> cost
    1.0
    >>> path
    [(0, 0), (1, 1), (1, 2), (2, 3), (2, 4)]

    See Also
    --------
    dtw_limited_warping_length : Get the similarity score for DTW with
        limited warping path length
    dtw_path : Get both the matching path and the similarity score for DTW

    References
    ----------
    .. [1] H. Sakoe, S. Chiba, "Dynamic programming algorithm optimization for
           spoken word recognition," IEEE Transactions on Acoustics, Speech and
           Signal Processing, vol. 26(1), pp. 43--49, 1978.
    .. [2] Z. Zhang, R. Tavenard, A. Bailly, X. Tang, P. Tang, T. Corpetti
           Dynamic time warping under limited warping path length.
           Information Sciences, vol. 393, pp. 91--107, 2017.
    """
    series1 = to_time_series(s1, remove_nans=True)
    series2 = to_time_series(s2, remove_nans=True)
    if max_length < max(series1.shape[0], series2.shape[0]):
        raise ValueError("Cannot find a path of length {} to align given "
                         "time series.".format(max_length))
    costs = _limited_warping_length_cost(series1, series2, max_length)
    end = (series1.shape[0] - 1, series2.shape[0] - 1)
    # Pick the path length achieving the lowest accumulated cost; a
    # sequential scan semantics means the first minimum wins ties, which is
    # exactly what min() over items() gives.
    optimal_length, optimal_cost = min(costs[end].items(),
                                       key=lambda item: item[1])
    path = _return_path_limited_warping_length(costs, end, optimal_length)
    return path, numpy.sqrt(optimal_cost)
@njit()
def _subsequence_cost_matrix(subseq, longseq):
    """Accumulated cost matrix for subsequence DTW (numba kernel).
    Unlike the standard DTW matrix, the whole first padded row is set to 0
    so the subsequence may start anywhere in the long sequence.
    """
    l1 = subseq.shape[0]
    l2 = longseq.shape[0]
    cum_sum = numpy.full((l1 + 1, l2 + 1), numpy.inf)
    # Free start: aligning subseq[0] against any position of longseq costs
    # nothing before the first match.
    cum_sum[0, :] = 0.
    for i in range(l1):
        for j in range(l2):
            cum_sum[i + 1, j + 1] = _local_squared_dist(subseq[i], longseq[j])
            cum_sum[i + 1, j + 1] += min(cum_sum[i, j + 1],
                                         cum_sum[i + 1, j],
                                         cum_sum[i, j])
    return cum_sum[1:, 1:]
def subsequence_cost_matrix(subseq, longseq):
    """Compute the accumulated cost matrix score between a subsequence and
    a reference time series.

    The first row of the underlying recursion is initialized to zero so
    that the subsequence may start at any position of the reference.

    Parameters
    ----------
    subseq : array, shape = (sz1, d)
        Subsequence time series.
    longseq : array, shape = (sz2, d)
        Reference time series

    Returns
    -------
    mat : array, shape = (sz1, sz2)
        Accumulated cost matrix.
    """
    # Thin wrapper around the numba-compiled kernel.
    return _subsequence_cost_matrix(subseq, longseq)
@njit()
def _subsequence_path(acc_cost_mat, idx_path_end):
    """Backtrack a subsequence accumulated cost matrix from
    (sz1 - 1, idx_path_end) until the first row of the matrix is reached,
    and return the resulting (i, j) path in forward order.
    """
    sz1, sz2 = acc_cost_mat.shape
    path = [(sz1 - 1, idx_path_end)]
    # Stop as soon as the subsequence's first element is aligned: the start
    # position within the long sequence is free.
    while path[-1][0] != 0:
        i, j = path[-1]
        if i == 0:
            # NOTE(review): unreachable given the loop guard (i != 0 inside
            # the loop); kept for symmetry with _return_path.
            path.append((0, j - 1))
        elif j == 0:
            path.append((i - 1, 0))
        else:
            # Cheapest of the three predecessors; ties go to the diagonal.
            arr = numpy.array([acc_cost_mat[i - 1][j - 1],
                               acc_cost_mat[i - 1][j],
                               acc_cost_mat[i][j - 1]])
            argmin = numpy.argmin(arr)
            if argmin == 0:
                path.append((i - 1, j - 1))
            elif argmin == 1:
                path.append((i - 1, j))
            else:
                path.append((i, j - 1))
    return path[::-1]
def subsequence_path(acc_cost_mat, idx_path_end):
    r"""Compute the optimal path through an accumulated cost matrix given
    the endpoint of the sequence.

    Parameters
    ----------
    acc_cost_mat: array, shape = (sz1, sz2)
        The accumulated cost matrix comparing subsequence from a longer
        sequence
    idx_path_end: int
        The end position of the matched subsequence in the longer sequence.

    Returns
    -------
    path: list of tuples of integer pairs
        Matching path represented as a list of index pairs. In each pair,
        the first index corresponds to `subseq` and the second one
        corresponds to `longseq`. The path starts at :math:`P_0 = (0, ?)`
        and ends at :math:`P_L = (len(subseq)-1, idx\_path\_end)`

    Examples
    --------
    >>> acc_cost_mat = numpy.array([[1., 0., 0., 1., 4.],
    ...                             [5., 1., 1., 0., 1.]])
    >>> # calculate the globally optimal path
    >>> optimal_end_point = numpy.argmin(acc_cost_mat[-1, :])
    >>> path = subsequence_path(acc_cost_mat, optimal_end_point)
    >>> path
    [(0, 2), (1, 3)]

    See Also
    --------
    dtw_subsequence_path : Get the similarity score for DTW
    subsequence_cost_matrix: Calculate the required cost matrix
    """
    # Thin wrapper around the numba-compiled backtracking kernel.
    return _subsequence_path(acc_cost_mat, idx_path_end)
def dtw_subsequence_path(subseq, longseq):
    r"""Compute sub-sequence Dynamic Time Warping (DTW) similarity measure
    between a (possibly multidimensional) query and a long time series and
    return both the path and the similarity.

    DTW is computed as the Euclidean distance between aligned time series,
    i.e., if :math:`P` is the alignment path:

    .. math::

        DTW(X, Y) = \sqrt{\sum_{(i, j) \in P} (X_{i} - Y_{j})^2}

    Compared to traditional DTW, border constraints on admissible paths
    :math:`P` are relaxed such that :math:`P_0 = (0, ?)` and
    :math:`P_L = (N-1, ?)` where :math:`L` is the length of the considered
    path and :math:`N` is the length of the subsequence time series. Both
    series may differ in length but must share the same dimension; the best
    matching start and end positions of `subseq` inside `longseq` are found.

    Parameters
    ----------
    subseq : array, shape = (sz1, d)
        A query time series.
    longseq : array, shape = (sz2, d)
        A reference (supposed to be longer than `subseq`) time series.

    Returns
    -------
    list of integer pairs
        Matching path represented as a list of index pairs. In each pair,
        the first index corresponds to `subseq` and the second one
        corresponds to `longseq`.
    float
        Similarity score

    Examples
    --------
    >>> path, dist = dtw_subsequence_path([2., 3.], [1., 2., 2., 3., 4.])
    >>> path
    [(0, 2), (1, 3)]
    >>> dist
    0.0

    See Also
    --------
    dtw : Get the similarity score for DTW
    subsequence_cost_matrix: Calculate the required cost matrix
    subsequence_path: Calculate a matching path manually
    """
    subseq = to_time_series(subseq)
    longseq = to_time_series(longseq)
    acc = subsequence_cost_matrix(subseq=subseq, longseq=longseq)
    # The cheapest cell of the last row marks the best end position of the
    # query inside the reference.
    end_idx = numpy.argmin(acc[-1, :])
    return subsequence_path(acc, end_idx), numpy.sqrt(acc[-1, end_idx])
@njit()
def sakoe_chiba_mask(sz1, sz2, radius=1):
    """Compute the Sakoe-Chiba mask.
    Parameters
    ----------
    sz1 : int
        The size of the first time series
    sz2 : int
        The size of the second time series.
    radius : int
        The radius of the band.
    Returns
    -------
    mask : array, shape = (sz1, sz2)
        Sakoe-Chiba mask.
    Examples
    --------
    >>> sakoe_chiba_mask(4, 4, 1)
    array([[ 0.,  0., inf, inf],
           [ 0.,  0.,  0., inf],
           [inf,  0.,  0.,  0.],
           [inf, inf,  0.,  0.]])
    >>> sakoe_chiba_mask(7, 3, 1)
    array([[ 0.,  0., inf],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  0.],
           [inf,  0.,  0.]])
    """
    # Start from an all-inadmissible mask and carve out the band.
    mask = numpy.full((sz1, sz2), numpy.inf)
    if sz1 > sz2:
        # For unequal lengths the band is widened by the length difference
        # so that the corners (0, 0) and (sz1-1, sz2-1) remain reachable.
        width = sz1 - sz2 + radius
        for i in prange(sz2):
            lower = max(0, i - radius)
            upper = min(sz1, i + width) + 1
            mask[lower:upper, i] = 0.
    else:
        width = sz2 - sz1 + radius
        for i in prange(sz1):
            lower = max(0, i - radius)
            upper = min(sz2, i + width) + 1
            mask[i, lower:upper] = 0.
    return mask
@njit()
def _njit_itakura_mask(sz1, sz2, max_slope=2.):
    """Compute the Itakura mask without checking that the constraints
    are feasible. In most cases, you should use itakura_mask instead.
    Parameters
    ----------
    sz1 : int
        The size of the first time series
    sz2 : int
        The size of the second time series.
    max_slope : float (default = 2)
        The maximum slope of the parallelogram.
    Returns
    -------
    mask : array, shape = (sz1, sz2)
        Itakura mask.
    """
    min_slope = 1 / float(max_slope)
    # Rescale both slopes to account for the aspect ratio of the matrix.
    max_slope *= (float(sz1) / float(sz2))
    min_slope *= (float(sz1) / float(sz2))
    # Lower border of the parallelogram: the max of the two bounding lines
    # (one from each corner).
    lower_bound = numpy.empty((2, sz2))
    lower_bound[0] = min_slope * numpy.arange(sz2)
    lower_bound[1] = ((sz1 - 1) - max_slope * (sz2 - 1)
                      + max_slope * numpy.arange(sz2))
    lower_bound_ = numpy.empty(sz2)
    for i in prange(sz2):
        # NOTE(review): rounding to 2 decimals before taking the max
        # presumably guards against floating-point noise right at the
        # parallelogram borders — confirm before changing.
        lower_bound_[i] = max(round(lower_bound[0, i], 2),
                              round(lower_bound[1, i], 2))
    lower_bound_ = numpy.ceil(lower_bound_)
    # Upper border: the min of the two symmetric bounding lines.
    upper_bound = numpy.empty((2, sz2))
    upper_bound[0] = max_slope * numpy.arange(sz2)
    upper_bound[1] = ((sz1 - 1) - min_slope * (sz2 - 1)
                      + min_slope * numpy.arange(sz2))
    upper_bound_ = numpy.empty(sz2)
    for i in prange(sz2):
        upper_bound_[i] = min(round(upper_bound[0, i], 2),
                              round(upper_bound[1, i], 2))
    upper_bound_ = numpy.floor(upper_bound_ + 1)
    # Open the admissible column span in each column of the mask.
    mask = numpy.full((sz1, sz2), numpy.inf)
    for i in prange(sz2):
        mask[int(lower_bound_[i]):int(upper_bound_[i]), i] = 0.
    return mask
def itakura_mask(sz1, sz2, max_slope=2.):
    """Compute the Itakura mask.

    Parameters
    ----------
    sz1 : int
        The size of the first time series
    sz2 : int
        The size of the second time series.
    max_slope : float (default = 2)
        The maximum slope of the parallelogram.

    Returns
    -------
    mask : array, shape = (sz1, sz2)
        Itakura mask.

    Examples
    --------
    >>> itakura_mask(6, 6)
    array([[ 0., inf, inf, inf, inf, inf],
           [inf,  0.,  0., inf, inf, inf],
           [inf,  0.,  0.,  0., inf, inf],
           [inf, inf,  0.,  0.,  0., inf],
           [inf, inf, inf,  0.,  0., inf],
           [inf, inf, inf, inf, inf,  0.]])
    """
    mask = _njit_itakura_mask(sz1, sz2, max_slope=max_slope)
    # Feasibility check: every row and every column must keep at least one
    # admissible (finite) cell, otherwise no path can traverse the mask.
    feasible = (numpy.isfinite(mask).any(axis=1).all()
                and numpy.isfinite(mask).any(axis=0).all())
    if not feasible:
        warnings.warn("'itakura_max_slope' constraint is unfeasible "
                      "(ie. leads to no admissible path) for the "
                      "provided time series sizes",
                      RuntimeWarning)
    return mask
def compute_mask(s1, s2, global_constraint=0,
                 sakoe_chiba_radius=None, itakura_max_slope=None):
    """Compute the mask (region constraint).
    Parameters
    ----------
    s1 : array
        A time series.
    s2: array
        Another time series.
    global_constraint : {0, 1, 2} (default: 0)
        Global constraint to restrict admissible paths for DTW:
        - "itakura" if 1
        - "sakoe_chiba" if 2
        - no constraint otherwise
    sakoe_chiba_radius : int or None (default: None)
        Radius to be used for Sakoe-Chiba band global constraint.
        If None and `global_constraint` is set to 2 (sakoe-chiba), a radius
        of 1 is used.
        If both `sakoe_chiba_radius` and `itakura_max_slope` are set,
        `global_constraint` is used to infer which constraint to use among
        the two. In this case, if `global_constraint` corresponds to no
        global constraint, a `RuntimeWarning` is raised and no global
        constraint is used.
    itakura_max_slope : float or None (default: None)
        Maximum slope for the Itakura parallelogram constraint.
        If None and `global_constraint` is set to 1 (itakura), a maximum
        slope of 2. is used.
        If both `sakoe_chiba_radius` and `itakura_max_slope` are set,
        `global_constraint` is used to infer which constraint to use among
        the two. In this case, if `global_constraint` corresponds to no
        global constraint, a `RuntimeWarning` is raised and no global
        constraint is used.
    Returns
    -------
    mask : array
        Constraint region.
    """
    sz1 = s1.shape[0]
    sz2 = s2.shape[0]
    if (global_constraint == 0 and sakoe_chiba_radius is not None
            and itakura_max_slope is not None):
        # Bug fix: the public docstrings (dtw, dtw_path, cdist_dtw, ...)
        # promise that this ambiguous configuration emits a RuntimeWarning
        # and falls back to unconstrained DTW. The previous code did
        # `raise RuntimeWarning(...)`, i.e. raised the warning class as an
        # exception, which aborted the computation instead.
        warnings.warn("global_constraint is not set for DTW, but both "
                      "sakoe_chiba_radius and itakura_max_slope are "
                      "set, hence global_constraint cannot be inferred "
                      "and no global constraint will be used.",
                      RuntimeWarning)
        # Fall back to an unconstrained (all-zero) mask.
        return numpy.zeros((sz1, sz2))
    if global_constraint == 2 or (global_constraint == 0
                                  and sakoe_chiba_radius is not None):
        if sakoe_chiba_radius is None:
            sakoe_chiba_radius = 1
        mask = sakoe_chiba_mask(sz1, sz2, radius=sakoe_chiba_radius)
    elif global_constraint == 1 or (global_constraint == 0
                                    and itakura_max_slope is not None):
        if itakura_max_slope is None:
            itakura_max_slope = 2.
        mask = itakura_mask(sz1, sz2, max_slope=itakura_max_slope)
    else:
        # No constraint: every cell is admissible.
        mask = numpy.zeros((sz1, sz2))
    return mask
def cdist_dtw(dataset1, dataset2=None, global_constraint=None,
              sakoe_chiba_radius=None, itakura_max_slope=None, n_jobs=None,
              verbose=0):
    r"""Compute cross-similarity matrix using Dynamic Time Warping (DTW)
    similarity measure.

    DTW is computed as the Euclidean distance between aligned time series,
    i.e., if :math:`P` is the alignment path:

    .. math::

        DTW(X, Y) = \sqrt{\sum_{(i, j) \in P} (X_{i} - Y_{j})^2}

    DTW was originally presented in [1]_.

    Parameters
    ----------
    dataset1 : array-like
        A dataset of time series
    dataset2 : array-like (default: None)
        Another dataset of time series. If `None`, self-similarity of
        `dataset1` is returned.
    global_constraint : {"itakura", "sakoe_chiba"} or None (default: None)
        Global constraint restricting admissible warping paths.
    sakoe_chiba_radius : int or None (default: None)
        Radius of the Sakoe-Chiba band (defaults to 1 when that constraint
        is selected). When both radius and slope are set,
        `global_constraint` decides which applies; if it selects no
        constraint, a `RuntimeWarning` is raised and none is used.
    itakura_max_slope : float or None (default: None)
        Maximum slope of the Itakura parallelogram (defaults to 2. when
        that constraint is selected); same disambiguation rule as above.
    n_jobs : int or None, optional (default=None)
        Number of parallel jobs (joblib semantics: ``None`` means 1 unless
        in a :obj:`joblib.parallel_backend` context, ``-1`` uses all
        processors).
    verbose : int, optional (default=0)
        joblib verbosity level; non-zero prints progress messages.

    Returns
    -------
    cdist : numpy.ndarray
        Cross-similarity matrix

    Examples
    --------
    >>> cdist_dtw([[1, 2, 2, 3], [1., 2., 3., 4.]])
    array([[0., 1.],
           [1., 0.]])
    >>> cdist_dtw([[1, 2, 2, 3], [1., 2., 3., 4.]], [[1, 2, 3], [2, 3, 4, 5]])
    array([[0.        , 2.44948975],
           [1.        , 1.41421356]])

    See Also
    --------
    dtw : Get DTW similarity score

    References
    ----------
    .. [1] H. Sakoe, S. Chiba, "Dynamic programming algorithm optimization for
           spoken word recognition," IEEE Transactions on Acoustics, Speech and
           Signal Processing, vol. 26(1), pp. 43--49, 1978.
    """
    dataset1 = to_time_series_dataset(dataset1)
    if dataset2 is None:
        # Self-similarity: DTW is symmetric with a zero diagonal, so only
        # the strict upper triangle is computed, then mirrored.
        # (Scheme inspired by @GillesVandewiele:
        # https://github.com/rtavenar/tslearn/pull/128#discussion_r314978479)
        n = len(dataset1)
        dists = numpy.zeros((n, n))
        upper = numpy.triu_indices(n, k=1, m=n)
        dists[upper] = Parallel(n_jobs=n_jobs, prefer="threads",
                                verbose=verbose)(
            delayed(dtw)(dataset1[i], dataset1[j],
                         global_constraint=global_constraint,
                         sakoe_chiba_radius=sakoe_chiba_radius,
                         itakura_max_slope=itakura_max_slope)
            for i in range(n) for j in range(i + 1, n)
        )
        return dists + dists.T
    dataset2 = to_time_series_dataset(dataset2)
    scores = Parallel(n_jobs=n_jobs, prefer="threads", verbose=verbose)(
        delayed(dtw)(dataset1[i], dataset2[j],
                     global_constraint=global_constraint,
                     sakoe_chiba_radius=sakoe_chiba_radius,
                     itakura_max_slope=itakura_max_slope)
        for i in range(len(dataset1)) for j in range(len(dataset2))
    )
    return numpy.array(scores).reshape((len(dataset1), -1))
@njit(nogil=True)
def njit_gak(s1, s2, gram):
    """Accumulate the Global Alignment Kernel value over all alignment
    paths, given the precomputed local-kernel Gram matrix (numba kernel).
    """
    l1 = s1.shape[0]
    l2 = s2.shape[0]
    # cum_sum[i, j] sums the kernel contributions of all partial alignments
    # ending at (i, j); the (1, 1) padding seeds the recursion.
    cum_sum = numpy.zeros((l1 + 1, l2 + 1))
    cum_sum[0, 0] = 1.
    for i in range(l1):
        for j in range(l2):
            # Sum (not min, as in DTW) over the three predecessors, then
            # weight by the local kernel value.
            cum_sum[i + 1, j + 1] = (cum_sum[i, j + 1] +
                                     cum_sum[i + 1, j] +
                                     cum_sum[i, j]) * gram[i, j]
    return cum_sum[l1, l2]
def _gak_gram(s1, s2, sigma=1.):
gram = - cdist(s1, s2, "sqeuclidean") / (2 * sigma ** 2)
gram -= numpy.log(2 - numpy.exp(gram))
return numpy.exp(gram)
def unnormalized_gak(s1, s2, sigma=1.):
    r"""Compute Global Alignment Kernel (GAK) between (possibly
    multidimensional) time series and return it.

    Both series may differ in length but must share the same dimension.
    GAK was originally presented in [1]_. This is an unnormalized version.

    Parameters
    ----------
    s1
        A time series
    s2
        Another time series
    sigma : float (default 1.)
        Bandwidth of the internal gaussian kernel used for GAK

    Returns
    -------
    float
        Kernel value

    Examples
    --------
    >>> unnormalized_gak([1, 2, 3],
    ...                  [1., 2., 2., 3.],
    ...                  sigma=2.)  # doctest: +ELLIPSIS
    15.358...
    >>> unnormalized_gak([1, 2, 3],
    ...                  [1., 2., 2., 3., 4.])  # doctest: +ELLIPSIS
    3.166...

    See Also
    --------
    gak : normalized version of GAK that ensures that k(x,x) = 1
    cdist_gak : Compute cross-similarity matrix using Global Alignment kernel

    References
    ----------
    .. [1] M. Cuturi, "Fast global alignment kernels," ICML 2011.
    """
    series1 = to_time_series(s1, remove_nans=True)
    series2 = to_time_series(s2, remove_nans=True)
    # Build the local-kernel Gram matrix, then let the numba kernel sum the
    # contributions of all alignment paths.
    local_kernel = _gak_gram(series1, series2, sigma=sigma)
    return njit_gak(series1, series2, local_kernel)
def gak(s1, s2, sigma=1.):  # TODO: better doc (formula for the kernel)
    r"""Compute Global Alignment Kernel (GAK) between (possibly
    multidimensional) time series and return it.

    Both series may differ in length but must share the same dimension.
    GAK was originally presented in [1]_. This is a normalized version that
    ensures :math:`k(x,x)=1` for all :math:`x` and :math:`k(x,y) \in [0, 1]`
    for all :math:`x, y`.

    Parameters
    ----------
    s1
        A time series
    s2
        Another time series
    sigma : float (default 1.)
        Bandwidth of the internal gaussian kernel used for GAK

    Returns
    -------
    float
        Kernel value

    Examples
    --------
    >>> gak([1, 2, 3], [1., 2., 2., 3.], sigma=2.)  # doctest: +ELLIPSIS
    0.839...
    >>> gak([1, 2, 3], [1., 2., 2., 3., 4.])  # doctest: +ELLIPSIS
    0.273...

    See Also
    --------
    cdist_gak : Compute cross-similarity matrix using Global Alignment kernel

    References
    ----------
    .. [1] M. Cuturi, "Fast global alignment kernels," ICML 2011.
    """
    # Normalize by the geometric mean of the self-similarities so that
    # gak(x, x) == 1.
    self_sim = (unnormalized_gak(s1, s1, sigma=sigma) *
                unnormalized_gak(s2, s2, sigma=sigma))
    return unnormalized_gak(s1, s2, sigma=sigma) / numpy.sqrt(self_sim)
def cdist_gak(dataset1, dataset2=None, sigma=1., n_jobs=None, verbose=0):
    r"""Compute cross-similarity matrix using Global Alignment kernel (GAK).

    GAK was originally presented in [1]_.

    Parameters
    ----------
    dataset1
        A dataset of time series
    dataset2
        Another dataset of time series
    sigma : float (default 1.)
        Bandwidth of the internal gaussian kernel used for GAK
    n_jobs : int or None, optional (default=None)
        Number of parallel jobs (joblib semantics: ``None`` means 1 unless
        in a :obj:`joblib.parallel_backend` context, ``-1`` uses all
        processors).
    verbose : int, optional (default=0)
        joblib verbosity level; non-zero prints progress messages.

    Returns
    -------
    numpy.ndarray
        Cross-similarity matrix

    Examples
    --------
    >>> cdist_gak([[1, 2, 2, 3], [1., 2., 3., 4.]], sigma=2.)
    array([[1.        , 0.65629661],
           [0.65629661, 1.        ]])
    >>> cdist_gak([[1, 2, 2], [1., 2., 3., 4.]],
    ...           [[1, 2, 2, 3], [1., 2., 3., 4.], [1, 2, 2, 3]],
    ...           sigma=2.)
    array([[0.71059484, 0.29722877, 0.71059484],
           [0.65629661, 1.        , 0.65629661]])

    See Also
    --------
    gak : Compute Global Alignment kernel

    References
    ----------
    .. [1] M. Cuturi, "Fast global alignment kernels," ICML 2011.
    """
    dataset1 = to_time_series_dataset(dataset1)
    if dataset2 is None:
        # Self-similarity: fill the upper triangle (diagonal included),
        # mirror it, then normalize by the diagonal so that k(x, x) = 1.
        # (Scheme inspired by @GillesVandewiele:
        # https://github.com/rtavenar/tslearn/pull/128#discussion_r314978479)
        n = len(dataset1)
        kernel = numpy.zeros((n, n))
        upper = numpy.triu_indices(n, k=0, m=n)
        kernel[upper] = Parallel(n_jobs=n_jobs, prefer="threads",
                                 verbose=verbose)(
            delayed(unnormalized_gak)(dataset1[i], dataset1[j], sigma=sigma)
            for i in range(n) for j in range(i, n)
        )
        lower = numpy.tril_indices(n, k=-1, m=n)
        kernel[lower] = kernel.T[lower]
        norm = numpy.diag(numpy.sqrt(1. / numpy.diag(kernel)))
        norm_left = norm_right = norm
    else:
        dataset2 = to_time_series_dataset(dataset2)
        kernel = Parallel(n_jobs=n_jobs, prefer="threads", verbose=verbose)(
            delayed(unnormalized_gak)(dataset1[i], dataset2[j], sigma=sigma)
            for i in range(len(dataset1)) for j in range(len(dataset2))
        )
        kernel = numpy.array(kernel).reshape((len(dataset1), -1))
        # Per-dataset self-similarities provide the normalization factors.
        self_sims_1 = Parallel(n_jobs=n_jobs, prefer="threads",
                               verbose=verbose)(
            delayed(unnormalized_gak)(dataset1[i], dataset1[i], sigma=sigma)
            for i in range(len(dataset1))
        )
        self_sims_2 = Parallel(n_jobs=n_jobs, prefer="threads",
                               verbose=verbose)(
            delayed(unnormalized_gak)(dataset2[j], dataset2[j], sigma=sigma)
            for j in range(len(dataset2))
        )
        norm_left = numpy.diag(1. / numpy.sqrt(self_sims_1))
        norm_right = numpy.diag(1. / numpy.sqrt(self_sims_2))
    return (norm_left.dot(kernel)).dot(norm_right)
def sigma_gak(dataset, n_samples=100, random_state=None):
    r"""Suggest a bandwidth :math:`\sigma` for the Global Alignment kernel.

    The heuristic, introduced in [1]_, is the median Euclidean distance
    between sampled observations, scaled by the square root of the
    (minimum) series length.

    Parameters
    ----------
    dataset
        A dataset of time series
    n_samples : int (default: 100)
        Number of samples on which median distance should be estimated
    random_state : integer or numpy.RandomState or None (default: None)
        The generator used to draw the samples. If an integer is given, it
        fixes the seed. Defaults to the global numpy random number
        generator.

    Returns
    -------
    float
        Suggested bandwidth (:math:`\sigma`) for the Global Alignment
        kernel

    Examples
    --------
    >>> dataset = [[1, 2, 2, 3], [1., 2., 3., 4.]]
    >>> sigma_gak(dataset=dataset,
    ...           n_samples=200,
    ...           random_state=0)  # doctest: +ELLIPSIS
    2.0...

    See Also
    --------
    gak : Compute Global Alignment kernel
    cdist_gak : Compute cross-similarity matrix using Global Alignment kernel

    References
    ----------
    .. [1] M. Cuturi, "Fast global alignment kernels," ICML 2011.
    """
    rs = check_random_state(random_state)
    dataset = to_time_series_dataset(dataset)
    n_ts, sz, d = dataset.shape
    # With variable-length series, restrict to the shortest common length.
    if not check_equal_size(dataset):
        sz = numpy.min([ts_size(ts) for ts in dataset])
    # Sample with replacement only when fewer observations than requested
    # samples are available.
    replace = n_ts * sz < n_samples
    sample_indices = rs.choice(n_ts * sz,
                               size=n_samples,
                               replace=replace)
    flat_obs = dataset[:, :sz, :].reshape((-1, d))
    dists = pdist(flat_obs[sample_indices], metric="euclidean")
    return numpy.median(dists) * numpy.sqrt(sz)
def gamma_soft_dtw(dataset, n_samples=100, random_state=None):
    r"""Suggest a :math:`\gamma` parameter for GAK/Soft-DTW.

    This heuristic, introduced in [1]_, is twice the squared suggested
    GAK bandwidth (see :func:`sigma_gak`).

    Parameters
    ----------
    dataset
        A dataset of time series
    n_samples : int (default: 100)
        Number of samples on which median distance should be estimated
    random_state : integer or numpy.RandomState or None (default: None)
        The generator used to draw the samples. If an integer is given, it
        fixes the seed. Defaults to the global numpy random number
        generator.

    Returns
    -------
    float
        Suggested :math:`\gamma` parameter for the Soft-DTW

    Examples
    --------
    >>> dataset = [[1, 2, 2, 3], [1., 2., 3., 4.]]
    >>> gamma_soft_dtw(dataset=dataset,
    ...                n_samples=200,
    ...                random_state=0)  # doctest: +ELLIPSIS
    8.0...

    See Also
    --------
    sigma_gak : Compute sigma parameter for Global Alignment kernel

    References
    ----------
    .. [1] M. Cuturi, "Fast global alignment kernels," ICML 2011.
    """
    sigma = sigma_gak(dataset=dataset,
                      n_samples=n_samples,
                      random_state=random_state)
    return 2. * sigma ** 2
def lb_keogh(ts_query, ts_candidate=None, radius=1, envelope_candidate=None):
    r"""Compute the LB_Keogh lower bound, originally presented in [1]_.

    Parameters
    ----------
    ts_query : array-like
        Query time-series to compare to the envelope of the candidate.
    ts_candidate : array-like or None (default: None)
        Candidate time-series. None means the envelope is provided via
        `envelope_candidate` parameter and hence does not need to be
        computed again.
    radius : int (default: 1)
        Radius to be used for the envelope generation (the envelope at
        time index i will be generated based on all observations from the
        candidate time series at indices comprised between i-radius and
        i+radius). Not used if `ts_candidate` is None.
    envelope_candidate: pair of array-like (envelope_down, envelope_up) or None
        (default: None)
        Pre-computed envelope of the candidate time series. If set to
        None, it is computed based on `ts_candidate`.

    Notes
    -----
    This method requires a `ts_query` and `ts_candidate` (or
    `envelope_candidate`, depending on the call) to be of equal size.

    Returns
    -------
    float
        Distance between the query time series and the envelope of the
        candidate time series.

    Examples
    --------
    >>> ts1 = [1, 2, 3, 2, 1]
    >>> ts2 = [0, 0, 0, 0, 0]
    >>> env_low, env_up = lb_envelope(ts1, radius=1)
    >>> lb_keogh(ts_query=ts2,
    ...          envelope_candidate=(env_low, env_up))  # doctest: +ELLIPSIS
    2.8284...
    >>> lb_keogh(ts_query=ts2,
    ...          ts_candidate=ts1,
    ...          radius=1)  # doctest: +ELLIPSIS
    2.8284...

    See also
    --------
    lb_envelope : Compute LB_Keogh-related envelope

    References
    ----------
    .. [1] Keogh, E. Exact indexing of dynamic time warping. In International
       Conference on Very Large Data Bases, 2002. pp 406-417.
    """
    if ts_candidate is not None:
        ts_candidate = to_time_series(ts_candidate)
        assert ts_candidate.shape[1] == 1, \
            "LB_Keogh is available only for monodimensional time series"
        env_low, env_up = lb_envelope(ts_candidate, radius)
    else:
        env_low, env_up = envelope_candidate
    ts_query = to_time_series(ts_query)
    assert ts_query.shape[1] == 1, \
        "LB_Keogh is available only for monodimensional time series"
    query_vals = ts_query[:, 0]
    above = query_vals > env_up[:, 0]
    below = query_vals < env_low[:, 0]
    # Only the parts of the query that leave the envelope contribute to
    # the bound.
    sq_dist = (numpy.linalg.norm(query_vals[above] - env_up[above, 0]) ** 2 +
               numpy.linalg.norm(query_vals[below] - env_low[below, 0]) ** 2)
    return numpy.sqrt(sq_dist)
@njit()
def njit_lb_envelope(time_series, radius):
    # Numba-compiled kernel computing the LB_Keogh envelope: for every
    # time index, the running min/max over a window of half-width
    # `radius` (clipped at the series boundaries), per dimension.
    sz, d = time_series.shape
    env_up = numpy.empty((sz, d))
    env_down = numpy.empty((sz, d))
    for i in prange(sz):
        lo = max(0, i - radius)
        hi = min(sz, i + radius + 1)
        for di in prange(d):
            env_down[i, di] = numpy.min(time_series[lo:hi, di])
            env_up[i, di] = numpy.max(time_series[lo:hi, di])
    return env_down, env_up
def lb_envelope(ts, radius=1):
    r"""Compute the time-series envelope required by LB_Keogh [1]_.

    Parameters
    ----------
    ts : array-like
        Time-series for which the envelope should be computed.
    radius : int (default: 1)
        Radius to be used for the envelope generation (the envelope at
        time index i will be generated based on all observations from the
        time series at indices comprised between i-radius and i+radius).

    Returns
    -------
    array-like
        Lower-side of the envelope.
    array-like
        Upper-side of the envelope.

    Examples
    --------
    >>> ts1 = [1, 2, 3, 2, 1]
    >>> env_low, env_up = lb_envelope(ts1, radius=1)
    >>> env_low
    array([[1.],
           [1.],
           [2.],
           [1.],
           [1.]])
    >>> env_up
    array([[2.],
           [3.],
           [3.],
           [3.],
           [2.]])

    See also
    --------
    lb_keogh : Compute LB_Keogh similarity

    References
    ----------
    .. [1] Keogh, E. Exact indexing of dynamic time warping. In International
       Conference on Very Large Data Bases, 2002. pp 406-417.
    """
    ts_ = to_time_series(ts)
    # Heavy lifting is delegated to the numba-compiled kernel.
    return njit_lb_envelope(ts_, radius=radius)
def soft_dtw(ts1, ts2, gamma=1.):
    r"""Compute the Soft-DTW metric between two time series.

    Soft-DTW was originally presented in [1]_.

    Parameters
    ----------
    ts1
        A time series
    ts2
        Another time series
    gamma : float (default 1.)
        Gamma parameter for Soft-DTW. A value of 0 falls back to the
        classical (hard-min) DTW distance.

    Returns
    -------
    float
        Similarity

    Examples
    --------
    >>> soft_dtw([1, 2, 2, 3],
    ...          [1., 2., 3., 4.],
    ...          gamma=1.)  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    -0.89...
    >>> soft_dtw([1, 2, 3, 3],
    ...          [1., 2., 2.1, 3.2],
    ...          gamma=0.01)  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    0.089...

    See Also
    --------
    cdist_soft_dtw : Cross similarity matrix between time series datasets

    References
    ----------
    .. [1] M. Cuturi, M. Blondel "Soft-DTW: a Differentiable Loss Function for
       Time-Series," ICML 2017.
    """
    if gamma == 0.:
        # The soft minimum converges to the hard minimum, i.e. exact DTW.
        return dtw(ts1, ts2)
    # Trim any padding before building the pairwise distance matrix.
    dist_matrix = SquaredEuclidean(ts1[:ts_size(ts1)], ts2[:ts_size(ts2)])
    return SoftDTW(dist_matrix, gamma=gamma).compute()
def cdist_soft_dtw(dataset1, dataset2=None, gamma=1.):
    r"""Compute cross-similarity matrix using Soft-DTW metric.

    Soft-DTW was originally presented in [1]_.

    Parameters
    ----------
    dataset1
        A dataset of time series
    dataset2
        Another dataset of time series. If None, the self-similarity
        matrix of `dataset1` is computed (exploiting symmetry).
    gamma : float (default 1.)
        Gamma parameter for Soft-DTW

    Returns
    -------
    numpy.ndarray
        Cross-similarity matrix

    Examples
    --------
    >>> cdist_soft_dtw([[1, 2, 2, 3], [1., 2., 3., 4.]], gamma=.01)
    array([[-0.01098612,  1.        ],
           [ 1.        ,  0.        ]])
    >>> cdist_soft_dtw([[1, 2, 2, 3], [1., 2., 3., 4.]],
    ...                [[1, 2, 2, 3], [1., 2., 3., 4.]], gamma=.01)
    array([[-0.01098612,  1.        ],
           [ 1.        ,  0.        ]])

    See Also
    --------
    soft_dtw : Compute Soft-DTW
    cdist_soft_dtw_normalized : Cross similarity matrix between time series
        datasets using a normalized version of Soft-DTW

    References
    ----------
    .. [1] M. Cuturi, M. Blondel "Soft-DTW: a Differentiable Loss Function for
       Time-Series," ICML 2017.
    """
    dataset1 = to_time_series_dataset(dataset1, dtype=numpy.float64)
    self_similarity = dataset2 is None
    if self_similarity:
        dataset2 = dataset1
    else:
        dataset2 = to_time_series_dataset(dataset2, dtype=numpy.float64)
    dists = numpy.empty((dataset1.shape[0], dataset2.shape[0]))
    equal_size_ds1 = check_equal_size(dataset1)
    equal_size_ds2 = check_equal_size(dataset2)
    for i, ts1 in enumerate(dataset1):
        # Trim padding only when series lengths differ across the dataset.
        ts1_short = ts1 if equal_size_ds1 else ts1[:ts_size(ts1)]
        for j, ts2 in enumerate(dataset2):
            if self_similarity and j < i:
                # Lower triangle mirrors the upper one.
                dists[i, j] = dists[j, i]
                continue
            ts2_short = ts2 if equal_size_ds2 else ts2[:ts_size(ts2)]
            dists[i, j] = soft_dtw(ts1_short, ts2_short, gamma=gamma)
    return dists
def cdist_soft_dtw_normalized(dataset1, dataset2=None, gamma=1.):
    """Compute cross-similarity matrix using a normalized version of the
    Soft-DTW metric.

    Soft-DTW was originally presented in [1]_. This normalized version is
    defined as:

        `sdtw_(x,y) := sdtw(x,y) - 1/2(sdtw(x,x)+sdtw(y,y))`

    and ensures that all returned values are positive and that
    `sdtw_(x,x) == 0`.

    Parameters
    ----------
    dataset1
        A dataset of time series
    dataset2
        Another dataset of time series
    gamma : float (default 1.)
        Gamma parameter for Soft-DTW

    Returns
    -------
    numpy.ndarray
        Cross-similarity matrix

    Examples
    --------
    >>> time_series = numpy.random.randn(10, 15, 1)
    >>> numpy.alltrue(cdist_soft_dtw_normalized(time_series) >= 0.)
    True

    See Also
    --------
    soft_dtw : Compute Soft-DTW
    cdist_soft_dtw : Cross similarity matrix between time series
        datasets using the unnormalized version of Soft-DTW

    References
    ----------
    .. [1] M. Cuturi, M. Blondel "Soft-DTW: a Differentiable Loss Function for
       Time-Series," ICML 2017.
    """
    dists = cdist_soft_dtw(dataset1, dataset2=dataset2, gamma=gamma)
    self_scores = numpy.diag(dists)
    # Subtract half of each series' self-similarity, row- and column-wise.
    return dists - .5 * (self_scores[:, numpy.newaxis] +
                         self_scores[numpy.newaxis, :])
class SoftDTW(object):
    def __init__(self, D, gamma=1.):
        """
        Parameters
        ----------
        D : array, shape = [m, n], or object exposing a ``compute`` method
            Pairwise distance matrix (or a distance object producing one).
        gamma: float
            Regularization parameter.
            Lower is less smoothed (closer to true DTW).

        Attributes
        ----------
        self.R_: array, shape = [m + 2, n + 2]
            Accumulated cost matrix (stored after calling `compute`).
        """
        # Accept either a precomputed matrix or a distance object.
        if hasattr(D, "compute"):
            D = D.compute()
        self.D = D.astype(numpy.float64)

        # +2 rows/columns: indices start at 1 and the backward recursion
        # peeks one cell past each border.
        m, n = self.D.shape
        self.R_ = numpy.zeros((m + 2, n + 2), dtype=numpy.float64)

        self.computed = False
        self.gamma = numpy.float64(gamma)

    def compute(self):
        """Run the forward dynamic program for soft-DTW.

        Returns
        -------
        sdtw: float
            soft-DTW discrepancy.
        """
        m, n = self.D.shape
        _soft_dtw(self.D, self.R_, gamma=self.gamma)
        self.computed = True
        return self.R_[m, n]

    def grad(self):
        """Compute gradient of soft-DTW w.r.t. D by the backward recursion.

        Returns
        -------
        grad: array, shape = [m, n]
            Gradient w.r.t. D.

        Raises
        ------
        ValueError
            If `compute` has not been called beforehand.
        """
        if not self.computed:
            raise ValueError("Needs to call compute() first.")

        m, n = self.D.shape

        # Pad D with one extra (zero) row and column: the backward
        # recursion reads one cell past each border.
        padded = numpy.zeros((m + 1, n + 1))
        padded[:m, :n] = self.D

        # +2 for the same edge cases as in the forward pass.
        E = numpy.zeros((m + 2, n + 2), dtype=numpy.float64)
        _soft_dtw_grad(padded, self.R_, E, gamma=self.gamma)
        return E[1:-1, 1:-1]
class SquaredEuclidean(object):
    def __init__(self, X, Y):
        """Pairwise squared-Euclidean distance object for Soft-DTW.

        Parameters
        ----------
        X: array, shape = [m, d]
            First time series.
        Y: array, shape = [n, d]
            Second time series.

        Examples
        --------
        >>> SquaredEuclidean([1, 2, 2, 3], [1, 2, 3, 4]).compute()
        array([[0., 1., 4., 9.],
               [1., 0., 1., 4.],
               [1., 0., 1., 4.],
               [4., 1., 0., 1.]])
        """
        self.X = to_time_series(X).astype(numpy.float64)
        self.Y = to_time_series(Y).astype(numpy.float64)

    def compute(self):
        """Compute distance matrix.

        Returns
        -------
        D: array, shape = [m, n]
            Distance matrix.
        """
        return euclidean_distances(self.X, self.Y, squared=True)

    def jacobian_product(self, E):
        """Compute the product between the Jacobian
        (a linear map from m x d to m x n) and a matrix E.

        Parameters
        ----------
        E: array, shape = [m, n]
            Second time series.

        Returns
        -------
        G: array, shape = [m, d]
            Product with Jacobian
            ([m x d, m x n] * [m x n] = [m x d]).
        """
        product = numpy.zeros_like(self.X, dtype=numpy.float64)
        # The product is accumulated in-place by the cython helper.
        _jacobian_product_sq_euc(self.X, self.Y,
                                 E.astype(numpy.float64), product)
        return product
| bsd-2-clause |
wzbozon/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
    """Create a decorator for methods that are delegated to a sub-estimator.

    This enables ducktyping: ``hasattr`` returns True exactly when the
    sub-estimator stored under ``delegate`` provides a method of the same
    name.

    >>> from sklearn.utils.metaestimators import if_delegate_has_method
    >>>
    >>>
    >>> class MetaEst(object):
    ...     def __init__(self, sub_est):
    ...         self.sub_est = sub_est
    ...
    ...     @if_delegate_has_method(delegate='sub_est')
    ...     def predict(self, X):
    ...         return self.sub_est.predict(X)
    ...
    >>> class HasPredict(object):
    ...     def predict(self, X):
    ...         return X.sum(axis=1)
    ...
    >>> class HasNoPredict(object):
    ...     pass
    ...
    >>> hasattr(MetaEst(HasPredict()), 'predict')
    True
    >>> hasattr(MetaEst(HasNoPredict()), 'predict')
    False
    """
    def decorate(fn):
        # Check for "<delegate>.<method name>" on attribute access.
        return _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
    return decorate
| bsd-3-clause |
heli522/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
    """Return ``n`` choose 2, computed with exact integer arithmetic.

    The exact version is faster for k == 2, so it is used by default
    throughout this module instead of the float approximate variant.
    """
    return comb(n, 2, exact=True)
def check_clusterings(labels_true, labels_pred):
    """Validate that the two clusterings are matching 1D label arrays.

    Both inputs are converted to numpy arrays; a ``ValueError`` is raised
    if either is not one-dimensional or if their lengths differ.
    """
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)

    # Both inputs must be flat label vectors...
    if labels_true.ndim != 1:
        raise ValueError(
            "labels_true must be 1D: shape is %r" % (labels_true.shape,))
    if labels_pred.ndim != 1:
        raise ValueError(
            "labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
    # ... describing the same number of samples.
    if labels_true.shape != labels_pred.shape:
        raise ValueError(
            "labels_true and labels_pred must have same size, got %d and %d"
            % (labels_true.shape[0], labels_pred.shape[0]))
    return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate
    eps: None or float
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    Returns
    -------
    contingency: array, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
        true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer. If ``eps`` is
        given, the dtype will be float.
    """
    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    # Using coo_matrix to accelerate simple histogram calculation,
    # i.e. bins are consecutive integers; duplicate (i, j) entries are
    # summed on conversion to a dense array.
    # NOTE: use builtin `int` rather than the `np.int` alias, which was
    # deprecated in NumPy 1.20 and removed in 1.24.
    contingency = coo_matrix((np.ones(class_idx.shape[0]),
                              (class_idx, cluster_idx)),
                             shape=(n_classes, n_clusters),
                             dtype=int).toarray()
    if eps is not None:
        # don't use += as contingency is integer: "+" promotes to float
        contingency = contingency + eps
    return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and true
    clusterings. The raw RI score is then "adjusted for chance" into the
    ARI score using the following scheme::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The adjusted Rand index is thus ensured to have a value close to 0.0
    for random labelings independently of the number of clusters and
    samples and exactly 1.0 when the clusterings are identical (up to a
    permutation). ARI is a symmetric measure::

        adjusted_rand_score(a, b) == adjusted_rand_score(b, a)

    Read more in the :ref:`User Guide <adjusted_rand_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    Returns
    -------
    ari : float
        Similarity score between -1.0 and 1.0. Random labelings have an ARI
        close to 0.0. 1.0 stands for perfect match.

    Examples
    --------
    Perfectly matching labelings have a score of 1 even

    >>> from sklearn.metrics.cluster import adjusted_rand_score
    >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
    1.0
    >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
    1.0

    Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::

    >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
    0.57...

    If classes members are completely split across different clusters, the
    assignment is totally incomplete, hence the ARI is very low::

    >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
    0.0

    References
    ----------
    .. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
       Journal of Classification 1985`
       http://www.springerlink.com/content/x64124718341j1j0/
    .. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    See also
    --------
    adjusted_mutual_info_score: Adjusted Mutual Information
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit cases: no clustering since the data is not split
    # (or empty), or trivial clustering where each document gets a unique
    # cluster. These are perfect matches by convention, hence 1.0.
    if n_classes == n_clusters and n_clusters in (0, 1, n_samples):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred)

    # Pair counts per true class, per predicted cluster, and per cell.
    sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
    sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
    sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())

    expected = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
    max_index = (sum_comb_k + sum_comb_c) / 2.
    return (sum_comb - expected) / (max_index - expected)
def homogeneity_completeness_v_measure(labels_true, labels_pred):
    """Compute homogeneity, completeness and V-Measure in a single pass.

    These metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of a Ground
    Truth class labels of the same samples.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.
    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.
    Both scores have positive values between 0.0 and 1.0, larger values
    being desirable. All three metrics are independent of the absolute
    values of the labels. V-Measure is symmetric in its arguments;
    homogeneity and completeness are not.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference
    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    homogeneity: float
        score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
    completeness: float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
    v_measure: float
        harmonic mean of the first two

    See also
    --------
    homogeneity_score
    completeness_score
    v_measure_score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # Empty inputs count as a perfect (vacuous) match.
    if len(labels_true) == 0:
        return 1.0, 1.0, 1.0

    entropy_classes = entropy(labels_true)
    entropy_clusters = entropy(labels_pred)
    mi = mutual_info_score(labels_true, labels_pred)

    # Zero entropy (a single class or cluster) yields a perfect score by
    # convention instead of a division by zero.
    homogeneity = mi / entropy_classes if entropy_classes else 1.0
    completeness = mi / entropy_clusters if entropy_clusters else 1.0

    denominator = homogeneity + completeness
    if denominator == 0.0:
        v_measure = 0.0
    else:
        v_measure = 2.0 * homogeneity * completeness / denominator
    return homogeneity, completeness, v_measure
def homogeneity_score(labels_true, labels_pred):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    This metric is independent of the absolute values of the labels, but
    it is NOT symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`completeness_score`, which will be different
    in general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference
    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    homogeneity: float
        score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    completeness_score
    v_measure_score

    Examples
    --------
    Perfect labelings are homogeneous::

    >>> from sklearn.metrics.cluster import homogeneity_score
    >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
    1.0

    Splitting classes into more clusters can be perfectly homogeneous::

    >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
    ... # doctest: +ELLIPSIS
    1.0...

    Clusters that include samples from different classes do not make for an
    homogeneous labeling::

    >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
    ... # doctest: +ELLIPSIS
    0.0...
    """
    homogeneity, _, _ = homogeneity_completeness_v_measure(labels_true,
                                                           labels_pred)
    return homogeneity
def completeness_score(labels_true, labels_pred):
    """Completeness metric of a cluster labeling given a ground truth.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    This metric is independent of the absolute values of the labels, but
    it is NOT symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`homogeneity_score`, which will be different in
    general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference
    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    completeness: float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    v_measure_score

    Examples
    --------
    Perfect labelings are complete::

    >>> from sklearn.metrics.cluster import completeness_score
    >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
    1.0

    Assigning all class members to the same clusters is still complete::

    >>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
    1.0

    If class members are split across different clusters, the assignment
    cannot be complete::

    >>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
    0.0
    """
    _, completeness, _ = homogeneity_completeness_v_measure(labels_true,
                                                            labels_pred)
    return completeness
def v_measure_score(labels_true, labels_pred):
    """V-measure cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score`.
    The V-measure is the harmonic mean between homogeneity and
    completeness::

        v = 2 * (homogeneity * completeness) / (homogeneity + completeness)

    This metric is independent of the absolute values of the labels and
    is symmetric: switching ``label_true`` with ``label_pred`` will
    return the same score value. This can be useful to measure the
    agreement of two independent label assignment strategies on the same
    dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference
    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    v_measure: float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    completeness_score

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence score 1.0::

    >>> from sklearn.metrics.cluster import v_measure_score
    >>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
    1.0
    >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
    1.0

    Complete but non-homogeneous labelings are penalized::

    >>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
    ... # doctest: +ELLIPSIS
    0.8...

    Homogeneous but incomplete labelings are penalized as well::

    >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
    ... # doctest: +ELLIPSIS
    0.8...

    Totally incomplete or totally inhomogeneous labelings score zero::

    >>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
    ... # doctest: +ELLIPSIS
    0.0...
    >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
    ... # doctest: +ELLIPSIS
    0.0...
    """
    _, _, v_measure = homogeneity_completeness_v_measure(labels_true,
                                                         labels_pred)
    return v_measure
def mutual_info_score(labels_true, labels_pred, contingency=None):
    """Mutual Information between two clusterings.

    The Mutual Information is a measure of the similarity between two
    labels of the same data. Where :math:`P(i)` is the probability of a
    random sample occurring in cluster :math:`U_i` and :math:`P'(j)` is
    the probability of a random sample occurring in cluster :math:`V_j`,
    the Mutual Information between clusterings :math:`U` and :math:`V`
    is given as:

    .. math::

        MI(U,V)=\\sum_{i=1}^R \\sum_{j=1}^C P(i,j)\\log\\frac{P(i,j)}{P(i)P'(j)}

    This is equal to the Kullback-Leibler divergence of the joint
    distribution with the product distribution of the marginals.

    This metric is independent of the absolute values of the labels and
    symmetric in its two arguments.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    contingency: None or array, shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix`
        function. If value is ``None``, it will be computed, otherwise the
        given value is used, with ``labels_true`` and ``labels_pred``
        ignored.

    Returns
    -------
    mi: float
        Mutual information, a non-negative value

    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true,
                                                     labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred)
    contingency = np.array(contingency, dtype='float')

    contingency_sum = np.sum(contingency)
    pi = np.sum(contingency, axis=1)
    pj = np.sum(contingency, axis=0)
    outer = np.outer(pi, pj)

    # Only nonzero cells contribute (0 * log 0 == 0 by convention).
    nonzero = contingency != 0.0
    nz_counts = contingency[nonzero]
    log_nz = np.log(nz_counts)
    nz_freq = nz_counts / contingency_sum

    # log(a / b) is computed as log(a) - log(b) to limit precision loss.
    log_outer = -np.log(outer[nonzero]) + log(pi.sum()) + log(pj.sum())
    mi = (nz_freq * (log_nz - log(contingency_sum))
          + nz_freq * log_outer)
    return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
    """Adjusted Mutual Information between two clusterings.

    AMI rescales the Mutual Information (MI) so that its expected value
    under random, independent labellings is 0. For two clusterings
    :math:`U` and :math:`V`::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]

    The score is symmetric (swapping ``labels_true`` and ``labels_pred``
    gives the same value) and invariant under any permutation of the
    label values, which makes it useful to compare two independent label
    assignments on the same data when no ground truth is available.
    Be mindful that this function is an order of magnitude slower than
    other metrics such as the Adjusted Rand Index.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    ami : float (upper-bounded by 1.0)
        1.0 when the two partitions are identical (perfectly matched);
        random (independent) partitions score around 0 on average and
        can be negative.

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    mutual_information_score: Mutual Information (not adjusted for chance)

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import adjusted_mutual_info_score
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::

      >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0

    References
    ----------
    .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
       Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR
       <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
    .. [2] `Wikipedia entry for the Adjusted Mutual Information
       <http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]
    # Degenerate cases: either no data, or everything in a single
    # cluster on both sides.  Both labellings are then trivially
    # identical, so return a perfect score.
    if n_classes == n_clusters and n_classes in (0, 1):
        return 1.0
    cont = np.array(contingency_matrix(labels_true, labels_pred),
                    dtype='float')
    # Observed MI of the two clusterings, its expectation under random
    # labellings, and the entropies used for normalization.
    observed_mi = mutual_info_score(labels_true, labels_pred,
                                    contingency=cont)
    expected_mi = expected_mutual_information(cont, n_samples)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    return (observed_mi - expected_mi) / (max(h_true, h_pred) - expected_mi)
def normalized_mutual_info_score(labels_true, labels_pred):
    """Normalized Mutual Information between two clusterings.

    NMI scales the Mutual Information (MI) to lie between 0 (no mutual
    information) and 1 (perfect correlation).  In this function the MI
    is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.

    This measure is not adjusted for chance, so
    :func:`adjusted_mutual_info_score` might be preferred.  The score is
    symmetric (swapping ``labels_true`` and ``labels_pred`` gives the
    same value) and invariant under any permutation of the label values,
    which makes it useful to compare two independent label assignments
    on the same data when no ground truth is available.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    nmi : float
        Score between 0.0 and 1.0; 1.0 stands for a perfectly complete
        labeling.

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
        against chance)

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import normalized_mutual_info_score
      >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::

      >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]
    # Degenerate cases: either no data, or a single cluster on both
    # sides.  The labellings are then trivially identical.
    if n_classes == n_clusters and n_classes in (0, 1):
        return 1.0
    cont = np.array(contingency_matrix(labels_true, labels_pred),
                    dtype='float')
    observed_mi = mutual_info_score(labels_true, labels_pred,
                                    contingency=cont)
    # Normalize by the geometric mean of the two entropies; the max()
    # guards against division by zero when both entropies vanish.
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    return observed_mi / max(np.sqrt(h_true * h_pred), 1e-10)
def entropy(labels):
    """Calculate the entropy (in nats) for a labeling.

    Parameters
    ----------
    labels : array-like
        Flat sequence of label values; only the relative frequencies of
        the distinct values matter.

    Returns
    -------
    float
        Shannon entropy of the label distribution, using the natural
        logarithm.  An empty input returns 1.0 (historical behavior,
        preserved for callers that rely on it).
    """
    if len(labels) == 0:
        return 1.0
    # Map each label to a dense integer index and count occurrences.
    label_idx = np.unique(labels, return_inverse=True)[1]
    # NOTE: the deprecated alias ``np.float`` (removed in NumPy 1.20+)
    # is replaced by the builtin ``float``; ``np.bincount``/``np.log``
    # replace the old compat-shim imports with identical semantics.
    pi = np.bincount(label_idx).astype(float)
    pi = pi[pi > 0]
    pi_sum = np.sum(pi)
    # log(a / b) is computed as log(a) - log(b) to avoid a possible
    # loss of precision in the division.
    return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
| bsd-3-clause |
lewisc/spark-tk | regression-tests/sparktkregtests/testcases/frames/cumulative_tally_test.py | 13 | 5020 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test cumulative tally functions, hand calculated baselines"""
import unittest
from sparktkregtests.lib import sparktk_test
class TestCumulativeTally(sparktk_test.SparkTKTestCase):
    # Exercises Frame.tally / Frame.tally_percent against hand-computed
    # baseline columns ('count', 'percent_count') shipped in the CSV.

    def setUp(self):
        # Import the baseline CSV into a fresh frame before each test.
        super(TestCumulativeTally, self).setUp()

        data_tally = self.get_file("cumu_tally_seq.csv")
        # Schema mirrors the CSV layout; the last two columns hold the
        # expected tally results used for comparison below.
        schema_tally = [("sequence", int),
                        ("user_id", int),
                        ("vertex_type", str),
                        ("movie_id", int),
                        ("rating", int),
                        ("splits", str),
                        ("count", int),
                        ("percent_count", float)]
        self.tally_frame = self.context.frame.import_csv(data_tally,
                                                         schema=schema_tally)

    def test_tally_and_tally_percent(self):
        """Test tally and tally percent"""
        self.tally_frame.tally("rating", '5')
        self.tally_frame.tally_percent("rating", '5')

        # Pull everything to pandas and compare row-by-row against the
        # precomputed baseline columns.
        pd_frame = self.tally_frame.to_pandas(self.tally_frame.count())
        for index, row in pd_frame.iterrows():
            self.assertAlmostEqual(
                row['percent_count'], row['rating_tally_percent'], delta=.0001)
            self.assertEqual(row['count'], row['rating_tally'])

    def test_tally_colname_collision(self):
        """Test tally column names collide gracefully"""
        # repeatedly run tally to force collisions; each re-run should
        # append a numeric suffix rather than overwrite or fail.
        self.tally_frame.tally("rating", '5')
        self.tally_frame.tally_percent("rating", '5')
        self.tally_frame.tally("rating", '5')
        self.tally_frame.tally_percent("rating", '5')
        self.tally_frame.tally("rating", '5')
        self.tally_frame.tally_percent("rating", '5')
        columns = [u'sequence',
                   u'user_id',
                   u'vertex_type',
                   u'movie_id',
                   u'rating',
                   u'splits',
                   u'count',
                   u'percent_count',
                   u'rating_tally',
                   u'rating_tally_percent',
                   u'rating_tally_0',
                   u'rating_tally_percent_0',
                   u'rating_tally_1',
                   u'rating_tally_percent_1']
        self.assertItemsEqual(self.tally_frame.column_names, columns)

    def test_tally_no_column(self):
        """Test errors on non-existant column"""
        with self.assertRaisesRegexp(Exception, "Invalid column name"):
            self.tally_frame.tally("no_such_column", '5')

    def test_tally_no_column_percent(self):
        # Same check for the percent variant.
        with self.assertRaisesRegexp(Exception, "Invalid column name"):
            self.tally_frame.tally_percent("no_such_column", '5')

    def test_tally_none(self):
        """Test tally none column errors"""
        with self.assertRaisesRegexp(Exception,
                                     "column name for sample is required"):
            self.tally_frame.tally(None, '5')

    def test_tally_none_percent(self):
        # Same check for the percent variant.
        with self.assertRaisesRegexp(Exception,
                                     "column name for sample is required"):
            self.tally_frame.tally_percent(None, '5')

    def test_tally_bad_type(self):
        """Test tally on incorrect type errors"""
        # The tallied value must be a string, not an int.
        with self.assertRaisesRegexp(Exception, "does not exist"):
            self.tally_frame.tally("rating", 5)

    def test_tally_bad_type_percent(self):
        # Same check for the percent variant.
        with self.assertRaisesRegexp(Exception, "does not exist"):
            self.tally_frame.tally_percent("rating", 5)

    def test_tally_value_none(self):
        """Test tally on none errors"""
        with self.assertRaisesRegexp(Exception,
                                     "count value for the sample is required"):
            self.tally_frame.tally("rating", None)

    def test_tally_value_none_percent(self):
        # Same check for the percent variant.
        with self.assertRaisesRegexp(Exception,
                                     "count value for the sample is required"):
            self.tally_frame.tally_percent("rating", None)

    def test_tally_no_element(self):
        """Test tallying on non-present element is correct"""
        # Tallying a value absent from the column should yield 100%
        # everywhere (0 matches out of 0 seen so far -> ratio 1.0).
        self.tally_frame.tally_percent("rating", "12")
        local_frame = self.tally_frame.to_pandas(self.tally_frame.count())
        for index, row in local_frame.iterrows():
            self.assertEqual(row["rating_tally_percent"], 1.0)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
hsiaoyi0504/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
# Shared deterministic fixture for all tests below: 10 samples with
# 8 features drawn from a seeded standard normal.
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
# Tests for DictionaryLearning / MiniBatchDictionaryLearning /
# sparse_encode, all operating on the shared module-level fixture X.
def test_dict_learning_shapes():
    # The learned dictionary has one atom per requested component.
    n_components = 5
    dico = DictionaryLearning(n_components, random_state=0).fit(X)
    assert_true(dico.components_.shape == (n_components, n_features))


def test_dict_learning_overcomplete():
    # More atoms than features (overcomplete dictionary) is allowed.
    n_components = 12
    dico = DictionaryLearning(n_components, random_state=0).fit(X)
    assert_true(dico.components_.shape == (n_components, n_features))


def test_dict_learning_reconstruction():
    # code @ dictionary should reconstruct X for both transform algorithms.
    n_components = 12
    dico = DictionaryLearning(n_components, transform_algorithm='omp',
                              transform_alpha=0.001, random_state=0)
    code = dico.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X)

    dico.set_params(transform_algorithm='lasso_lars')
    code = dico.transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)

    # used to test lars here too, but there's no guarantee the number of
    # nonzero atoms is right.


def test_dict_learning_reconstruction_parallel():
    # regression test that parallel reconstruction works with n_jobs=-1
    n_components = 12
    dico = DictionaryLearning(n_components, transform_algorithm='omp',
                              transform_alpha=0.001, random_state=0, n_jobs=-1)
    code = dico.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X)

    dico.set_params(transform_algorithm='lasso_lars')
    code = dico.transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)


def test_dict_learning_lassocd_readonly_data():
    # lasso_cd must accept memory-mapped, read-only input arrays.
    n_components = 12
    with TempMemmap(X) as X_read_only:
        dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
                                  transform_alpha=0.001, random_state=0, n_jobs=-1)
        code = dico.fit(X_read_only).transform(X_read_only)
        assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)


def test_dict_learning_nonzero_coefs():
    # transform_n_nonzero_coefs caps the sparsity of the code exactly.
    n_components = 4
    dico = DictionaryLearning(n_components, transform_algorithm='lars',
                              transform_n_nonzero_coefs=3, random_state=0)
    code = dico.fit(X).transform(X[1])
    assert_true(len(np.flatnonzero(code)) == 3)

    dico.set_params(transform_algorithm='omp')
    code = dico.transform(X[1])
    assert_equal(len(np.flatnonzero(code)), 3)


def test_dict_learning_unknown_fit_algorithm():
    # An unrecognized fit_algorithm must raise ValueError at fit time.
    n_components = 5
    dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
    assert_raises(ValueError, dico.fit, X)


def test_dict_learning_split():
    # With split_sign, positive and negative parts of the code are
    # returned in separate column halves whose difference is the code.
    n_components = 5
    dico = DictionaryLearning(n_components, transform_algorithm='threshold',
                              random_state=0)
    code = dico.fit(X).transform(X)
    dico.split_sign = True
    split_code = dico.transform(X)

    assert_array_equal(split_code[:, :n_components] -
                       split_code[:, n_components:], code)


def test_dict_learning_online_shapes():
    # Online variant returns (code, dictionary) with consistent shapes.
    rng = np.random.RandomState(0)
    n_components = 8
    code, dictionary = dict_learning_online(X, n_components=n_components,
                                            alpha=1, random_state=rng)
    assert_equal(code.shape, (n_samples, n_components))
    assert_equal(dictionary.shape, (n_components, n_features))
    assert_equal(np.dot(code, dictionary).shape, X.shape)


def test_dict_learning_online_verbosity():
    n_components = 5
    # test verbosity
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys

    # Redirect stdout so the verbose output does not pollute test logs;
    # restored in the finally block even if fitting raises.
    old_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
                                           random_state=0)
        dico.fit(X)
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
                                           random_state=0)
        dico.fit(X)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
                             random_state=0)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
                             random_state=0)
    finally:
        sys.stdout = old_stdout

    assert_true(dico.components_.shape == (n_components, n_features))


def test_dict_learning_online_estimator_shapes():
    n_components = 5
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
    dico.fit(X)
    assert_true(dico.components_.shape == (n_components, n_features))


def test_dict_learning_online_overcomplete():
    # Overcomplete dictionaries work for the mini-batch variant too.
    n_components = 12
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
                                       random_state=0).fit(X)
    assert_true(dico.components_.shape == (n_components, n_features))


def test_dict_learning_online_initialization():
    # With zero iterations the dictionary stays at its initial value.
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)
    dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
                                       dict_init=V, random_state=0).fit(X)
    assert_array_equal(dico.components_, V)


def test_dict_learning_online_partial_fit():
    # Feeding samples one at a time via partial_fit should converge to
    # (approximately) the same dictionary as a single fit call.
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
                                        batch_size=1,
                                        alpha=1, shuffle=False, dict_init=V,
                                        random_state=0).fit(X)
    dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
                                        n_iter=1, dict_init=V,
                                        random_state=0)
    for i in range(10):
        for sample in X:
            dict2.partial_fit(sample)

    assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
                           0))
    assert_array_almost_equal(dict1.components_, dict2.components_,
                              decimal=2)


def test_sparse_encode_shapes():
    # Every supported algorithm returns a code of the same shape.
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        code = sparse_encode(X, V, algorithm=algo)
        assert_equal(code.shape, (n_samples, n_components))


def test_sparse_encode_error():
    # With a small alpha the encoding is non-trivial and accurate.
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    code = sparse_encode(X, V, alpha=0.001)
    assert_true(not np.all(code == 0))
    assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)


def test_sparse_encode_error_default_sparsity():
    # n_nonzero_coefs=None with OMP should still produce a full code.
    rng = np.random.RandomState(0)
    X = rng.randn(100, 64)
    D = rng.randn(2, 64)
    code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
                                          n_nonzero_coefs=None)
    assert_equal(code.shape, (100, 2))


def test_unknown_method():
    # An unrecognized encoding algorithm must raise ValueError.
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
sonnyhu/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
# Linearly separable 2-D data with labels in {-1, 1}; T/true_result are
# held-out points with their expected predictions.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# also load the boston dataset (regression fixture)
boston = datasets.load_boston()

# also load the iris dataset (multi-class classification fixture)
iris = datasets.load_iris()
def test_partial_dependence_classifier():
    # Test partial dependence for classifier
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)

    pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)

    # only 4 grid points instead of 5 because only 4 unique X[:,0] vals
    assert pdp.shape == (1, 4)
    assert axes[0].shape[0] == 4

    # now with our own grid
    X_ = np.asarray(X)
    grid = np.unique(X_[:, 0])
    pdp_2, axes = partial_dependence(clf, [0], grid=grid)

    # With an explicit grid, no axes are generated and the result must
    # match the X-derived computation.
    assert axes is None
    assert_array_equal(pdp, pdp_2)


def test_partial_dependence_multiclass():
    # Test partial dependence for multi-class classifier
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)

    grid_resolution = 25
    n_classes = clf.n_classes_
    pdp, axes = partial_dependence(
        clf, [0], X=iris.data, grid_resolution=grid_resolution)

    # One partial-dependence curve per class.
    assert pdp.shape == (n_classes, grid_resolution)
    assert len(axes) == 1
    assert axes[0].shape[0] == grid_resolution


def test_partial_dependence_regressor():
    # Test partial dependence for regressor
    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
    clf.fit(boston.data, boston.target)

    grid_resolution = 25
    pdp, axes = partial_dependence(
        clf, [0], X=boston.data, grid_resolution=grid_resolution)

    assert pdp.shape == (1, grid_resolution)
    assert axes[0].shape[0] == grid_resolution


# NOTE(review): the function name is misspelled ("dependecy") but is
# kept as-is since test names are part of the discoverable interface.
def test_partial_dependecy_input():
    # Test input validation of partial dependence.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)

    # Exactly one of `grid` and `X` must be provided.
    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=None, X=None)

    assert_raises(ValueError, partial_dependence,
                  clf, [0], grid=[0, 1], X=X)

    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, partial_dependence,
                  {}, [0], X=X)

    # Gradient boosting estimator must be fit
    assert_raises(ValueError, partial_dependence,
                  GradientBoostingClassifier(), [0], X=X)

    # Feature indices must be within range.
    assert_raises(ValueError, partial_dependence, clf, [-1], X=X)

    assert_raises(ValueError, partial_dependence, clf, [100], X=X)

    # wrong ndim for grid
    grid = np.random.rand(10, 2, 1)
    assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
    # Test partial dependence plot function.
    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
    clf.fit(boston.data, boston.target)

    grid_resolution = 25
    # Feature specs may mix single features and (pairwise) tuples.
    fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
                                       grid_resolution=grid_resolution,
                                       feature_names=boston.feature_names)
    assert len(axs) == 3
    assert all(ax.has_data for ax in axs)

    # check with str features and array feature names
    fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
                                                          ('CRIM', 'ZN')],
                                       grid_resolution=grid_resolution,
                                       feature_names=boston.feature_names)

    assert len(axs) == 3
    assert all(ax.has_data for ax in axs)

    # check with list feature_names
    feature_names = boston.feature_names.tolist()
    fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
                                                          ('CRIM', 'ZN')],
                                       grid_resolution=grid_resolution,
                                       feature_names=feature_names)
    assert len(axs) == 3
    assert all(ax.has_data for ax in axs)


@if_matplotlib
def test_plot_partial_dependence_input():
    # Test partial dependence plot function input checks.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)

    # not fitted yet
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [0])

    clf.fit(X, y)

    # Zero-feature input is rejected.
    assert_raises(ValueError, plot_partial_dependence,
                  clf, np.array(X)[:, :0], [0])

    # first argument must be an instance of BaseGradientBoosting
    assert_raises(ValueError, plot_partial_dependence,
                  {}, X, [0])

    # must be larger than -1
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [-1])

    # too large feature value
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [100])

    # str feature but no feature_names
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, ['foobar'])

    # not valid features value
    assert_raises(ValueError, plot_partial_dependence,
                  clf, X, [{'foo': 'bar'}])


@if_matplotlib
def test_plot_partial_dependence_multiclass():
    # Test partial dependence plot function on multi-class input.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)

    grid_resolution = 25
    # Multi-class plots require selecting one class via `label`.
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
                                       label=0,
                                       grid_resolution=grid_resolution)
    assert len(axs) == 2
    assert all(ax.has_data for ax in axs)

    # now with symbol labels
    target = iris.target_names[iris.target]
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, target)

    grid_resolution = 25
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
                                       label='setosa',
                                       grid_resolution=grid_resolution)
    assert len(axs) == 2
    assert all(ax.has_data for ax in axs)

    # label not in gbrt.classes_
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1], label='foobar',
                  grid_resolution=grid_resolution)

    # label not provided
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1],
                  grid_resolution=grid_resolution)
| bsd-3-clause |
YetAnotherTomek/egfrd | samples/mapk/plot_ss2.py | 6 | 3633 | #!/usr/bin/env python
import sys
import os
import string
import numpy
import scipy.io
from matplotlib.pylab import *
N_A = 6.0221367e23
E2 = 5
V = 1e-15
def load_theory():
    """Load the ODE reference curves from 'ss2_ode.dat'.

    The file interleaves two curves row-wise: even rows belong to the
    first branch and odd rows to the second; both share the time axis
    taken from the even rows.

    Returns
    -------
    (ti, data0, data1) : tuple of 1-D numpy arrays
        Time points and the two theoretical curves.
    """
    # pylab's load() was removed from matplotlib; numpy.loadtxt is the
    # supported equivalent for whitespace-separated numeric files.
    data = numpy.loadtxt('ss2_ode.dat')
    ti = data[0:len(data):2][:, 0]
    data0 = data[0:len(data):2][:, 1]
    data1 = data[1:len(data):2][:, 1]
    return ti, data0, data1
def file_mean(filename, skip):
    """Return the value in the second column of the last line of `filename`.

    The file is expected to hold whitespace-separated rows of the form
    ``time value ...``.  Only the tail of the (potentially large) file
    is read.

    Parameters
    ----------
    filename : str
        Path of the time-course data file.
    skip : float
        Minimum time the run must have reached; a RuntimeError is
        raised if the last line's time stamp is below ``skip - 1``.

    Returns
    -------
    float
        The value recorded on the last line.
    """
    # Open in binary mode: end-relative seeks are not allowed on text
    # files in Python 3.  Clamp the offset so files shorter than 1000
    # bytes do not make seek() fail.
    with open(filename, 'rb') as f:
        f.seek(0, os.SEEK_END)
        f.seek(max(0, f.tell() - 1000))
        lastlinedata = f.readlines()[-1].decode('ascii', 'replace').split()

    # Compare numerically: the raw tokens are strings, and comparing a
    # string with a float is a bug (TypeError on Python 3).
    if float(lastlinedata[0]) < skip - 1:
        # Raising a bare string ('oops') is invalid in modern Python;
        # use a real exception type with a useful message instead.
        raise RuntimeError('data in %s ends before t=%g' % (filename, skip))

    return float(lastlinedata[1])
import glob
import fnmatch
import os

# Simulation-selection parameters baked into the data file names.
model = 'mapk4'
V_str = '1e-15'
D_ratio_str = '1'
mode = 'fixed'
N_K_total_str = '300'
#ti_str = '1e-2'
#ti_str = '0'
T = '300'
skip = float(T)  #*0.95

#dir = sys.argv[1]
dir = '11/data'
#outdir = sys.argv[2]
#pattern = sys.argv[2]
#globpattern = pattern.replace('ALL','*') + '_*.dat'

#os.chdir(dir)

# Accumulate, per Kpp ratio, the mean and standard error of the final
# Kpp fraction across simulation repeats, for each release time ti.
# NOTE(review): this script uses Python 2 print statements and predates
# Python 3.
x_all = []
mean_all = []
std_err_all = []
for Kpp_ratio_str in ['0','.3','.7','1']:
    x = []
    mean = []
    std_err = []
    for ti_str in ['0','1e-6','1e-5','1e-4','1e-3','1e-2','1e-1']:
        # File names encode all sweep parameters joined by underscores.
        globpattern = \
            '_'.join((model, V_str, D_ratio_str, mode, N_K_total_str,
                      Kpp_ratio_str, ti_str, 'normal',
                      '*')) +\
            '_tc.dat'
        filelist = glob.glob(dir + os.sep + globpattern)
        if not filelist:
            continue
        #print globpattern
        data = []
        for file in filelist:
            print file
            res = file_mean(file, skip)
            data.append(res)
        data = numpy.array(data)
        # Normalize by the total kinase count to get a fraction.
        data /= int(N_K_total_str)
        x.append(float(ti_str))
        mean.append(data.mean())
        std_err.append(data.std()/math.sqrt(len(data)))
        print x, mean, std_err
    x_all.append(x)
    mean_all.append(mean)
    std_err_all.append(std_err)

ti, theory0, theory1 = load_theory()

# Left panel: linear axis around ti = 0 (log scale cannot show 0).
axes([.15,.13,.1,.8])
#plot([1e-6,1], [0,1])
for i in range(len(x_all)):
    errorbar(numpy.array(x_all[i])+1e-18, mean_all[i], yerr=std_err_all[i],
             fmt='s')
plot(ti[:2],theory0[:2],'k--')
plot(ti[:2],theory1[:2],'k--')
xlim([-1e-7,1e-7])
ylim([-0.02, 1.01])
xticks([0, ], ['$0$', ], size=22)
yticks([0,0.2,0.4,0.6,0.8,1],
       ['$0$', '$0.2$', '$0.4$', '$0.6$', '$0.8$', '$1.0$'], size=22)
ylabel(r'$\rm{[Kpp] / [K]_{total}}$', size=28)
#xscale('symlog')

# Right panel: logarithmic time axis for the rest of the sweep.
axes([.26,.13,.7,.8])
#semilogx([5e-7,1], [0,1])
for i in range(len(x_all)):
    errorbar(numpy.array(x_all[i])+1e-18, mean_all[i], yerr=std_err_all[i],
             fmt='s')
semilogx(ti,theory0,'k--')
semilogx(ti,theory1,'k--')
xscale('log')
xlim([1e-7,0.5])
ylim([-0.02, 1.01])
xticks([1e-6,1e-5,1e-4,1e-3,1e-2,1e-1],
       [r'$1 \mu s$', '$10$', '$100$', r'$1 ms$', '$10$', '$100$'],size=22)
#xticks([1e-6, 1e-3, 1e0], ['1 us', '1 ms', '1 s'], size=22)
yticks([],[])
xlabel(r'${\tau}_{\rm rel}$', size=28)

show()
#savefig(outdir + '/' + figtitle + '.png', dpi=80)
| gpl-2.0 |
ZENGXH/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
# Tiny toy corpus for the text-vectorizer based pipeline tests.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
class IncorrectT(object):
    """Mock estimator deliberately lacking a ``fit`` method.

    Used to check that Pipeline rejects steps that do not implement the
    estimator interface; it merely stores its two constructor arguments.
    """

    def __init__(self, a=None, b=None):
        # Plain attribute storage; intentionally no fit/transform here.
        self.a, self.b = a, b
class T(IncorrectT):
    """Minimal estimator mock exposing fit/get_params/set_params."""

    def fit(self, X, y):
        # No-op fit; return self to follow the scikit-learn API.
        return self

    def get_params(self, deep=False):
        # Report both constructor arguments as parameters.
        return dict(a=self.a, b=self.b)

    def set_params(self, **params):
        # Only 'a' is settable, mirroring the original mock's behavior.
        self.a = params['a']
        return self
class TransfT(T):
    """Transformer mock whose transform is the identity function."""

    def transform(self, X, y=None):
        # Pass the input through unchanged.
        return X
class FitParamT(object):
    """Mock classifier recording the fit parameters it received.

    ``fit`` stores the value of its ``should_succeed`` keyword and
    ``predict`` reports that stored flag, which lets tests verify that
    Pipeline forwards fit parameters to its final estimator.
    """

    def __init__(self):
        # Becomes True only when fit() is called with should_succeed=True.
        # (A dead `pass` statement after this assignment was removed.)
        self.successful = False

    def fit(self, X, y, should_succeed=False):
        self.successful = should_succeed

    def predict(self, X):
        return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
    # Test the various methods of the pipeline (preprocessing + svm).
    iris = load_iris()
    X = iris.data
    y = iris.target
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    scaler = StandardScaler()
    pca = RandomizedPCA(n_components=2, whiten=True)
    clf = SVC(probability=True, random_state=0)

    # Both preprocessors must produce correctly shaped downstream outputs.
    for preprocessing in [scaler, pca]:
        pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
        pipe.fit(X, y)

        # check shapes of various prediction functions
        predict = pipe.predict(X)
        assert_equal(predict.shape, (n_samples,))

        proba = pipe.predict_proba(X)
        assert_equal(proba.shape, (n_samples, n_classes))

        log_proba = pipe.predict_log_proba(X)
        assert_equal(log_proba.shape, (n_samples, n_classes))

        decision_function = pipe.decision_function(X)
        assert_equal(decision_function.shape, (n_samples, n_classes))

        pipe.score(X, y)
def test_fit_predict_on_pipeline():
    """fit_predict on a pipeline equals transform + clustering done separately."""
    iris = load_iris()
    scaler = StandardScaler()
    km = KMeans(random_state=0)

    # Reference result: run the scaling and the clustering step by step.
    scaled_data = scaler.fit_transform(iris.data)
    separate_pred = km.fit_predict(scaled_data)

    # Same computation through the pipeline's fit_predict shortcut.
    pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
    pipeline_pred = pipe.fit_predict(iris.data)

    assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
    # tests that a pipeline does not have fit_predict method when final
    # step of pipeline does not have fit_predict defined
    scaler = StandardScaler()
    pca = PCA()
    pipe = Pipeline([('scaler', scaler), ('pca', pca)])
    # Attribute access itself must raise, mirroring the missing method on PCA.
    assert_raises_regex(AttributeError,
                        "'PCA' object has no attribute 'fit_predict'",
                        getattr, pipe, 'fit_predict')
def test_feature_union():
    # basic sanity check for feature union
    iris = load_iris()
    X = iris.data
    X -= X.mean(axis=0)
    y = iris.target
    svd = TruncatedSVD(n_components=2, random_state=0)
    select = SelectKBest(k=1)
    fs = FeatureUnion([("svd", svd), ("select", select)])
    fs.fit(X, y)
    X_transformed = fs.transform(X)
    # 2 SVD components + 1 selected feature = 3 output columns
    assert_equal(X_transformed.shape, (X.shape[0], 3))

    # check if it does the expected thing
    assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
    assert_array_equal(X_transformed[:, -1],
                       select.fit_transform(X, y).ravel())

    # test if it also works for sparse input
    # We use a different svd object to control the random_state stream
    fs = FeatureUnion([("svd", svd), ("select", select)])
    X_sp = sparse.csr_matrix(X)
    X_sp_transformed = fs.fit_transform(X_sp, y)
    assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())

    # test setting parameters
    fs.set_params(select__k=2)
    assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))

    # test it works with transformers missing fit_transform
    fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
    X_transformed = fs.fit_transform(X, y)
    # TransfT passes 4 features through + 2 SVD + 2 selected = 8 columns
    assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
    """make_union derives step names from lowercased class names."""
    pca = PCA()
    mock = TransfT()
    union = make_union(pca, mock)
    step_names, step_transformers = zip(*union.transformer_list)
    assert_equal(step_names, ("pca", "transft"))
    assert_equal(step_transformers, (pca, mock))
def test_pipeline_transform():
    """A transformer-only pipeline delegates transform/inverse_transform."""
    iris = load_iris()
    X = iris.data
    pca = PCA(n_components=2)
    pipeline = Pipeline([('pca', pca)])

    # fit().transform(), fit_transform() and a bare PCA must all agree.
    via_fit_then_transform = pipeline.fit(X).transform(X)
    via_fit_transform = pipeline.fit_transform(X)
    via_pca_directly = pca.fit_transform(X)
    assert_array_almost_equal(via_fit_then_transform, via_fit_transform)
    assert_array_almost_equal(via_fit_then_transform, via_pca_directly)

    # inverse_transform is likewise forwarded to the underlying PCA.
    restored_by_pipeline = pipeline.inverse_transform(via_fit_then_transform)
    restored_by_pca = pca.inverse_transform(via_fit_then_transform)
    assert_array_almost_equal(restored_by_pipeline, restored_by_pca)
def test_pipeline_fit_transform():
    """Pipeline.fit_transform works when a step lacks fit_transform."""
    iris = load_iris()
    X, y = iris.data, iris.target
    transft = TransfT()
    pipeline = Pipeline([('mock', transft)])

    # The pipeline falls back to fit(...).transform(...) under the hood.
    combined = pipeline.fit_transform(X, y)
    sequential = transft.fit(X, y).transform(X)
    assert_array_almost_equal(combined, sequential)
def test_make_pipeline():
    """make_pipeline auto-generates step names, numbering duplicate classes."""
    t1, t2 = TransfT(), TransfT()

    pipe = make_pipeline(t1, t2)
    assert_true(isinstance(pipe, Pipeline))
    # Two steps of the same class get "-1"/"-2" suffixes.
    assert_equal(pipe.steps[0][0], "transft-1")
    assert_equal(pipe.steps[1][0], "transft-2")

    pipe = make_pipeline(t1, t2, FitParamT())
    assert_true(isinstance(pipe, Pipeline))
    assert_equal(pipe.steps[0][0], "transft-1")
    assert_equal(pipe.steps[1][0], "transft-2")
    # A uniquely-typed step keeps its plain lowercased class name.
    assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
    # test feature union with transformer weights
    iris = load_iris()
    X = iris.data
    y = iris.target
    pca = RandomizedPCA(n_components=2, random_state=0)
    select = SelectKBest(k=1)
    # test using fit followed by transform
    fs = FeatureUnion([("pca", pca), ("select", select)],
                      transformer_weights={"pca": 10})
    fs.fit(X, y)
    X_transformed = fs.transform(X)
    # test using fit_transform
    fs = FeatureUnion([("pca", pca), ("select", select)],
                      transformer_weights={"pca": 10})
    X_fit_transformed = fs.fit_transform(X, y)
    # test it works with transformers missing fit_transform
    fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
                      transformer_weights={"mock": 10})
    X_fit_transformed_wo_method = fs.fit_transform(X, y)
    # check against expected result

    # We use a different pca object to control the random_state stream
    # (PCA columns must be scaled by the weight of 10; the unweighted
    # selected column passes through unchanged).
    assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
    assert_array_equal(X_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    assert_array_almost_equal(X_fit_transformed[:, :-1],
                              10 * pca.fit_transform(X))
    assert_array_equal(X_fit_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    # 4 mock pass-through + 2 PCA + 1 selected = 7 columns
    assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
    # test that n_jobs work for FeatureUnion
    X = JUNK_FOOD_DOCS

    fs = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ])

    fs_parallel = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ], n_jobs=2)

    fs_parallel2 = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ], n_jobs=2)

    # Serial result is the reference for both parallel variants.
    fs.fit(X)
    X_transformed = fs.transform(X)
    assert_equal(X_transformed.shape[0], len(X))

    fs_parallel.fit(X)
    X_transformed_parallel = fs_parallel.transform(X)
    assert_equal(X_transformed.shape, X_transformed_parallel.shape)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel.toarray()
    )

    # fit_transform should behave the same
    X_transformed_parallel2 = fs_parallel2.fit_transform(X)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel2.toarray()
    )

    # transformers should stay fit after fit_transform
    X_transformed_parallel2 = fs_parallel2.transform(X)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel2.toarray()
    )
def test_feature_union_feature_names():
    """get_feature_names prefixes each name with its transformer's name."""
    word_vect = CountVectorizer(analyzer="word")
    char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
    union = FeatureUnion([("chars", char_vect), ("words", word_vect)])
    union.fit(JUNK_FOOD_DOCS)
    feature_names = union.get_feature_names()
    # Fixed vocabulary size of the junk-food corpus under these analyzers.
    assert_equal(len(feature_names), 35)
    for feat in feature_names:
        assert_true("chars__" in feat or "words__" in feat)
def test_classes_property():
    # classes_ is exposed only by classifier pipelines, and only after fit.
    iris = load_iris()
    X = iris.data
    y = iris.target

    # A regressor pipeline never exposes classes_.
    reg = make_pipeline(SelectKBest(k=1), LinearRegression())
    reg.fit(X, y)
    assert_raises(AttributeError, getattr, reg, "classes_")

    # A classifier pipeline exposes classes_ only once it has been fit.
    clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
    assert_raises(AttributeError, getattr, clf, "classes_")
    clf.fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
google-research/policy-learning-landscape | analysis_tools/common_plotting.py | 1 | 2473 | # coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common tools for plotting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import seaborn as sns
# Colorblind friendly colors thanks to Utku and the internet.
FRIENDLY_COLORS = ['#377eb8', '#f781bf', '#4daf4a', '#a65628', '#984ea3',
                   '#ff7f00', '#999999', '#e41a1c', '#dede00']

# Stds used in most of our experiments.
STDS = [0.0, 0.1, 0.25, 0.5, 0.75, 1.0, 2.0, 0.01, 'default']

# Map each std (stringified) to a fixed color so plots stay consistent
# across figures; 'default' is the fallback entry.
STD2COLOR = dict(zip(map(str, STDS), FRIENDLY_COLORS))
def get_colors(n_colors, palette='colorblind'):
  """Abstraction to ensure all our plots have consistent colors.

  Args:
    n_colors: The number of colors to return.
    palette: The palette to use. Recommended {Dark2, colorblind}

  Returns:
    A list of colors that can be used for plotting.
  """
  colors = sns.color_palette(palette, n_colors)
  return colors
def get_standardized_color(std):
  """Returns a color based on a fixed color palette that maps std to color.

  Args:
    std: A noise standard deviation (number or string). Unknown values fall
      back to the palette's 'default' entry.

  Returns:
    A hex color string from FRIENDLY_COLORS.
  """
  # dict.get with a fallback replaces the membership test plus the redundant
  # second str() conversion of the previous implementation.
  return STD2COLOR.get(str(std), STD2COLOR['default'])
def get_ax(ax, figsize=None):
  """Return the given axes, or create a fresh one when `ax` is None.

  Use this when you want to dynamically handle creating a new axis if it
  doesn't exist or reuse an existing one. For example:
  ```
  def add_curve(c, ax=None):
    ax = get_ax(ax)
    ax.plot(np.random.normal(c, 1, size=(100, )))
    return ax

  ax = add_curve(c=4)
  ax = add_curve(c=100, ax=ax)
  ```

  Args:
    ax: A matplotlib axes or None. If None, a figure will be created.
    figsize: The size of the figure to create if an ax is not given.

  Returns:
    A matplotlib axes.
  """
  if ax is not None:
    return ax
  fig = plt.figure(figsize=figsize)
  return fig.add_subplot(111)
if __name__ == '__main__':
  # Module is import-only; nothing to run as a script.
  pass
| apache-2.0 |
Achuth17/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 9 separable points in the plane (two classes)
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
              [1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
# Alternative labels with three interleaved (non-separable) classes
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])

# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
               [2, ], [3, ]])

# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
               [2, 0], [3, 0]])

# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])

# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
    """QDA classification: fit/predict recover the labels of a toy dataset."""
    classifier = qda.QDA()
    predictions = classifier.fit(X, y).predict(X)
    assert_array_equal(predictions, y)

    # Assure that it works with 1D data
    predictions_1d = classifier.fit(X1, y).predict(X1)
    assert_array_equal(predictions_1d, y)

    # Probability estimates: class 2 wins exactly where the label is 2,
    # and log-probabilities are consistent with probabilities.
    probas_1d = classifier.predict_proba(X1)
    assert_array_equal((probas_1d[:, 1] > 0.5) + 1, y)
    log_probas_1d = classifier.predict_log_proba(X1)
    assert_array_almost_equal(np.exp(log_probas_1d), probas_1d, 8)

    # QDA shouldn't be able to separate the interleaved three-class labels.
    predictions_3 = classifier.fit(X, y3).predict(X)
    assert_true(np.any(predictions_3 != y3))

    # Classes should have at least 2 elements
    assert_raises(ValueError, classifier.fit, X, y4)
def test_qda_priors():
    """Skewing the priors toward class 2 increases class-2 predictions."""
    # Baseline: how many samples go to class 2 with the default priors.
    baseline_pred = qda.QDA().fit(X, y).predict(X)
    n_pos = np.sum(baseline_pred == 2)

    # Put (almost) all the prior mass on class 2.
    neg = 1e-10
    skewed_clf = qda.QDA(priors=np.array([neg, 1 - neg]))
    skewed_pred = skewed_clf.fit(X, y).predict(X)
    n_pos2 = np.sum(skewed_pred == 2)

    assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
    # The default is to not set the covariances_ attribute
    clf = qda.QDA().fit(X, y)
    assert_true(not hasattr(clf, 'covariances_'))

    # Test the actual attribute:
    clf = qda.QDA().fit(X, y, store_covariances=True)
    assert_true(hasattr(clf, 'covariances_'))

    # Per-class covariance matrices, reference values precomputed by hand.
    assert_array_almost_equal(
        clf.covariances_[0],
        np.array([[0.7, 0.45], [0.45, 0.7]])
    )

    assert_array_almost_equal(
        clf.covariances_[1],
        np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
    )
def test_qda_regularization():
    # the default is reg_param=0. and will cause issues
    # when there is a constant variable
    clf = qda.QDA()
    with ignore_warnings():
        # X2 has a zero-variance column, so unregularized QDA misclassifies.
        y_pred = clf.fit(X2, y).predict(X2)
    assert_true(np.any(y_pred != y))

    # adding a little regularization fixes the problem
    clf = qda.QDA(reg_param=0.01)
    with ignore_warnings():
        clf.fit(X2, y)
    y_pred = clf.predict(X2)
    assert_array_equal(y_pred, y)

    # Case n_samples_in_a_class < n_features
    clf = qda.QDA(reg_param=0.1)
    with ignore_warnings():
        clf.fit(X5, y5)
    y_pred5 = clf.predict(X5)
    assert_array_equal(y_pred5, y5)
| bsd-3-clause |
cbmoore/statsmodels | statsmodels/tsa/tests/test_stattools.py | 26 | 12110 | from statsmodels.compat.python import lrange
from statsmodels.tsa.stattools import (adfuller, acf, pacf_ols, pacf_yw,
pacf, grangercausalitytests,
coint, acovf,
arma_order_select_ic)
from statsmodels.tsa.base.datetools import dates_from_range
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_raises,
dec, assert_)
from numpy import genfromtxt#, concatenate
from statsmodels.datasets import macrodata, sunspots
from pandas import Series, Index, DataFrame
import os
# Decimal-place precision levels used by assert_almost_equal throughout.
DECIMAL_8 = 8
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
class CheckADF(object):
    """
    Test Augmented Dickey-Fuller

    Test values taken from Stata. Subclasses set self.res1 (adfuller result
    tuple) plus the expected teststat, pvalue and critvalues.
    """
    levels = ['1%', '5%', '10%']
    data = macrodata.load()
    x = data.data['realgdp']
    y = data.data['infl']

    def test_teststat(self):
        # element 0 of the adfuller result tuple is the test statistic
        assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5)

    def test_pvalue(self):
        assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5)

    def test_critvalues(self):
        # element 4 is a dict of critical values keyed by significance level
        critvalues = [self.res1[4][lev] for lev in self.levels]
        assert_almost_equal(critvalues, self.critvalues, DECIMAL_2)
class TestADFConstant(CheckADF):
    """
    Dickey-Fuller test for unit root, constant-only ('c') regression.

    Reference values computed with Stata on realgdp.
    """
    def __init__(self):
        self.res1 = adfuller(self.x, regression="c", autolag=None,
                             maxlag=4)
        self.teststat = .97505319
        self.pvalue = .99399563
        self.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend(CheckADF):
    """
    ADF on realgdp with constant + trend ('ct') regression.
    """
    def __init__(self):
        self.res1 = adfuller(self.x, regression="ct", autolag=None,
                             maxlag=4)
        self.teststat = -1.8566374
        self.pvalue = .67682968
        self.critvalues = [-4.007, -3.437, -3.137]
#class TestADFConstantTrendSquared(CheckADF):
# """
# """
# pass
#TODO: get test values from R?
class TestADFNoConstant(CheckADF):
    """
    ADF on realgdp with no constant ('nc') regression.
    """
    def __init__(self):
        self.res1 = adfuller(self.x, regression="nc", autolag=None,
                             maxlag=4)
        self.teststat = 3.5227498

        self.pvalue = .99999 # Stata does not return a p-value for noconstant.
                             # Tau^max in MacKinnon (1994) is missing, so it is
                             # assumed that its right-tail is well-behaved

        self.critvalues = [-2.587, -1.950, -1.617]
# No Unit Root
class TestADFConstant2(CheckADF):
    """ADF on infl (no unit root expected), constant-only regression."""
    def __init__(self):
        self.res1 = adfuller(self.y, regression="c", autolag=None,
                             maxlag=1)
        self.teststat = -4.3346988
        self.pvalue = .00038661
        self.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend2(CheckADF):
    """ADF on infl (no unit root expected), constant + trend regression."""
    def __init__(self):
        self.res1 = adfuller(self.y, regression="ct", autolag=None,
                             maxlag=1)
        self.teststat = -4.425093
        self.pvalue = .00199633
        self.critvalues = [-4.006, -3.437, -3.137]
class TestADFNoConstant2(CheckADF):
    """ADF on infl (no unit root expected), no-constant regression."""
    def __init__(self):
        self.res1 = adfuller(self.y, regression="nc", autolag=None,
                             maxlag=1)
        self.teststat = -2.4511596
        self.pvalue = 0.013747 # Stata does not return a p-value for noconstant
                               # this value is just taken from our results
        self.critvalues = [-2.587,-1.950,-1.617]
class CheckCorrGram(object):
    """
    Set up for ACF, PACF tests.

    Loads realgdp plus the Stata corrgram reference results shared by the
    correlogram test classes.
    """
    data = macrodata.load()
    x = data.data['realgdp']
    filename = os.path.dirname(os.path.abspath(__file__))+\
            "/results/results_corrgram.csv"
    results = genfromtxt(open(filename, "rb"), delimiter=",", names=True,dtype=float)

    #not needed: add 1. for lag zero
    #self.results['acvar'] = np.concatenate(([1.], self.results['acvar']))
class TestACF(CheckCorrGram):
    """
    Test Autocorrelation Function

    Compares acf() values, confidence intervals and Q statistics against
    the Stata corrgram reference results.
    """
    def __init__(self):
        self.acf = self.results['acvar']
        #self.acf = np.concatenate(([1.], self.acf))
        self.qstat = self.results['Q1']
        self.res1 = acf(self.x, nlags=40, qstat=True, alpha=.05)
        self.confint_res = self.results[['acvar_lb','acvar_ub']].view((float,
                                                                       2))

    def test_acf(self):
        # lag 0 (always 1.0) is excluded from the comparison
        assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8)

    def test_confint(self):
        # center the confidence band on zero before comparing with Stata
        centered = self.res1[1] - self.res1[1].mean(1)[:,None]
        assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8)

    def test_qstat(self):
        assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3)
        # 3 decimal places because of stata rounding

#    def pvalue(self):
#        pass
    #NOTE: shouldn't need testing if Q stat is correct
class TestACF_FFT(CheckCorrGram):
    """
    Test Autocorrelation Function using FFT

    Same reference values as TestACF, but exercising the fft=True path.
    """
    def __init__(self):
        self.acf = self.results['acvarfft']
        self.qstat = self.results['Q1']
        self.res1 = acf(self.x, nlags=40, qstat=True, fft=True)

    def test_acf(self):
        # lag 0 excluded, as in TestACF
        assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8)

    def test_qstat(self):
        #todo why is res1/qstat 1 short
        assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3)
class TestPACF(CheckCorrGram):
    """Partial autocorrelation: OLS, Yule-Walker and Levinson-Durbin paths."""
    def __init__(self):
        self.pacfols = self.results['PACOLS']
        self.pacfyw = self.results['PACYW']

    def test_ols(self):
        pacfols, confint = pacf(self.x, nlags=40, alpha=.05, method="ols")
        assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6)
        centered = confint - confint.mean(1)[:,None]
        # from edited Stata ado file
        res = [[-.1375625, .1375625]] * 40
        assert_almost_equal(centered[1:41], res, DECIMAL_6)
        # check lag 0
        assert_equal(centered[0], [0., 0.])
        assert_equal(confint[0], [1, 1])
        assert_equal(pacfols[0], 1)

    def test_yw(self):
        pacfyw = pacf_yw(self.x, nlags=40, method="mle")
        assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)

    def test_ld(self):
        # Levinson-Durbin (biased and unbiased) must agree with Yule-Walker.
        pacfyw = pacf_yw(self.x, nlags=40, method="mle")
        pacfld = pacf(self.x, nlags=40, method="ldb")
        assert_almost_equal(pacfyw, pacfld, DECIMAL_8)

        pacfyw = pacf(self.x, nlags=40, method="yw")
        pacfld = pacf(self.x, nlags=40, method="ldu")
        assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
class CheckCoint(object):
    """
    Test Cointegration Test Results for 2-variable system

    Test values taken from Stata. Subclasses set self.coint_t and the
    expected self.teststat.
    """
    levels = ['1%', '5%', '10%']
    data = macrodata.load()
    y1 = data.data['realcons']
    y2 = data.data['realgdp']

    def test_tstat(self):
        assert_almost_equal(self.coint_t,self.teststat, DECIMAL_4)
class TestCoint_t(CheckCoint):
    """
    Get AR(1) parameter on residuals

    Engle-Granger cointegration t-statistic for realcons vs realgdp.
    """
    def __init__(self):
        # element 0 of the coint result tuple is the t-statistic
        self.coint_t = coint(self.y1, self.y2, regression ="c")[0]
        self.teststat = -1.8208817
class TestGrangerCausality(object):
    """Granger-causality tests against R's lmtest::grangertest."""

    def test_grangercausality(self):
        # some example data
        mdata = macrodata.load().data
        mdata = mdata[['realgdp', 'realcons']]
        data = mdata.view((float, 2))
        data = np.diff(np.log(data), axis=0)

        #R: lmtest:grangertest
        r_result = [0.243097, 0.7844328, 195, 2]  # f_test
        gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
        assert_almost_equal(r_result, gr[2][0]['ssr_ftest'], decimal=7)
        # parameter F-test and SSR F-test must agree for this model
        assert_almost_equal(gr[2][0]['params_ftest'], gr[2][0]['ssr_ftest'], decimal=7)

    def test_granger_fails_on_nobs_check(self):
        # Test that if maxlag is too large, Granger Test raises a clear error.
        X = np.random.rand(10, 2)
        grangercausalitytests(X, 2, verbose=False)  # This should pass.
        assert_raises(ValueError, grangercausalitytests, X, 3, verbose=False)
def test_pandasacovf():
    """acovf treats a pandas Series the same as its underlying ndarray."""
    series = Series(lrange(1, 11))
    assert_almost_equal(acovf(series), acovf(series.values))
def test_acovf2d():
    # acovf on a single-column DataFrame must match acovf on the raw values,
    # while genuinely 2-D input is rejected.
    dta = sunspots.load_pandas().data
    dta.index = Index(dates_from_range('1700', '2008'))
    del dta["YEAR"]
    res = acovf(dta)
    assert_equal(res, acovf(dta.values))
    X = np.random.random((10,2))
    assert_raises(ValueError, acovf, X)
def test_acovf_fft_vs_convolution():
    """FFT- and convolution-based acovf agree for all flag combinations."""
    np.random.seed(1)
    series = np.random.normal(size=100)
    for demean in [True, False]:
        for unbiased in [True, False]:
            via_fft = acovf(series, demean=demean, unbiased=unbiased,
                            fft=True)
            direct = acovf(series, demean=demean, unbiased=unbiased,
                           fft=False)
            assert_almost_equal(via_fft, direct, decimal=7)
@dec.slow
def test_arma_order_select_ic():
    # smoke test, assumes info-criteria are right
    from statsmodels.tsa.arima_process import arma_generate_sample
    import statsmodels.api as sm

    # Generate an ARMA(2, 2) sample with known parameters.
    arparams = np.array([.75, -.25])
    maparams = np.array([.65, .35])
    arparams = np.r_[1, -arparams]
    maparam = np.r_[1, maparams]
    nobs = 250
    np.random.seed(2014)
    y = arma_generate_sample(arparams, maparams, nobs)

    res = arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
    # regression tests in case we change algorithm to minic in sas
    aic_x = np.array([[         np.nan,  552.7342255 ,  484.29687843],
                      [ 562.10924262,  485.5197969 ,  480.32858497],
                      [ 507.04581344,  482.91065829,  481.91926034],
                      [ 484.03995962,  482.14868032,  483.86378955],
                      [ 481.8849479 ,  483.8377379 ,  485.83756612]])
    bic_x = np.array([[         np.nan,  559.77714733,  494.86126118],
                      [ 569.15216446,  496.08417966,  494.41442864],
                      [ 517.61019619,  496.99650196,  499.52656493],
                      [ 498.12580329,  499.75598491,  504.99255506],
                      [ 499.49225249,  504.96650341,  510.48779255]])
    aic = DataFrame(aic_x , index=lrange(5), columns=lrange(3))
    bic = DataFrame(bic_x , index=lrange(5), columns=lrange(3))
    assert_almost_equal(res.aic.values, aic.values, 5)
    assert_almost_equal(res.bic.values, bic.values, 5)
    # both criteria should pick the true MA order and a parsimonious AR order
    assert_equal(res.aic_min_order, (1, 2))
    assert_equal(res.bic_min_order, (1, 2))
    assert_(res.aic.index.equals(aic.index))
    assert_(res.aic.columns.equals(aic.columns))
    assert_(res.bic.index.equals(bic.index))
    assert_(res.bic.columns.equals(bic.columns))

    # single-criterion call must give the same AIC table
    res = arma_order_select_ic(y, ic='aic', trend='nc')
    assert_almost_equal(res.aic.values, aic.values, 5)
    assert_(res.aic.index.equals(aic.index))
    assert_(res.aic.columns.equals(aic.columns))
    assert_equal(res.aic_min_order, (1, 2))
def test_arma_order_select_ic_failure():
    # this should trigger an SVD convergence failure, smoke test that it
    # returns, likely platform dependent failure...
    # looks like AR roots may be cancelling out for 4, 1?
    y = np.array([ 0.86074377817203640006,  0.85316549067906921611,
                   0.87104653774363305363,  0.60692382068987393851,
                   0.69225941967301307667,  0.73336177248909339976,
                   0.03661329261479619179,  0.15693067239962379955,
                   0.12777403512447857437, -0.27531446294481976   ,
                  -0.24198139631653581283, -0.23903317951236391359,
                  -0.26000241325906497947, -0.21282920015519238288,
                  -0.15943768324388354896,  0.25169301564268781179,
                   0.1762305709151877342 ,  0.12678133368791388857,
                   0.89755829086753169399,  0.82667068795350151511])
    import warnings
    with warnings.catch_warnings():
        # catch a hessian inversion and convergence failure warning
        warnings.simplefilter("ignore")
        res = arma_order_select_ic(y)
def test_acf_fft_dataframe():
    # regression test #322: acf with fft=True on a single-column DataFrame
    # must return a 1-D array rather than broadcasting to 2-D
    result = acf(sunspots.load_pandas().data[['SUNACTIVITY']], fft=True)
    assert_equal(result.ndim, 1)
if __name__=="__main__":
    # Run this module's tests via numpy's nose-based test runner.
    import nose
#    nose.runmodule(argv=[__file__, '-vvs','-x','-pdb'], exit=False)
    import numpy as np
    np.testing.run_module_suite()
| bsd-3-clause |
xuguozhi/fast-rcnn | lib/fast_rcnn/test.py | 43 | 11975 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from utils.cython_nms import nms
import cPickle
import heapq
from utils.blob import im_list_to_blob
import os
def _get_image_blob(im):
    """Converts an image into a network input.

    Arguments:
        im (ndarray): a color image in BGR order

    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    # mean-subtract using the dataset pixel means from the config
    im_orig -= cfg.PIXEL_MEANS

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    # Build one pyramid level per configured test scale; each level resizes
    # the shorter side to target_size, capped so the longer side fits MAX_SIZE.
    for target_size in cfg.TEST.SCALES:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.

    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob

    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
    """
    projected_rois, levels = _project_im_rois(im_rois, im_scale_factors)
    # Prepend the pyramid level so each row is (level, x1, y1, x2, y2).
    blob = np.hstack((levels, projected_rois))
    return blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
    """Convert an image and RoIs within that image into network inputs."""
    blobs = {'data' : None, 'rois' : None}
    # image pyramid first; its scale factors are needed to project the RoIs
    blobs['data'], im_scale_factors = _get_image_blob(im)
    blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
    return blobs, im_scale_factors
def _bbox_pred(boxes, box_deltas):
    """Transform the set of class-agnostic boxes into class-specific boxes
    by applying the predicted offsets (box_deltas).

    Arguments:
        boxes (ndarray): R x 4 proposals (x1, y1, x2, y2)
        box_deltas (ndarray): R x (4*K) regression targets, one
            (dx, dy, dw, dh) group per class

    Returns:
        pred_boxes (ndarray): R x (4*K) regressed boxes
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, box_deltas.shape[1]))

    # np.float was removed in NumPy 1.24; builtin float is the same dtype.
    boxes = boxes.astype(float, copy=False)

    # Decode proposals into center/size form (EPS guards zero-size boxes).
    widths = boxes[:, 2] - boxes[:, 0] + cfg.EPS
    heights = boxes[:, 3] - boxes[:, 1] + cfg.EPS
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    # Per-class delta columns are interleaved with stride 4.
    dx = box_deltas[:, 0::4]
    dy = box_deltas[:, 1::4]
    dw = box_deltas[:, 2::4]
    dh = box_deltas[:, 3::4]

    # Centers shift proportionally to box size; sizes scale exponentially.
    pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
    pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
    pred_w = np.exp(dw) * widths[:, np.newaxis]
    pred_h = np.exp(dh) * heights[:, np.newaxis]

    pred_boxes = np.zeros(box_deltas.shape)
    # x1
    pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
    # y1
    pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
    # x2
    pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
    # y2
    pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h

    return pred_boxes
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def im_detect(net, im, boxes):
    """Detect object classes in an image given object proposals.

    Arguments:
        net (caffe.Net): Fast R-CNN network to use
        im (ndarray): color image to test (in BGR order)
        boxes (ndarray): R x 4 array of object proposals

    Returns:
        scores (ndarray): R x K array of object class scores (K includes
            background as object category 0)
        boxes (ndarray): R x (4*K) array of predicted bounding boxes
    """
    blobs, unused_im_scale_factors = _get_blobs(im, boxes)

    # When mapping from image ROIs to feature map ROIs, there's some aliasing
    # (some distinct image ROIs get mapped to the same feature ROI).
    # Here, we identify duplicate feature ROIs, so we only compute features
    # on the unique subset.
    if cfg.DEDUP_BOXES > 0:
        # Hash each quantized 5-tuple ROI into a scalar to find duplicates.
        v = np.array([1, 1e3, 1e6, 1e9, 1e12])
        hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
        _, index, inv_index = np.unique(hashes, return_index=True,
                                        return_inverse=True)
        blobs['rois'] = blobs['rois'][index, :]
        boxes = boxes[index, :]

    # reshape network inputs
    net.blobs['data'].reshape(*(blobs['data'].shape))
    net.blobs['rois'].reshape(*(blobs['rois'].shape))
    blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False),
                            rois=blobs['rois'].astype(np.float32, copy=False))

    if cfg.TEST.SVM:
        # use the raw scores before softmax under the assumption they
        # were trained as linear SVMs
        scores = net.blobs['cls_score'].data
    else:
        # use softmax estimated probabilities
        scores = blobs_out['cls_prob']

    if cfg.TEST.BBOX_REG:
        # Apply bounding-box regression deltas
        box_deltas = blobs_out['bbox_pred']
        pred_boxes = _bbox_pred(boxes, box_deltas)
        pred_boxes = _clip_boxes(pred_boxes, im.shape)
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes = np.tile(boxes, (1, scores.shape[1]))

    if cfg.DEDUP_BOXES > 0:
        # Map scores and predictions back to the original set of boxes
        scores = scores[inv_index, :]
        pred_boxes = pred_boxes[inv_index, :]

    return scores, pred_boxes
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections.

    Shows up to the first 10 boxes in `dets` whose score exceeds `thresh`,
    one figure per box.
    """
    import matplotlib.pyplot as plt
    # BGR (OpenCV) -> RGB (matplotlib)
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
def apply_nms(all_boxes, thresh):
    """Apply non-maximum suppression to all predicted boxes output by the
    test_net method.

    Arguments:
        all_boxes: nested lists all_boxes[cls][image], each entry an
            N x 5 detection array (x1, y1, x2, y2, score) or an empty list
        thresh: NMS overlap threshold

    Returns:
        Structure parallel to all_boxes with suppressed detections removed.
    """
    num_classes = len(all_boxes)
    num_images = len(all_boxes[0])
    nms_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(num_classes)]
    for cls_ind in xrange(num_classes):
        for im_ind in xrange(num_images):
            dets = all_boxes[cls_ind][im_ind]
            # `dets == []` is ambiguous for ndarrays (elementwise comparison
            # under modern NumPy); test emptiness by length instead.
            if len(dets) == 0:
                continue
            keep = nms(dets, thresh)
            if len(keep) == 0:
                continue
            nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
    return nms_boxes
def test_net(net, imdb):
    """Test a Fast R-CNN network on an image database.

    Runs im_detect over every image in *imdb*, adaptively raising a
    per-class score threshold so that roughly max_per_set detections per
    class survive overall, pickles the raw detections, applies NMS, and
    hands the result to the dataset's own evaluation routine.
    (Python 2 code: print statements, xrange, cPickle.)
    """
    num_images = len(imdb.image_index)
    # heuristic: keep an average of 40 detections per class per images prior
    # to NMS
    max_per_set = 40 * num_images
    # heuristic: keep at most 100 detection per class per image prior to NMS
    max_per_image = 100
    # detection thresold for each class (this is adaptively set based on the
    # max_per_set constraint)
    thresh = -np.inf * np.ones(imdb.num_classes)
    # top_scores will hold one minheap of scores per class (used to enforce
    # the max_per_set constraint)
    top_scores = [[] for _ in xrange(imdb.num_classes)]
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    output_dir = get_output_dir(imdb, net)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # timers
    _t = {'im_detect' : Timer(), 'misc' : Timer()}
    roidb = imdb.roidb
    for i in xrange(num_images):
        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        scores, boxes = im_detect(net, im, roidb[i]['boxes'])
        _t['im_detect'].toc()
        _t['misc'].tic()
        # class 0 is background, so start at 1; only keep proposals that are
        # not ground-truth boxes (gt_classes == 0).
        for j in xrange(1, imdb.num_classes):
            inds = np.where((scores[:, j] > thresh[j]) &
                            (roidb[i]['gt_classes'] == 0))[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j*4:(j+1)*4]
            top_inds = np.argsort(-cls_scores)[:max_per_image]
            cls_scores = cls_scores[top_inds]
            cls_boxes = cls_boxes[top_inds, :]
            # push new scores onto the minheap
            for val in cls_scores:
                heapq.heappush(top_scores[j], val)
            # if we've collected more than the max number of detection,
            # then pop items off the minheap and update the class threshold
            if len(top_scores[j]) > max_per_set:
                while len(top_scores[j]) > max_per_set:
                    heapq.heappop(top_scores[j])
                thresh[j] = top_scores[j][0]
            all_boxes[j][i] = \
                    np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                    .astype(np.float32, copy=False)
            # dead debug branch: flip `0` to 1 to visualize detections per class
            if 0:
                keep = nms(all_boxes[j][i], 0.3)
                vis_detections(im, imdb.classes[j], all_boxes[j][i][keep, :])
        _t['misc'].toc()
        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
              .format(i + 1, num_images, _t['im_detect'].average_time,
                      _t['misc'].average_time)
    # second pass: drop detections that fell below each class's final
    # adaptive threshold (thresh[j] only rises during the first pass)
    for j in xrange(1, imdb.num_classes):
        for i in xrange(num_images):
            inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
            all_boxes[j][i] = all_boxes[j][i][inds, :]
    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
    print 'Applying NMS to all detections'
    nms_dets = apply_nms(all_boxes, cfg.TEST.NMS)
    print 'Evaluating detections'
    imdb.evaluate_detections(nms_dets, output_dir)
| mit |
mikelum/pyspeckit | pyspeckit/spectrum/showspec.py | 1 | 50193 | """
showspec is my homegrown spectrum plotter, meant to somewhat follow STARLINK's
SPLAT and have functionality similar to GAIA, but with an emphasis on producing
publication-quality plots (which, while splat may do, it does unreproducibly)
TO DO:
-add spectrum arithmetic tools
(as is, you can use numpy.interp with sp.vind and sp.spectrum pretty
easily)
-implement other fitters
-e.g., NH3 hyperfine, Voigt
-move to object-oriented pylab/pyplot implementation (for bulk / speedup work)
-allow for non-plotting fitting work (curious... I've never needed that yet)
-Equivalent Width measurement without gaussian fitting
-probably should be part of the baseline code
-write documentation other people can read
"""
import math
import pylab
#from pylab import indices,figure,clf,savefig,plot,legend,text,axes,title,imshow,connect,get_current_fig_manager
from pylab import *
import matplotlib
from mpfit import mpfit
from collapse_gaussfit import *
from ratosexagesimal import *
try:
from astropy.io import fits as pyfits
except ImportError:
import pyfits
import gaussfitter
from numpy import isnan
#from mad import MAD,nanmedian
def steppify(arr,isX=False,interval=0,sign=+1.0):
    """
    *support function*
    Converts an array to double-length for step plotting

    Interleaves arr-sign*interval with arr+sign*interval so every sample
    becomes a flat step of width 2*interval.  When *isX* is set and no
    interval is given, half the (assumed uniform) sample spacing is used.
    """
    if isX and interval==0:
        interval = abs(arr[1]-arr[0]) / 2.0
    # BUGFIX: wrap zip() in list() — on Python 3 zip is a lazy iterator and
    # array() would wrap it in a 0-d object array instead of pairing values;
    # on Python 2 this is byte-for-byte the same result.
    newarr = array(list(zip(arr-sign*interval,arr+sign*interval))).ravel()
    return newarr
class SpecPlotter:
    """
    SpecPlotter class. Takes in a spectrum or data cube, plotting properties,
    and a velocity axis determination function. Look at splat_1d for a wrapper
    that might actually be useful.
    Whew, needs more documentation

    Holds the raw data in self.cube; self.spectrum is the (scaled, offset,
    baseline-subtracted) 1D slice that actually gets drawn.  Also owns a
    Specfit and a Baseline helper that mutate this plotter's state.
    """
    def __init__(self, cube, axis=None, xtol=None, ytol=None, vconv=lambda
            x: x, xtora=lambda x: x, ytodec=lambda x: x, specname=None,
            dv=None, color='k', hdr=None, errspec=None, maskspec=None,
            fig=None, fignum=1, clear=True, title=None, xunits='km/s',
            erralpha=0.2, ivconv=None, autorefresh=True, reffreq=None,
            gaiafignum=0, gaiafig=None, clickid=None, **kwargs ):
        # vconv/xtora/ytodec map pixel indices to velocity / RA / Dec
        self.vconv = vconv
        self.xtora = xtora
        self.ytodec = ytodec
        self.cube = cube # where(numpy.isnan(cube),0,cube)
        if len(self.cube.shape) > 1:
            self.spectrum = cube[:,0,0] # spectrum is what's plotted; cube is the "raw data"
        else:
            self.spectrum = cube # spectrum is what's plotted; cube is the "raw data"
        self.specname=specname
        self.dv=dv
        self.reffreq=reffreq
        self.scale=1.0
        self.units='K'
        self.xunits=xunits
        self.voff=0.0
        self.offset=0.0
        self.continuum=0.0
        self.errspec = errspec
        self.erralpha=erralpha
        self.plotcolor=color
        # fitting / baselining helpers are bound to this plotter instance
        self.specfit = Specfit(self)
        self.fitspec = self.specfit
        self.baseline = Baseline(self)
        #self.fft = FFT(self)
        #self.psd = self.fft.psd
        self.vmin=None
        self.vmax=None
        self.title=title
        self.ivconv=ivconv
        self.autorefresh=autorefresh
        self.spectrumplot=None
        self.errorplot=None
        self.gaiafignum = gaiafignum
        self.gaiafig = gaiafig
        self.clickid = clickid
        self.plotkwargs = kwargs
        if maskspec is not None:
            self.maskspec = maskspec
        else:
            self.maskspec = zeros(self.cube.shape)
        self.linecollections =[]
        self.texts =[]
        if hdr: self.header = hdr
        # figure out where to put the plot
        if fig is None and axis is None:
            fig=figure(fignum)
            if clear: fig.clf()
            self.axis = fig.gca()
        # NOTE(review): this branch repeats the previous condition and is
        # unreachable; it was probably meant to handle a different
        # fig/axis combination — confirm before relying on it.
        elif fig is None and axis is None:
            self.axis = pylab.gca()
        elif fig is not None and axis is None:
            if clear: fig.clf()
            self.axis = fig.gca()
        elif fig is None and axis is not None:
            self.axis = axis
        else: # if figure and axis are both set, just use axis
            self.axis = axis
        if clear: self.axis.clear()
    def __call__(self, event):
        """
        Connects map cube to specplotter...

        Used as a matplotlib button_press_event callback: left click
        overplots, middle click replaces the plot, right click disconnects.
        """
        if event.inaxes:
            clickX = event.xdata
            clickY = event.ydata
            tb = get_current_fig_manager().toolbar
            #if ((self.axis is None) or (self.axis==event.inaxes)) and tb.mode=='':
            if event.button==1 and tb.mode=='':
                print "OverPlotting spectrum from point %i,%i" % (clickX,clickY)
                self.plotspec(clickY,clickX,button=event.button,cube=True)
            elif event.button==2:
                print "Plotting spectrum from point %i,%i" % (clickX,clickY)
                self.plotspec(clickY,clickX,button=event.button,cube=True,clear=True)
            elif event.button==3:
                print "Disconnecting GAIA-like tool"
                self.gaiafig.canvas.mpl_disconnect(self.clickid)
            else:
                print "Call failed for some reason: "
                print "event: ",event
    def plotspec(self, i=0, j=0, cube=False, title=None,
            button=1, clear=False,color=None, continuum=None,
            axis=None, offset=None, scale=None, voff=None, vmin=None,
            vmax=None, units=None, xunits=None, erralpha=None, plotpix=False,
            errstyle='fill', autorefresh=None, **kwargs):
        """
        Plot a spectrum
        Originally written to plot spectra from data cubes, hence the i,j parameter
        to specify the location in the cube

        Most keyword arguments update the corresponding persistent
        attribute on the plotter before drawing, so settings stick across
        subsequent calls.
        """
        if kwargs.has_key('fignum'): kwargs.pop('fignum') # HACK because I want __init__ to accept different kwargs
        if kwargs.has_key('fig'): kwargs.pop('fig') # is there a better workaround?
        if scale  is not None: self.scale = scale
        if units  is not None: self.units = units
        if xunits is not None: self.xunits= xunits
        if voff   is not None: self.voff  = voff
        if offset is not None: self.offset= offset
        if continuum is not None: self.continuum= continuum
        if color  is not None: self.plotcolor=color
        if erralpha is not None: self.erralpha= erralpha
        if vmax   is not None: self.vmax = vmax
        if vmin   is not None: self.vmin = vmin
        if title  is not None: self.title = title
        if autorefresh is not None: self.autorefresh = autorefresh
        if axis is None: axis=self.axis # allow spectrum to be plotted on other axis
        if clear: axis.clear()
        # x axis: raw channel numbers or converted velocities (+ offset)
        if plotpix:
            self.vind = arange(self.cube.shape[0])
        else:
            self.vind = self.vconv(arange(self.cube.shape[0])) + self.voff
        if kwargs.has_key('fignum'): kwargs.pop('fignum')
        if kwargs.has_key('linewidth'):
            linewidth = kwargs.pop('linewidth')
        else:
            linewidth=0.5
        if cube or len(self.cube.shape) == 3:
            self.spectrum = self.cube[:,i,j]*self.scale+self.continuum-self.baseline.basespec
            self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
                    linestyle='steps-mid',linewidth=linewidth,
                    **kwargs)
        else:
            if self.maskspec.sum() > 0:
                # NaN-out masked channels so matplotlib leaves gaps
                nanmask = where(self.maskspec,numpy.nan,1)
                self.spectrum = self.cube*self.scale*nanmask+self.continuum-self.baseline.basespec
                self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
                        linestyle='steps-mid',linewidth=linewidth,
                        **kwargs)
            else:
                self.spectrum = self.cube*self.scale+self.continuum-self.baseline.basespec
                self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
                        linestyle='steps-mid',linewidth=linewidth,
                        **kwargs)
        if self.errspec is not None:
            # error envelope: shaded band ('fill') or classic error bars
            if errstyle == 'fill':
                self.errorplot = [axis.fill_between(steppify(self.vind,isX=True,sign=sign(self.dv)),
                        steppify(self.spectrum+self.offset-self.errspec*self.scale),
                        steppify(self.spectrum+self.offset+self.errspec*self.scale),
                        facecolor=self.plotcolor, alpha=self.erralpha, **kwargs)]
            elif errstyle == 'bars':
                self.errorplot = axis.errorbar(self.vind, self.spectrum+self.offset,
                        yerr=self.errspec*self.scale, ecolor=self.plotcolor, fmt=None,
                        **kwargs)
        if vmin is not None: xlo = self.vmin
        else: xlo=self.vind.min()
        if vmax is not None: xhi = self.vmax
        else: xhi=self.vind.max()
        axis.set_xlim(xlo,xhi)
        if self.title is not None:
            axis.set_title(self.title)
        elif self.xtora and self.ytodec:
            axis.set_title("Spectrum at %s %s" %
                    (ratos(self.xtora(i)),dectos(self.ytodec(j))))
        elif self.specname:
            axis.set_title("Spectrum of %s" % self.specname)
        if isinstance(self.xunits,str):
            axis.set_xlabel(self.xunits)
        else:
            axis.set_xlabel("V$_{LSR}$ (km s$^{-1}$)")
            self.xunits = 'km/s'
        if units in ['Ta*','Tastar','K']:
            axis.set_ylabel("$T_A^*$ (K)")
        elif units == 'mJy':
            axis.set_ylabel("$S_\\nu$ (mJy)")
        elif units == 'Jy':
            axis.set_ylabel("$S_\\nu$ (Jy)")
        else:
            axis.set_ylabel(self.units)
        if self.autorefresh: self.refresh()
    def save(self,fname,**kwargs):
        """
        Save the current spectrum (useful for saving baselined data)
        """
        newfile = pyfits.PrimaryHDU(data=self.cube,header=self.header)
        newfile.writeto(fname,**kwargs)
    def savefig(self,fname,bbox_inches='tight',**kwargs):
        """
        simple wrapper of maplotlib's savefig.
        """
        self.axis.figure.savefig(fname,bbox_inches=bbox_inches,**kwargs)
    def showlines(self,linefreqs,linenames,ctype='freq',cunit='hz',yscale=0.8,vofflines=0.0,
            voffunit='km/s',**kwargs):
        """
        Overplot vertical lines and labels at the frequencies (or velocities) of each line
        yscale - fraction of maximum at which to label
        """
        self.clearlines()
        if ctype != 'freq':
            print "Sorry, non-frequency units not implemented yet."
            return
        speedoflight=2.99792458e5
        # if plotting in velocity space, convert rest frequencies to velocities
        if self.reffreq and self.xunits in ('km/s','m/s'):
            linefreqs = -(array(linefreqs)-self.reffreq)/self.reffreq * speedoflight
        if 'hz' in cunit or 'Hz' in cunit:
            linefreqs *= (1.0 + vofflines / speedoflight)
        else:
            linefreqs += vofflines
        # spectrum==spectrum filters out NaNs before taking the max
        ymax = (self.spectrum[self.spectrum==self.spectrum]).max()
        for lf,ln in zip(linefreqs,linenames):
            if lf < self.vind.max() and lf > self.vind.min():
                self.linecollections.append(vlines(lf,0,ymax,**kwargs))
                self.texts.append(text(lf,ymax*yscale,ln,rotation='vertical',**kwargs))
        if self.autorefresh: self.refresh()
    def clearlines(self):
        # remove any line markers / labels previously added by showlines
        if len(self.texts) > 0:
            for T in self.texts:
                if T in self.axis.texts:
                    self.axis.texts.remove(T)
        if len(self.linecollections) > 0:
            for LC in self.linecollections:
                if LC in self.axis.collections:
                    self.axis.collections.remove(LC)
    def refresh(self):
        # redraw the canvas this plotter lives on
        self.axis.figure.canvas.draw()
class FFT:
    """
    Interactive FFT / power-spectrum display helper bound to a SpecPlotter.

    Copies the plotter's current spectrum (NaNs zeroed) and draws either its
    raw FFT real part or an autocorrelation-style PSD on a separate axis.
    """
    def __init__(self,specplotter,fignum=3,axis=None, color='k'):
        self.specplotter=specplotter
        if axis is None:
            self.fignum=fignum
            self.figure=figure(self.fignum)
            self.axis=gca()
        else:
            self.axis=axis
            self.figure=self.axis.figure
            self.fignum=None
        #self.axis.clear()
        self.color=color
        self.fftplot=None
        self.setspec()
        self.setshift()
        # NOTE: clear() also wipes the target axis on construction
        self.clear()
    def __call__(self,psd=False,shift=True):
        # re-grab the (possibly updated) spectrum, then draw FFT or PSD
        self.setspec()
        if psd:
            self.psd(shift=shift)
        else:
            self.fft(shift=shift)
    def fft(self,shift=True,logplot=False,**kwargs):
        self.clear()
        self.setshift(shift)
        if logplot: self.axis.set_yscale('log')
        else: self.axis.set_yscale('linear')
        self.fftspec = fft(self.spectofft)
        self.realfft = self.fftspec.real
        self.imagfft = self.fftspec.imag
        # only the real part is plotted; imaginary part kept on self.imagfft
        self.fftplot = self.axis.plot(self.shiftfunc(self.realfft),
                drawstyle='steps-mid',color=self.color,**kwargs)
        self.refresh()
    def psd(self,logplot=True,shift=True,**kwargs):
        self.clear()
        if logplot: self.axis.set_yscale('log')
        else: self.axis.set_yscale('linear')
        self.setshift(shift)
        # NOTE(review): PSD estimated as fft(x) * fft(x[::-1]); the usual
        # periodogram is |fft(x)|^2 = fft(x)*conj(fft(x)) — confirm intended.
        self.psdspec = fft(self.spectofft) * fft(self.spectofft[::-1])
        self.psdreal = abs(self.psdspec)
        self.fftplot = self.axis.plot(self.shiftfunc(self.psdreal),
                drawstyle='steps-mid',color=self.color,**kwargs)
        if logplot: self.axis.set_yscale('log')
        else: self.axis.set_yscale('linear')
        self.refresh()
    def setshift(self,shift=True):
        # choose whether plots are fftshift-ed (zero frequency centered)
        if shift: self.shiftfunc = fftshift
        else: self.shiftfunc = lambda x: x
    def setspec(self):
        # copy the plotter's spectrum and zero out NaNs (x==x is False for NaN)
        self.spectofft = copy(self.specplotter.spectrum)
        OKmask = (self.spectofft==self.spectofft)
        self.spectofft[(~OKmask)] = 0
    def clear(self):
        # hide and remove any previous FFT trace, then wipe the axis
        if self.fftplot is not None:
            for p in self.fftplot:
                p.set_visible(False)
                if p in self.axis.lines: self.axis.lines.remove(p)
        self.axis.clear()
        self.refresh()
    def refresh(self):
        self.axis.figure.canvas.draw()
class PSD(FFT):
    """FFT variant whose __call__ always draws the power spectral density."""
    def __call__(self,shift=True):
        # same setup sequence as FFT.__call__, but unconditionally psd()
        self.setspec()
        self.setshift(shift)
        self.clear()
        self.psd()
        self.refresh()
class Baseline:
    """
    Polynomial baseline fitter/subtractor bound to a SpecPlotter.

    The fitted baseline is stored in self.basespec and is re-added before
    each new fit, so repeated calls do not lose information.  Supports an
    interactive mode where fit regions are chosen with mouse clicks.
    """
    def __init__(self,specplotter):
        self.baselinepars  = None
        self.order = None
        self.basespec = zeros(specplotter.spectrum.shape[0])
        self.excludemask = zeros(specplotter.spectrum.shape[0],dtype='bool')
        self.OKmask = ones(specplotter.spectrum.shape[0],dtype='bool')
        self.specplotter = specplotter
        self.blleg = None
        self.click = 0
        # click counters for the interactive region selector
        self.nclicks_b1 = 0
        self.nclicks_b2 = 0
        self.fitregion=[]
        self.excludevelo = []
        self.excludepix  = []
    def __call__(self, order=1, annotate=False, excludefit=False, save=True,
            exclude=None, exclusionlevel=0.01,
            interactive=False, **kwargs):
        """
        Fit and remove a polynomial from the spectrum.
        It will be saved in the variable "self.basespec"
        and the fit parameters will be saved in "self.order"

        function baseline(spectrum,xarr=None,xmin=None,xmax=None,order=1,quiet=True,exclude=None):
        Subtract a baseline from a spectrum
        If xmin,xmax are not specified, defaults to ignoring first and last 10% of spectrum

        exclude is a set of start/end indices to ignore when baseline fitting
        (ignored by setting error to infinite in fitting procedure)

        excludefit creates a mask based on the fitted gaussian model (assuming
        that it has a zero-height) using an exclusion level of (exclusionlevel)
        * the smallest gaussian peak that was fit

        "basespec" is added back to the spectrum before fitting so you can run this
        procedure multiple times without losing information
        """
        specfit = self.specplotter.specfit
        self.order = order
        fitp = zeros(self.order+1)
        self.spectofit = self.specplotter.spectrum+self.basespec
        self.OKmask = (self.spectofit==self.spectofit)
        if exclude == 'interactive' or interactive:
            # everything starts excluded; user clicks select INCLUDED regions
            self.excludemask[:] = True
            self.excludevelo = []
            self.excludepix  = []
            self.click = self.specplotter.axis.figure.canvas.mpl_connect('button_press_event',self.selectregion)
        else:
            if excludefit and specfit.modelpars is not None:
                #vlo = self.specplotter.specfit.modelpars[1] - 2*self.specplotter.specfit.modelpars[2]
                #vhi = self.specplotter.specfit.modelpars[1] + 2*self.specplotter.specfit.modelpars[2]
                #exclude = [argmin(abs(self.specplotter.vind-vlo)),argmin(abs(self.specplotter.vind-vhi))]
                specfit.fullsizemodel() # make sure the spectrum is the right size
                self.excludemask = abs(specfit.model) > exclusionlevel*abs(min(specfit.modelpars[0::3]))
            else:
                self.excludemask[:] = False
            self.dofit(exclude=exclude,annotate=annotate,**kwargs)
        if save: self.savefit()
    def dofit(self, exclude=None, excludeunits='velo', annotate=False,
            **kwargs):
        """
        Do the baseline fitting and save and plot the results.

        Can specify a region to exclude using velocity units or pixel units
        """
        if exclude is not None and excludeunits in ['velo','km/s']:
            if len(exclude) % 2 == 0:
                self.excludevelo = exclude
                self.excludepix = []
                # convert each velocity pair to pixel indices and mask them
                for vl,vu in zip(exclude[::2],exclude[1::2]):
                    xl = argmin(abs(self.specplotter.vind-vl))
                    xu = argmin(abs(self.specplotter.vind-vu))
                    if xl > xu: xl,xu=xu,xl
                    self.excludemask[xl:xu] = True
                    self.excludepix += [xl,xu]
        elif excludeunits in ['pix','pixel','chan','channel']:
            if len(exclude) % 2 == 0:
                self.excludepix = []
                for xl,xu in zip(exclude[::2],exclude[1::2]):
                    if xl > xu: xl,xu=xu,xl
                    self.excludemask[xl:xu] = True
                    self.excludepix += [xl,xu]
        # exclusions are passed via the mask, hence exclude=None here
        self.specplotter.spectrum, self.baselinepars = baseline(
                self.spectofit,
                xarr=self.specplotter.vind,
                order=self.order, exclude=None,
                mask=(~self.OKmask)+self.excludemask,
                **kwargs)
        self.basespec = poly1d(self.baselinepars)(self.specplotter.vind)
        if self.specplotter.spectrumplot is not None:
            [self.specplotter.axis.lines.remove(p) for p in self.specplotter.spectrumplot]
        if self.specplotter.errorplot is not None:
            [self.specplotter.axis.collections.remove(p) for p in self.specplotter.errorplot if isinstance(p,matplotlib.collections.PolyCollection)]
            [self.specplotter.axis.lines.remove(p) for p in self.specplotter.errorplot if isinstance(p,matplotlib.lines.Line2D)]
        self.specplotter.plotspec(**self.specplotter.plotkwargs)
        self.specplotter.axis.set_ylim(
                abs(self.specplotter.spectrum[self.OKmask].min())*1.1*sign(self.specplotter.spectrum[self.OKmask].min()),
                abs(self.specplotter.spectrum[self.OKmask].max())*1.1*sign(self.specplotter.spectrum[self.OKmask].max()))
        if annotate: self.annotate() # refreshes automatically
        elif self.specplotter.autorefresh: self.specplotter.refresh()
    def selectregion(self,event,annotate=False):
        """
        select regions for baseline fitting

        Left click twice to bracket an INCLUDED region; middle/right click
        ends selection and triggers the fit.
        """
        if event.button == 1:
            if self.nclicks_b1 == 0:
                self.bx1 = argmin(abs(event.xdata-self.specplotter.vind))
                # NOTE(review): this appends the whole velocity axis array,
                # not the clicked velocity; probably meant vind[self.bx1] —
                # confirm before using excludevelo downstream.
                self.excludevelo += [self.specplotter.vind]
                self.excludepix  += [self.bx1]
                self.nclicks_b1 += 1
            elif self.nclicks_b1 == 1:
                self.bx2 = argmin(abs(event.xdata-self.specplotter.vind))
                self.nclicks_b1 -= 1
                if self.bx1 > self.bx2: self.bx1,self.bx2 = self.bx2,self.bx1
                self.fitregion += self.specplotter.axis.plot(
                        self.specplotter.vind[self.bx1:self.bx2],
                        self.specplotter.spectrum[self.bx1:self.bx2]+self.specplotter.offset,
                        drawstyle='steps-mid',
                        color='g',alpha=0.5)
                self.specplotter.refresh()
                self.excludemask[self.bx1:self.bx2] = False
                self.excludevelo += [self.specplotter.vind]
                self.excludepix  += [self.bx2]
        if event.button in [2,3]:
            disconnect(self.click)
            self.dofit(exclude=None,annotate=annotate)
            for p in self.fitregion:
                p.set_visible(False)
                self.specplotter.axis.lines.remove(p)
            self.fitregion=[] # I should be able to just remove from the list... but it breaks the loop...
            self.specplotter.refresh()
    def annotate(self,loc='upper left'):
        # build a legend entry showing the fitted polynomial coefficients
        bltext = "bl: $y=$"+"".join(["$%+6.3gx^{%i}$" % (f,self.order-i)
            for i,f in enumerate(self.baselinepars)])
        #self.blleg = text(xloc,yloc     ,bltext,transform = self.specplotter.axis.transAxes)
        self.clearlegend()
        pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
        self.blleg = self.specplotter.axis.legend(
                (pl,),
                (bltext,),loc=loc,markerscale=0.001,
                borderpad=0.1, handlelength=0.1, handletextpad=0.1
                )
        self.specplotter.axis.add_artist(self.blleg)
        if self.specplotter.autorefresh: self.specplotter.refresh()
    def clearlegend(self):
        if self.blleg is not None:
            self.blleg.set_visible(False)
            if self.blleg in self.specplotter.axis.artists:
                self.specplotter.axis.artists.remove(self.blleg)
        if self.specplotter.autorefresh: self.specplotter.refresh()
    def savefit(self):
        # store fit coefficients in the FITS header as BLCOEFnn cards
        if self.baselinepars is not None:
            for ii,p in enumerate(self.baselinepars):
                self.specplotter.header.update('BLCOEF%0.2i' % (ii),p,comment="Baseline power-law best-fit coefficient x^%i" % (self.order-ii-1))
class Specfit:
    """
    Gaussian fitter bound to a SpecPlotter.

    Supports non-interactive multi-gaussian fitting from explicit guesses
    and an interactive mode where the fit range, peaks, and widths are
    chosen by mouse clicks.  Parameter layout is [amp, center, width] per
    gaussian; fitted values land in self.modelpars / self.modelerrs.
    """
    def __init__(self,specplotter):
        self.model = None
        self.modelpars = None
        self.modelerrs = None
        self.modelplot = None
        self.guessplot = []
        self.fitregion = []
        self.ngauss = 0
        # click counters for interactive region / peak-width selection
        self.nclicks_b1 = 0
        self.nclicks_b2 = 0
        self.gx1 = 0
        self.gx2 = specplotter.spectrum.shape[0]
        self.guesses = []
        self.click = 0
        self.fitkwargs = {}
        self.auto = False
        self.autoannotate = True
        self.specplotter = specplotter
        self.gaussleg=None
        self.residuals=None
        self.setfitspec()
        #self.seterrspec()
    def __call__(self, interactive=False, usemoments=True, fitcolor='r',
            multifit=None, guesses=None, annotate=True, save=True,
            **kwargs):
        """
        Fit gaussians to a spectrum

        guesses = [height,amplitude,center,width]
        """
        if multifit:
            # NOTE(review): `log` and `warnings` are not imported in this
            # module's visible header — this call would NameError; confirm.
            log.warn("The multifit keyword has been deprecated", warnings.DeprecationWarning)
        self.fitcolor = fitcolor
        self.clear()
        self.ngauss = 0
        self.fitkwargs = kwargs
        if interactive:
            print "Left-click twice to select a fitting range, then middle-click twice to select a peak and width"
            self.nclicks_b1 = 0
            self.nclicks_b2 = 0
            self.guesses = []
            self.click = self.specplotter.axis.figure.canvas.mpl_connect('button_press_event',self.makeguess)
            self.autoannotate = annotate
        else:
            if guesses is None:
                print "You must input guesses when using multifit.  Also, baseline first!"
            else:
                self.guesses = guesses
                self.multifit()
                self.autoannotate = annotate
#        else:
#            #print "Non-interactive, 1D fit with automatic guessing"
#            if self.specplotter.baseline.order is None:
#                self.specplotter.baseline.order=0
#                self.onedfit(usemoments=usemoments,annotate=annotate,**kwargs)
#            else:
#                self.onedfit(usemoments=usemoments,annotate=annotate,
#                        vheight=False,height=0.0,**kwargs)
#            if self.specplotter.autorefresh: self.specplotter.refresh()
        if save: self.savefit()
    def seterrspec(self,usestd=None,useresiduals=True):
        # pick an error spectrum: explicit > residual-based > global std
        if self.specplotter.errspec is not None and not usestd:
            self.errspec = self.specplotter.errspec
        elif self.residuals is not None and useresiduals:
            self.errspec = ones(self.spectofit.shape[0]) * self.residuals.std()
        else: self.errspec = ones(self.spectofit.shape[0]) * self.spectofit.std()
    def setfitspec(self):
        # copy the spectrum, zero NaNs, and give them huge errors so the
        # fitter effectively ignores those channels
        self.spectofit = copy(self.specplotter.spectrum)
        OKmask = (self.spectofit==self.spectofit)
        self.spectofit[(~OKmask)] = 0
        self.seterrspec()
        self.errspec[(~OKmask)] = 1e10
    def multifit(self):
        # guesses are flat [amp,center,width]*N; Python 2 integer division
        self.ngauss = len(self.guesses)/3
        self.setfitspec()
        if self.fitkwargs.has_key('negamp'): self.fitkwargs.pop('negamp')
        mpp,model,mpperr,chi2 = gaussfitter.multigaussfit(
                self.specplotter.vind[self.gx1:self.gx2],
                self.spectofit[self.gx1:self.gx2],
                err=self.errspec[self.gx1:self.gx2],
                ngauss=self.ngauss,
                params=self.guesses,
                **self.fitkwargs)
        self.chi2 = chi2
        self.dof  = self.gx2-self.gx1-self.ngauss*3
        self.model = model
        self.modelpars = mpp.tolist()
        self.modelerrs = mpperr.tolist()
        self.modelplot = self.specplotter.axis.plot(
                self.specplotter.vind[self.gx1:self.gx2],
                self.model+self.specplotter.offset, color=self.fitcolor, linewidth=0.5)
        self.residuals = self.spectofit[self.gx1:self.gx2] - self.model
        if self.autoannotate:
            self.annotate()
    def onedfit(self, usemoments=True, annotate=True, vheight=True, height=0, negamp=None,**kwargs):
        self.ngauss = 1
        self.auto = True
        self.setfitspec()
        if usemoments: # this can be done within gaussfit but I want to save them
            self.guesses = gaussfitter.onedmoments(
                    self.specplotter.vind[self.gx1:self.gx2],
                    self.spectofit[self.gx1:self.gx2],
                    vheight=vheight,negamp=negamp,**kwargs)
            if vheight is False: self.guesses = [height]+self.guesses
        else:
            if negamp: self.guesses = [height,-1,0,1]
            else:  self.guesses = [height,1,0,1]
        mpp,model,mpperr,chi2 = gaussfitter.onedgaussfit(
                self.specplotter.vind[self.gx1:self.gx2],
                self.spectofit[self.gx1:self.gx2],
                err=self.errspec[self.gx1:self.gx2],
                vheight=vheight,
                params=self.guesses,
                **self.fitkwargs)
        self.chi2 = chi2
        self.dof  = self.gx2-self.gx1-self.ngauss*3-vheight
        if vheight:
            # first fitted parameter is the constant offset -> baseline
            self.specplotter.baseline.baselinepars = mpp[:1] # first item in list form
            self.model = model - mpp[0]
        else: self.model = model
        self.residuals = self.spectofit[self.gx1:self.gx2] - self.model
        self.modelpars = mpp[1:].tolist()
        self.modelerrs = mpperr[1:].tolist()
        self.modelplot = self.specplotter.axis.plot(
                self.specplotter.vind[self.gx1:self.gx2],
                self.model+self.specplotter.offset, color=self.fitcolor, linewidth=0.5)
        if annotate:
            self.annotate()
            if vheight: self.specplotter.baseline.annotate()
    def fullsizemodel(self):
        """
        If the gaussian was fit to a sub-region of the spectrum,
        expand it (with zeros) to fill the spectrum.  You can
        always recover the original by:
        origmodel = model[gx1:gx2]
        """
        if self.model.shape != self.specplotter.spectrum.shape:
            temp = zeros(self.specplotter.spectrum.shape)
            temp[self.gx1:self.gx2] = self.model
            self.model = temp
            self.residuals = self.spectofit - self.model
    def plotresiduals(self,fig=None,axis=None,clear=True,**kwargs):
        """
        Plot residuals of the fit.  Specify a figure or
        axis; defaults to figure(2).

        kwargs are passed to matplotlib plot
        """
        if axis is None:
            fig=figure(2)
            self.residualaxis = gca()
            if clear: self.residualaxis.clear()
        else:
            self.residualaxis = axis
            if clear: self.residualaxis.clear()
        self.residualplot = self.residualaxis.plot(self.specplotter.vind[self.gx1:self.gx2],
                self.residuals,drawstyle='steps-mid',
                linewidth=0.5, color='k', **kwargs)
        if self.specplotter.vmin is not None and self.specplotter.vmax is not None:
            self.residualaxis.set_xlim(self.specplotter.vmin,self.specplotter.vmax)
        self.residualaxis.figure.canvas.draw()
    def annotate(self,loc='upper right'):
        # legend listing center/width/amplitude (+/- errors) per gaussian
        #text(xloc,yloc     ,"c=%g" % self.modelpars[1],transform = self.specplotter.axis.transAxes)
        #text(xloc,yloc-0.05,"w=%g" % self.modelpars[2],transform = self.specplotter.axis.transAxes)
        #text(xloc,yloc-0.10,"a=%g" % self.modelpars[0],transform = self.specplotter.axis.transAxes)
        self.clearlegend()
        pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
        self.gaussleg = self.specplotter.axis.legend(
                tuple([pl]*3*self.ngauss),
                tuple(flatten(
                    [("c%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[1+jj*3],self.modelerrs[1+jj*3]),
                    "w%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[2+jj*3],self.modelerrs[2+jj*3]),
                    "a%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[0+jj*3],self.modelerrs[0+jj*3]))
                    for jj in range(self.ngauss)])),
                loc=loc,markerscale=0.01,
                borderpad=0.1, handlelength=0.1, handletextpad=0.1
                )
        self.gaussleg.draggable(True)
        self.specplotter.axis.add_artist(self.gaussleg)
        if self.specplotter.autorefresh: self.specplotter.refresh()
    def selectregion(self,event):
        # first left click sets gx1, second sets gx2 and highlights the range
        if self.nclicks_b1 == 0:
            self.gx1 = argmin(abs(event.xdata-self.specplotter.vind))
            self.nclicks_b1 += 1
        elif self.nclicks_b1 == 1:
            self.gx2 = argmin(abs(event.xdata-self.specplotter.vind))
            self.nclicks_b1 -= 1
            if self.gx1 > self.gx2: self.gx1,self.gx2 = self.gx2,self.gx1
            if abs(self.gx1-self.gx2) > 3: # can't fit w/ fewer data than pars
                self.fitregion = self.specplotter.axis.plot(
                        self.specplotter.vind[self.gx1:self.gx2],
                        self.specplotter.spectrum[self.gx1:self.gx2]+self.specplotter.offset,
                        drawstyle='steps-mid',
                        color='c')
                if self.guesses == []:
                    self.guesses = gaussfitter.onedmoments(
                            self.specplotter.vind[self.gx1:self.gx2],
                            self.spectofit[self.gx1:self.gx2],
                            vheight=0)
                    self.ngauss = 1
                    self.auto = True
            else:
                print "Fitting region is too small (channels %i:%i).  Try again." % (self.gx1,self.gx2)
    def guesspeakwidth(self,event):
        # even-numbered middle click: mark a peak (amp,center);
        # odd-numbered: mark its width via distance from the peak
        if self.nclicks_b2 % 2 == 0:
            if self.auto:
                self.guesses[:2] = [event.ydata,event.xdata]
            else:
                self.guesses += [event.ydata,event.xdata,1]
                self.ngauss += 1
            self.nclicks_b2 += 1
            self.guessplot += [self.specplotter.axis.scatter(event.xdata,event.ydata,marker='x',c='r')]
        elif self.nclicks_b2 % 2 == 1:
            self.guesses[-1] = abs(event.xdata-self.guesses[-2])
            self.nclicks_b2 += 1
            self.guessplot += self.specplotter.axis.plot([event.xdata,
                2*self.guesses[-2]-event.xdata],[event.ydata]*2,
                color='r')
            if self.auto:
                self.auto = False
            if self.nclicks_b2 / 2 > self.ngauss:
                print "There have been %i middle-clicks but there are only %i gaussians" % (self.nclicks_b2,self.ngauss)
                self.ngauss += 1
    def clear(self,legend=True):
        if self.modelplot is not None:
            for p in self.modelplot:
                p.set_visible(False)
        if legend: self.clearlegend()
    def makeguess(self,event):
        # dispatch mouse buttons: 1=range, 2=peak/width, 3=finish & fit
        if event.button == 1:
            self.selectregion(event)
        elif event.button == 2:
            self.guesspeakwidth(event)
        elif event.button == 3:
            disconnect(self.click)
            if self.ngauss > 0:
                print len(self.guesses)/3," Guesses: ",self.guesses," X channel range: ",self.gx1,self.gx2
                if len(self.guesses) % 3 == 0:
                    self.multifit()
                    for p in self.guessplot + self.fitregion:
                        p.set_visible(False)
                else:
                    print "error, wrong # of pars"
        if self.specplotter.autorefresh: self.specplotter.refresh()
    def clearlegend(self):
        if self.gaussleg is not None:
            self.gaussleg.set_visible(False)
            if self.gaussleg in self.specplotter.axis.artists:
                self.specplotter.axis.artists.remove(self.gaussleg)
        if self.specplotter.autorefresh: self.specplotter.refresh()
    def savefit(self):
        # store fitted gaussian parameters as AMPn/CENn/WIDn header cards
        if self.modelpars is not None:
            for ii,p in enumerate(self.modelpars):
                if ii % 3 == 0: self.specplotter.header.update('AMP%1i' % (ii/3),p,comment="Gaussian best fit amplitude #%i" % (ii/3))
                if ii % 3 == 1: self.specplotter.header.update('CEN%1i' % (ii/3),p,comment="Gaussian best fit center #%i" % (ii/3))
                if ii % 3 == 2: self.specplotter.header.update('WID%1i' % (ii/3),p,comment="Gaussian best fit width #%i" % (ii/3))
def mapplot(plane,cube,vconv=lambda x: x,xtora=lambda x: x,ytodec=lambda x: x, gaiafignum=0, specfignum=1):
    """Display *plane* as an image and wire up a SpecPlotter so that
    clicking a pixel plots the corresponding spectrum from *cube*."""
    fig = figure(gaiafignum)
    fig.clf()
    ax = fig.add_subplot(111)
    ax.imshow(plane)

    plotter = SpecPlotter(cube, vconv=vconv, xtora=xtora, ytodec=ytodec,
                          gaiafignum=gaiafignum, fignum=specfignum, gaiafig=fig)
    # register the plotter itself as the click callback (it is callable)
    plotter.clickid = fig.canvas.mpl_connect('button_press_event', plotter)
def splat_3d(filename,xi=0,yi=0,vmin=None,vmax=None,button=1,dobaseline=False,exclude=None,
        smooth=None,smoothto=None,smoothtype='gaussian',order=1,savepre=None,**kwargs):
    """
    Plot a single spectrum extracted from position (xi, yi) of a 3D cube.

    Inputs:
        vmin,vmax - range over which to baseline and plot
        exclude - (internal) range to exclude from baseline fit
    """
    (dv, v0, p3, hdr, cube, xtora, ytodec, vconv, xunits,
     conversion_factor, units) = open_3d(filename)
    # fall back to neutral values for missing metadata
    units = "UNITS" if units is None else units
    xunits = "km/s" if xunits is None else xunits
    if conversion_factor == 0 or conversion_factor is None:
        conversion_factor = 1.0
    sp = splat_1d(vpars=[dv, v0, p3], hdr=hdr, spec=cube[:, yi, xi],
                  xtora=xtora, ytodec=ytodec, vconv=vconv, units=units,
                  conversion_factor=conversion_factor, xunits=xunits, **kwargs)
    sp.cube = cube
    return sp
def gaia(filename,estimator='max',axis=0):
    """
    Load a FITS cube and launch the interactive map/spectrum browser.

    estimator - how to collapse the cube into the displayed map:
        'max', 'int' (sum * dv), 'intdivmax', or a ".fits" filename whose
        first HDU supplies the map directly.
    axis - spectral axis of the cube; swapped to axis 0 if nonzero.

    Raises ValueError for an unrecognized estimator.
    """
    f = pyfits.open(filename)
    hdr = f[0].header
    cube = f[0].data
    dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
    dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
    dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
    # some headers use CDELTn instead of the CD matrix
    if dv is None: dv = hdr.get('CDELT3')
    if dr is None: dr = hdr.get('CDELT1')
    if dd is None: dd = hdr.get('CDELT2')
    xtora = lambda x: (x-p1+1)*dr+r0    # convert pixel coordinates to RA/Dec/Velocity
    ytodec = lambda y: (y-p2+1)*dd+d0
    vconv = lambda v: (v-p3+1)*dv+v0

    if axis > 0:
        cube = cube.swapaxes(0,axis)

    if estimator == 'max':
        p = where(isnan(cube),0,cube).max(axis=0)
    elif estimator == 'int':
        p = where(isnan(cube),0,cube).sum(axis=0) * dv
    elif estimator == 'intdivmax':
        # NOTE: MAD/nanmedian come from the commented-out `from mad import
        # MAD,nanmedian` at the top of this module; this branch fails with
        # NameError unless that import is restored.
        cut = MAD(cube.ravel()) + nanmedian(cube.ravel())
        if cut < 0:
            cut = 0
        m = where(isnan(cube),0,cube).max(axis=0)
        i = where(isnan(cube),0,cube).sum(axis=0) * dv
        p = where(i<0,0,i)/where(m<=cut,numpy.inf,m)
    elif estimator.endswith(".fits"):
        p = pyfits.open(estimator)[0].data
    else:
        # BUGFIX: previously an unknown estimator fell through, leaving `p`
        # unbound and crashing later with an opaque NameError.
        raise ValueError("Unrecognized estimator: %s" % estimator)

    mapplot(p,cube,vconv,xtora,ytodec)
def baseline_file(filename,outfilename,vmin=None,vmax=None,order=1,crop=False):
    """
    Baseline-subtract a FITS cube read from `filename`.

    vmin/vmax (in world velocity units) are converted to channel indices and
    forwarded to baseline() as the fit window.

    NOTE(review): `outfilename` and `crop` are accepted but never used, and
    the computed (bspec, bfit) result is discarded -- as written, this
    function has no observable effect.  Presumably a write step was intended;
    confirm before relying on it.
    """
    f = pyfits.open(filename)
    hdr = f[0].header
    cube = f[0].data.squeeze()
    # world-coordinate parameters; CD*_* preferred, CDELT* used as fallback
    dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
    dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
    dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
    if dv is None: dv = hdr.get('CDELT3')
    if dr is None: dr = hdr.get('CDELT1')
    if dd is None: dd = hdr.get('CDELT2')
    vconv = lambda v: (v-p3+1)*dv+v0
    varr = vconv(arange(cube.shape[-1]))
    # translate velocity bounds into the nearest channel indices
    if vmin is None: argvmin = None
    else: argvmin = argmin(abs(varr-vmin))
    if vmax is None: argvmax = None
    else: argvmax = argmin(abs(varr-vmax))
    bspec,bfit = baseline(cube,vmin=argvmin,vmax=argvmax,order=order)
def baseline(spectrum,xarr=None,xmin='default',xmax='default',order=1,quiet=True,exclude=None,
        mask=None):
    """
    Subtract a polynomial baseline from a spectrum.

    If xmin,xmax are not specified, defaults to ignoring first and last 10% of spectrum
    *unless* order > 1, in which case ignoring the ends tends to cause strange effects

    exclude is a set of start/end indices to ignore when baseline fitting
    (ignored by setting error to infinite in fitting procedure)

    Returns (spectrum - bestfit, fit_parameters).

    NOTE(review): when `mask` is given, the input `spectrum` array is
    modified in place (masked values set to 0) -- callers should pass a copy
    if they need the original preserved.
    """
    if xmin == 'default':
        # order <= 1: skip the outer 10% of channels; higher orders fit everything
        if order <= 1: xmin = floor( spectrum.shape[-1]*0.1 )
        else: xmin = 0
    elif xmin is None:
        xmin = 0
    if xmax == 'default':
        if order <= 1: xmax = ceil( spectrum.shape[-1]*0.9 )
        else: xmax = spectrum.shape[-1]
    elif xmax is None:
        xmax = spectrum.shape[-1]
    pguess = [1]*(order+1)  # flat initial guess for the polynomial coefficients
    if xarr is None:
        xarr = indices(spectrum.shape).squeeze()
    subxarr = xarr[xmin:xmax]
    def mpfitfun(data,err):
        # residual function in the form expected by mpfit
        def f(p,fjac=None): return [0,numpy.ravel((poly1d(p)(subxarr)-data)/err)]
        return f
    err = ones(spectrum.shape)
    # excluded/masked channels get a huge error so they carry ~zero fit weight
    if exclude is not None:
        err[exclude[0]:exclude[1]] = 1e10
    if mask is not None:
        if mask.dtype.name != 'bool': mask = mask.astype('bool')
        err[mask] = 1e10
        spectrum[mask] = 0
    if (spectrum!=spectrum).sum() > 0:  # NaN != NaN, so this counts NaNs
        print "There is an error in baseline: some values are NaN"
        import pdb; pdb.set_trace()
    mp = mpfit(mpfitfun(spectrum[xmin:xmax],err[xmin:xmax]),xall=pguess,quiet=quiet)
    fitp = mp.params
    bestfit = poly1d(fitp)(xarr).squeeze()
    return (spectrum-bestfit),fitp
def open_3d(filename):
    """
    Open a FITS cube and return everything needed to plot spectra from it:
    (dv, v0, p3, hdr, cube, xtora, ytodec, vconv, xunits, conversion_factor, units).

    A 4D cube is reduced to 3D by taking the first element of the leading axis.
    conversion_factor rescales the spectral axis to km/s (from m/s) or GHz
    (from Hz); it is 1.0 for anything else.
    """
    f = pyfits.open(filename)
    hdr = f[0].header
    cube = f[0].data
    if len(cube.shape) == 4: cube=cube[0,:,:,:]
    #cube = reshape(cube.mean(axis=2).mean(axis=1),[cube.shape[0],1,1])
    # world-coordinate parameters; CD*_* preferred, CDELT* used as fallback
    dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
    dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
    dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
    if dv is None: dv = hdr.get('CDELT3')
    if dr is None: dr = hdr.get('CDELT1')
    if dd is None: dd = hdr.get('CDELT2')
    xtora = lambda x: (x-p1+1)*dr+r0    # convert pixel coordinates to RA/Dec/Velocity
    ytodec = lambda y: (y-p2+1)*dd+d0
    vconv = lambda v: (v-p3+1)*dv+v0
    if hdr.get('CUNIT3') in ['m/s','M/S']:
        conversion_factor = 1000.0
        xunits = 'km/s' # change to km/s because you're converting units
    else:
        # xunits may be None here if the header carries no CUNIT3
        xunits = hdr.get('CUNIT3')
        if xunits in ("hz","Hz"):
            print "Converting from Hz to GHz"
            xunits = "GHz"
            conversion_factor = 1.0e9
        else:
            conversion_factor = 1.0
    units = hdr.get('BUNIT')
    return dv,v0,p3,hdr,cube,xtora,ytodec,vconv,xunits,conversion_factor,units
def open_1d(filename,specnum=0,wcstype='',errspecnum=None,maskspecnum=None):
    """
    Grabs all the relevant pieces of a 1d spectrum for plotting
    wcstype is the suffix on the WCS type to get to velocity/frequency/whatever

    specnum selects a row when the file stores several spectra (NAXIS == 2);
    it may be a list, in which case the selected rows are averaged.
    errspecnum/maskspecnum select optional error and mask rows.

    Returns the 15-tuple
    (dv, v0, p3, conversion_factor, hdr, spec, vconv, xtora, ytodec,
     specname, units, xunits, errspec, maskspec, reffreq).
    """
    f = pyfits.open(filename)
    hdr = f[0].header
    spec = f[0].data
    errspec  = None
    maskspec = None
    if hdr.get('NAXIS') == 2:
        if errspecnum is not None:
            errspec = spec[errspecnum,:]
        if maskspecnum is not None:
            maskspec = spec[maskspecnum,:]
        if isinstance(specnum,list):
            # allow averaging of multiple spectra (this should be modified
            # - each spectrum should be weighted by its error)
            spec = spec[specnum,:].mean(axis=0)
        elif isinstance(specnum,int):
            spec = spec[specnum,:]
        else:
            raise TypeError("Specnum is of wrong type (not a list of integers or an integer).  Type: %s" %
                    str(type(specnum)))
    elif hdr.get('NAXIS') > 2:
        raise ValueError("Too many axes for open_1d (splat_1d) - use cube instead")
    # spectral WCS: CD1_1 preferred, CDELT1 as fallback
    if hdr.get('CD1_1'+wcstype):
        dv,v0,p3 = hdr['CD1_1'+wcstype],hdr['CRVAL1'+wcstype],hdr['CRPIX1'+wcstype]
    else:
        dv,v0,p3 = hdr['CDELT1'+wcstype],hdr['CRVAL1'+wcstype],hdr['CRPIX1'+wcstype]
    # pick a human-readable source name: OBJECT, then galactic coords, then filename
    if hdr.get('OBJECT'):
        specname = hdr.get('OBJECT')
    elif hdr.get('GLON') and hdr.get('GLAT'):
        specname = "%s %s" % (hdr.get('GLON'),hdr.get('GLAT'))
    else:
        specname = filename.rstrip(".fits")
    if hdr.get('CUNIT1'+wcstype) in ['m/s','M/S']:
        conversion_factor = 1000.0
        xunits = 'km/s' # change to km/s because you're converting units
    else:
        xunits = hdr.get('CUNIT1'+wcstype)
        if xunits in ("hz","Hz"):
            print "Converting from Hz to GHz"
            xunits = "GHz"
            conversion_factor = 1.0e9
        else:
            conversion_factor = 1.0
    vconv = lambda v: ((v-p3+1)*dv+v0)/conversion_factor
    xtora=None
    ytodec=None
    # NOTE(review): raises AttributeError if the header has no BUNIT card
    # (hdr.get returns None); guard before calling .strip() if that can happen.
    units = hdr.get('BUNIT').strip()
    if hdr.get('CTYPE1'+wcstype):
        xtype = hdr.get('CTYPE1'+wcstype)
    else:
        xtype = 'VLSR'
    if hdr.get('REFFREQ'+wcstype):
        reffreq = hdr.get('REFFREQ'+wcstype)
    else:
        reffreq = None
    return dv,v0,p3,conversion_factor,hdr,spec,vconv,xtora,ytodec,specname,units,xunits,errspec,maskspec,reffreq
def splat_1d(filename=None,vmin=None,vmax=None,button=1,dobaseline=False,
        exclude=None,smooth=None,order=1,savepre=None,vcrop=True,
        vconv=None,vpars=None,hdr=None,spec=None,xtora=None,ytodec=None,
        specname=None,quiet=True,specnum=0,errspecnum=None,wcstype='',
        offset=0.0, continuum=0.0, annotatebaseline=False, plotspectrum=True,
        smoothto=None, xunits=None, units=None, conversion_factor=None,
        smoothtype='gaussian',convmode='valid',maskspecnum=None,**kwargs):
    """
    Wrapper for specplotter creation.  Works nicely with 1D spectra with well-defined
    FITS headers (i.e., CRVAL1, CRPIX1, CDELT1, and optionally CUNIT1 and CTYPE1)

    This documentation needs to be updated a lot... I implemented a lot of features
    without documenting them, which was a mistake

    The spectrum can be supplied two ways: either pre-parsed (vpars + vconv +
    hdr + spec + ... all given) or by `filename`, which is read via open_1d.
    The data are optionally velocity-cropped, smoothed (hanning/boxcar/
    gaussian kernels with downsampling), baselined, and plotted; the header's
    CRPIX1/CDELT1 are updated in place to track cropping and smoothing.

    Inputs:
        vmin,vmax - range over which to baseline and plot
        exclude - (internal) range to exclude from baseline fit
        vcrop - will vmin/vmax crop out data, or just set the plot limits?

    Returns the SpecPlotter instance.
    """
    if (vpars and vconv and hdr and spec is not None and xtora and ytodec
            and units and xunits and conversion_factor):
        # caller supplied a fully pre-parsed spectrum
        dv,v0,p3 = vpars
        errspec = None
        maskspec = None
        reffreq = None
        # NOTE(review): dead code -- the branch condition above already
        # requires `units` to be truthy, so this can never fire here.
        if units is None and kwargs.has_key('units'): units = kwargs.pop('units')
    else:
        dv,v0,p3,conversion_factor,hdr,spec,vconv,xtora,ytodec,specname_file,units,xunits,errspec,maskspec,reffreq = \
                open_1d(filename,specnum=specnum,wcstype=wcstype,errspecnum=errspecnum,maskspecnum=maskspecnum)
        if specname is None: specname=specname_file
        if units is None and kwargs.has_key('units'): units = kwargs.pop('units')
    # a string-valued `continuum` names a header card holding the continuum level
    if type(continuum)==type('str'):
        if hdr.get(continuum) is not None:
            continuum = hdr.get(continuum)
        else:
            raise ValueError("Continuum specified but none present.")
    varr = vconv(arange(spec.shape[0]))
    # translate the requested velocity window into channel indices; the
    # header reference pixel is shifted so WCS stays consistent after the crop
    if vmin is None or vcrop==False: argvmin = 0
    else:
        argvmin = argmin(abs(varr-vmin))
        if dv > 0:
            hdr.update('CRPIX1'+wcstype,p3-argvmin)
    if vmax is None or vcrop==False: argvmax = spec.shape[0]
    else:
        argvmax = argmin(abs(varr-vmax))
        if dv < 0:
            hdr.update('CRPIX1'+wcstype,p3-argvmax)
    if argvmin > argvmax:
        # descending velocity axis: swap so the slice below is non-empty
        argvmin,argvmax = argvmax,argvmin
        #if exclude is not None: exclude = exclude[::-1]
    elif argvmin == argvmax:
        raise Exception("Error: no data in velocity range %g:%g for source %s."
                % (vmin,vmax,filename))
    # these lines were meant to automatically put "exclude" into velocity
    # units; this is now done in the baseline code
    #if exclude is not None:
    #    exclude[0] = argmin(abs(varr-exclude[0]))
    #    exclude[1] = argmin(abs(varr-exclude[1]))
    #    exclude = array(exclude) - argvmin
    # velocity <-> pixel conversions for the cropped spectrum
    vconv = lambda v: ((v-p3+argvmin+1)*dv+v0) / conversion_factor
    ivconv = lambda V: p3-1-argvmin+(V*conversion_factor-v0)/dv
    specplot = spec[argvmin:argvmax]
    if errspec is not None: errspec=errspec[argvmin:argvmax]
    if maskspec is not None: maskspec=maskspec[argvmin:argvmax]
    if smoothto:
        # convert a target channel width into a smoothing factor
        smooth = abs(smoothto/dv)
    if smooth:
        roundsmooth = round(smooth) # can only downsample by integers
        # change fitter first
        if smoothtype == 'hanning':
            specplot = convolve(specplot,hanning(2+roundsmooth)/hanning(2+roundsmooth).sum(),convmode)[::roundsmooth]
            kernsize = smooth
            ones_sameshape = zeros(smooth+2)
            ones_sameshape[1:-1] = 1
        elif smoothtype == 'boxcar':
            specplot = convolve(specplot,ones(roundsmooth)/float(roundsmooth),convmode)[::roundsmooth]
            kernsize = roundsmooth
            ones_sameshape = ones(roundsmooth)
        elif smoothtype == 'gaussian':
            # kernel width is expressed as FWHM: sigma = smooth / sqrt(8 ln 2)
            speclen = specplot.shape[0]
            xkern  = linspace(-1*smooth,smooth,smooth*3)
            kernel = exp(-xkern**2/(2*(smooth/sqrt(8*log(2)))**2))
            kernel /= kernel.sum()
            kernsize = len(kernel)
            specplot = convolve(specplot,kernel,convmode)[::roundsmooth]
            ones_sameshape = zeros(roundsmooth*3)
            ones_sameshape[roundsmooth:-roundsmooth] = 1
        if errspec is not None:
            # propagate errors through the smoothing kernel in quadrature
            errspec = sqrt(convolve(errspec**2,ones_sameshape,convmode)[::roundsmooth]) / float(roundsmooth)
        if maskspec is not None:
            maskspec = array(convolve(maskspec,ones_sameshape,convmode)[::roundsmooth],dtype='bool')
            if maskspec.shape != specplot.shape: import pdb; pdb.set_trace()
        # this bit of code may also make sense, but I'm shifting the center pixel instead
        # b/c it's easier (?) to deal with velocity range
        #v0 += (abs(dv)*smooth - abs(dv))/2.0 # pixel center moves by half the original pixel size
        dv *= roundsmooth
        # recompute the reference pixel for the downsampled axis; the offset
        # depends on how the convolution mode trimmed/padded the edges
        if convmode == 'same':
            newrefpix = (p3-argvmin)/roundsmooth
        elif convmode == 'full':
            newrefpix = (p3-0.5-argvmin+kernsize/2.0)/roundsmooth
        elif convmode == 'valid':
            newrefpix = (p3-0.5-argvmin-kernsize/2.0)/roundsmooth
        # this was resolved by advanced guess-and check
        # but also, sort of makes sense: FITS refers to the *center* of a pixel.  You want to
        # shift 1/2 pixel to the right so that the first pixel goes from 0 to 1
        vconv = lambda v: ((v-newrefpix)*dv+v0)/conversion_factor
        ivconv = lambda V: newrefpix+(V*conversion_factor-v0)/dv
        hdr.update('CRPIX1'+wcstype,newrefpix+1)
        hdr.update('CDELT1'+wcstype,dv)
    sp = SpecPlotter(specplot, vconv=vconv, xtora=xtora, ytodec=ytodec,
            specname=specname, dv=dv/conversion_factor, hdr=hdr, reffreq=reffreq,
            errspec=errspec, maskspec=maskspec, xunits=xunits, **kwargs)
    if plotspectrum:
        sp.plotspec(button=button, cube=False, vmin=vmin, vmax=vmax,
                units=units, offset=offset, continuum=continuum,
                **kwargs)
    if dobaseline:
        sp.baseline(exclude=exclude,order=order,quiet=quiet,annotate=annotatebaseline)
    if plotspectrum: sp.refresh()
    # remember galactic coordinates when present (also used by savepre below)
    if hdr.get('GLON') and hdr.get('GLAT'):
        sp.glon = hdr.get('GLON')
        sp.glat = hdr.get('GLAT')
    if savepre is not None:
        # build a "G<lon><+/-><lat>_<molecule><transition>.png" filename
        glon,glat = sp.glon,sp.glat
        if glat < 0: pm=""
        else: pm = "+"
        savename = savepre + "G%07.3f%0s%07.3f_" % (glon,pm,glat) + hdr.get('MOLECULE').replace(' ','') + hdr.get('TRANSITI').replace(' ','')
        savefig(savename+'.png')
    return sp
def splat_tspec(filename,specnum=0,**kwargs):
    """
    Same as splat_1d for tspec data

    TSPEC files store rows [wavelength, spectrum, error]; a 3D file holds
    several such spectra and `specnum` selects one.  The wavelength axis is
    used directly (no WCS), with dv taken as the median channel spacing.
    """
    tdata = pyfits.getdata(filename)
    theader = pyfits.getheader(filename)
    if len(tdata.shape) == 3:
        tdata = tdata[specnum,:,:]
    wavelength = tdata[0,:]
    spectrum   = tdata[1,:]
    error      = tdata[2,:]
    # lookup-table conversions instead of a linear WCS
    vconv = lambda x: wavelength[x]
    ivconv = lambda x: argmin(abs(wavelength-x))
    specname='TSPEC'
    dv = median(wavelength[1:] - wavelength[:-1])
    sp = SpecPlotter(spectrum,vconv=vconv,specname=specname,dv=dv,hdr=theader)
    sp.plotspec(cube=False,units=theader.get('YUNITS'),xunits=theader.get('XUNITS'),**kwargs)
    return sp
| mit |
lbishal/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
    """Create a fresh scratch directory containing an empty 'mldata' subdir."""
    global tmpdir
    root = tempfile.mkdtemp()
    tmpdir = root
    os.makedirs(os.path.join(root, 'mldata'))
def teardown_tmpdata():
    """Remove the scratch directory created by setup_tmpdata, when present."""
    if tmpdir is None:
        return
    shutil.rmtree(tmpdir)
def test_mldata_filename():
    """mldata_filename should normalize dataset names to their slug form."""
    expected_pairs = [
        ('datasets-UCI iris', 'datasets-uci-iris'),
        ('news20.binary', 'news20binary'),
        ('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
        ('Nile Water Level', 'nile-water-level'),
        ('MNIST (original)', 'mnist-original'),
    ]
    for raw_name, slug in expected_pairs:
        assert_equal(mldata_filename(raw_name), slug)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
    """Test that fetch_mldata is able to download and cache a data set."""
    # monkeypatch the module-level urlopen with a mock serving a fake dataset;
    # the original MUST be restored in the finally block below
    _urlopen_ref = datasets.mldata.urlopen
    datasets.mldata.urlopen = mock_mldata_urlopen({
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    })
    try:
        mock = fetch_mldata('mock', data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data"]:
            assert_in(n, mock)

        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))

        # unknown dataset names should surface the mock's HTTPError
        assert_raises(datasets.mldata.HTTPError,
                      fetch_mldata, 'not_existing_name')
    finally:
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
    """A single-column dataset should expose 'data' but no 'target'."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        dataname = 'onecol'
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "data"]:
            assert_in(n, dset)
        assert_not_in("target", dset)

        assert_equal(dset.data.shape, (2, 3))
        assert_array_equal(dset.data, x)

        # transposing the data array
        dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
        assert_equal(dset.data.shape, (3, 2))
    finally:
        # always restore the real urlopen
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
    """Check target/data column selection: by default names, order, number, and name."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        y = sp.array([1, -1])
        z = sp.arange(12).reshape(4, 3)

        # by default: columns named 'label'/'data' map to target/data
        dataname = 'threecol-default'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: (
                {
                    'label': y,
                    'data': x,
                    'z': z,
                },
                ['z', 'data', 'label'],
            ),
        })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)

        # z is not transposed by the default-column logic
        assert_array_equal(dset.z, z.T)

        # by order: without label/data names, middle column is data, first is target
        dataname = 'threecol-order'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['y', 'x', 'z']), })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        assert_array_equal(dset.z, z.T)

        # by number: explicit target_name/data_name column indices
        dataname = 'threecol-number'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['z', 'x', 'y']),
        })

        dset = fetch_mldata(dataname, target_name=2, data_name=0,
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)

        assert_array_equal(dset.data, z)
        assert_array_equal(dset.target, y)

        # by name: explicit target_name/data_name column names
        dset = fetch_mldata(dataname, target_name='y', data_name='z',
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)
    finally:
        # always restore the real urlopen
        datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
akhilpm/Masters-Project | visual_analysis/tSNE_Embedding/visualize.py | 1 | 4037 | import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import manifold, datasets, decomposition, ensemble
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
def visualize_digits(trainX):
    """ plot the raw digits

    Tiles the first 100 rows of trainX (assumed flattened 28x28 MNIST
    images -- TODO confirm) into a 10x10 grid with a 1-pixel border per
    30x30 cell and shows it with a binary colormap.
    """
    n_img_per_row = 10
    img = np.zeros((30 * n_img_per_row, 30 * n_img_per_row))
    print img.shape
    for i in range(n_img_per_row):
        ix = 30 * i + 1
        for j in range(n_img_per_row):
            iy = 30 * j + 1
            # transpose so the digit is drawn in its natural orientation
            img[ix:ix + 28, iy:iy + 28] = np.transpose(trainX[i * n_img_per_row + j].reshape((28, 28)))

    plt.imshow(img, cmap=plt.cm.binary)
    plt.xticks([])
    plt.yticks([])
    plt.title('MNIST RAND visualization')
    plt.show()
# Scale and visualize the embedding vectors
def plot_embedding(X, Y, title=None):
    """Scatter a 2D embedding X, drawing each point as its label text.

    X is min-max rescaled to [0, 1] per axis before plotting (fails with a
    zero division if a coordinate is constant).  Y is assumed to hold digit
    labels 0-9, since colors are picked as Set1(Y/10) -- TODO confirm.
    """
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)

    plt.figure()
    ax = plt.subplot(111)
    for i in range(X.shape[0]):
        plt.text(X[i, 0], X[i, 1], str(Y[i]), color=plt.cm.Set1(Y[i] / 10.), fontdict={'weight': 'bold', 'size': 9})

    '''
    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(X.shape[0]):
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            imagebox = offsetbox.AnnotationBbox(offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),X[i])
            ax.add_artist(imagebox)
    '''
    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title)
def compute_tSNE_embedding(trainX, testX):
    # t-SNE embedding of the digits dataset
    """Embed train and the first 10k test rows with t-SNE; cache to .npy.

    NOTE(review): t-SNE has no out-of-sample transform, so fit_transform is
    run *independently* on train and test -- the two embeddings live in
    unrelated coordinate systems, which undermines the k-NN evaluation done
    in main().  Also n_components=10 may be rejected by sklearn's default
    barnes_hut method (requires < 4 components) -- confirm with the installed
    version.
    """
    print("Computing t-SNE embedding")
    tsne = manifold.TSNE(n_components=10, init='pca', random_state=0)
    t0 = time.time()
    X_train = tsne.fit_transform(trainX)
    print('Training set embedding Time : %f Minutes\n' %((time.time()-t0)/60))
    t0 = time.time()
    X_test = tsne.fit_transform(testX[0:10000])
    print('Testing set embedding Time : %f Minutes\n' %((time.time()-t0)/60))
    # cache results so later runs can skip the expensive embedding step
    np.save('X_train', X_train)
    np.save('X_test', X_test)
    return X_train, X_test
def compute_spectral_embedding(trainX, testX):
    ''' SpectralEmbedding of digits dataset

    Embeds train and test independently (same caveat as the t-SNE variant:
    the two embeddings are not in a shared coordinate system) and caches the
    results to X_train.npy / X_test.npy.
    '''
    print("Computing Spectral embedding")
    embedder = manifold.SpectralEmbedding(n_components=2, random_state=0, eigen_solver="arpack")
    t0 = time.time()
    X_train = embedder.fit_transform(trainX)
    print('Training set embedding Time : %f Minutes\n' %((time.time()-t0)/60))
    t0 = time.time()
    X_test = embedder.fit_transform(testX)
    print('Testing set embedding Time : %f Minutes\n' %((time.time()-t0)/60))
    # cache results for reuse (overwrites any t-SNE cache with the same names)
    np.save('X_train', X_train)
    np.save('X_test', X_test)
    return X_train, X_test
def main():
    """Embed cached MNIST features with t-SNE, then grid-search a k-NN
    classifier (k in 1..20, distance weights) on the embedded data and
    report accuracy and the confusion matrix on the first 10k test rows."""
    #set the timer
    start = time.time()

    #load the data
    trainX = np.load('trainX_feat.npy')
    print trainX.shape
    testX = np.load('testX_feat.npy')
    trainY = np.load('trainY_feat.npy')
    testY = np.load('testY_feat.npy')
    # evaluate on a 10k subset only, matching the embedding size
    testX = testX[0:10000]
    testY = testY[0:10000]

    #plot images of the digits
    #visualize_digits(trainX)
    #X_train = np.load('X_train.npy')
    #print X_train.shape
    #X_test = np.load('X_test.npy')
    X_train, X_test = compute_tSNE_embedding(trainX, testX)
    #plot_embedding(X_train, trainY, "tsne embedding of the digits (time %.2fs)" %(time.time() - start))
    #plt.show()
    parameters = {'n_neighbors' : list(np.arange(20)+1)}
    clf = GridSearchCV(KNeighborsClassifier(weights='distance', n_jobs=-1), parameters)
    #clf = svm.SVC(kernel=kernel.arc_cosine, cache_size=2048)
    clf.fit(X_train, trainY)
    pred = clf.predict(X_test)
    print accuracy_score(testY, pred)
    print confusion_matrix(testY, pred)
    #print(clf.best_params_)
    print('total : %d, correct : %d, incorrect : %d\n' %(len(pred), np.sum(pred == testY), np.sum(pred != testY)))
    print('Test Time : %f Minutes\n' %((time.time()-start)/60))
if __name__ == '__main__':
main() | mit |
csaladenes/blog | kendo romania/scripts/members_loader.py | 4 | 5619 | import pandas as pd, numpy as np, json
import clubs_loader
nyears=8
def get_members(path):
    """Load the federation member registry from an Excel sheet.

    Selects the identity/club/grade columns plus the last `nyears` per-year
    membership columns (the numeric/odd column labels like 231, '152.1' come
    from the spreadsheet layout -- presumably one column per season; confirm
    against the source file) and renames those last columns to calendar years
    2019-nyears .. 2018.
    """
    members=pd.read_excel(path,header=[1])
    members=members[[231, 'Nr. EKF',
       'Club', 'Unnamed: 3',
       'Numele', 'Prenumele',
       'Gen', 'Data naşterii',
       '1 kyu','practică',
       '1 dan', '2 dan',
       '3 dan', '4 dan',
       '5 dan', '6 dan',
       '7 dan', '8 dan',
                     151,
       152, '152.1',
       175, 179,
       197,214,'231.1']]
    # relabel the trailing per-season columns with their calendar years
    members.columns=list(members.columns[:-nyears])+list(range(2019-nyears,2019))
    return members
def get_transfer(name, tf, verbose=False):
    """Parse free-text club-transfer notes for one member.

    `tf` is either [] (no notes, returned unchanged) or a string with one
    note per line, each shaped like "ABC => DEF (2015)" (3-letter club codes
    around '=>', a 4-digit year somewhere in the note).  Returns a list of
    {'from', 'to', 'time'} dicts sorted by year; with verbose=True each dict
    also carries the cleaned note under 'orig'.  Years missing from the note
    are looked up in a small per-member fallback table.
    """
    if tf == []:
        return tf

    to_blank = [' ', '(', ')', 'Transfer:', '?', '/']
    to_replace = {'Hungary': 'HUN'}
    to_year = {'Gușu Rebeca': '2010'}

    def parse_note(raw):
        # strip layout noise and normalize country spellings
        cleaned = raw
        for token in to_blank:
            cleaned = cleaned.replace(token, '')
        for old, new in to_replace.items():
            cleaned = cleaned.replace(old, new)
        if '=>' in cleaned:
            pos = cleaned.find('=>')
            record = {'from': cleaned[pos - 3:pos],
                      'to': cleaned[pos + 2:pos + 5],
                      'time': cleaned[-4:]}
            if verbose:
                record['orig'] = cleaned
        else:
            print('error with transfer', cleaned)
            record = cleaned
        ##check years
        # infer year from wrong note order
        if '20' not in record['time']:
            if '20' in cleaned:
                record['time'] = cleaned[cleaned.find('20'):cleaned.find('20') + 4]
        # if still not inferred, fall back to the manual per-member table
        if '20' not in record['time']:
            record['time'] = to_year[name]
        record['time'] = int(record['time'])
        return record

    parsed = []
    for note in str(tf).split('\n'):
        if note not in ('', 'nan'):
            parsed.append(parse_note(note))
    return sorted(parsed, key=lambda rec: rec['time'])
def cleaner(members):
    """Expand the member registry into one record per (club, year, member).

    For each non-deleted member, the span of active years is inferred from
    exam dates (default start 2016 when no exams exist) and the status
    column; the dan grade for each year is the count of dan exams passed up
    to that year.  Club assignment per year comes from clubs_loader, which
    also accumulates the records; the result is flattened into a single
    DataFrame with 'year' and 'club' columns (transfer details dropped).
    """
    data={}
    # map Romanian status labels to the three buckets used downstream
    replace_active={'Activ':'Active','Inactiv':'Inactive','Free':'Inactive','AS':'Abroad',
                    'Transferat':'Abroad','Decedat':'Inactive'}
    active_redflags=['Deleted']
    for i in members.T.iteritems():
        active=i[1][231]  # column 231 holds the raw status label
        if active not in active_redflags:
            # exam-date columns; 'x' placeholders dropped
            grades=i[1][['1 kyu','1 dan','2 dan','3 dan','4 dan','5 dan',
               '6 dan','7 dan','8 dan']].replace('x',pd.NaT).dropna()
            grades0=i[1][['1 dan','2 dan','3 dan','4 dan','5 dan',
               '6 dan','7 dan','8 dan']].replace('x',pd.NaT).dropna()
            # index the dan grades by exam date so a [:year] slice counts
            # the exams passed up to that year
            df=pd.DataFrame(grades0)
            df.columns=['dan']
            df=df.reset_index().set_index('dan').sort_index()
            dummy={}
            grades=pd.to_datetime(grades.astype(str))
            active=replace_active[active]
            if len(grades)>0:
                mingrade=grades.min().year
                maxgrade=grades.max().year
            else:
                mingrade=2016 #default starting year
                maxgrade=2016
            if active=='Active':
                maxyear=2019 #default active max year
            else:
                maxyear=min(maxgrade+4,2019) #default active years grace period, if unknown
            dummy['name']=i[1]['Numele']+' '+i[1]['Prenumele']
            dummy['birth']=str(i[1]['Data naşterii'])[:10]
            dummy['gen']=i[1]['Gen']
            dummy['ekf']=i[1]['Nr. EKF']
            club=i[1]['Club']
            dummy['transfer']=get_transfer(dummy['name'],i[1]['Unnamed: 3'])
            for year in range(mingrade,maxyear):
                # only the final year carries the real status; earlier
                # years are assumed Active unless the roster says otherwise
                if year==maxyear-1:
                    dummy['active']=active
                else:
                    dummy['active']='Active'
                #get year from exams
                dummy['dan']=len(df[:str
                                    (year)])
                #get active from member list
                for j in i[1][-nyears:].index:
                    if year==j:
                        if i[1][-nyears:][j]=='Da':
                            dummy['active']=active
                        else:
                            dummy['active']='Inactive'
                #get club from transfers
                clubs=clubs_loader.get_club_by_year(dummy['transfer'],club,year,mingrade,maxyear)
                clubs=clubs[:1] #remove this step to double count. this limits to first club in transfer years
                for j in range(len(clubs)):
                    iclub=clubs_loader.replacer(clubs[j])
                    dummy['club']=iclub
                    dummy['age']=year-1-pd.to_datetime(dummy['birth']).year
                    data=clubs_loader.add_to_club(data,iclub,year,dummy.copy())
    # flatten the nested {club: {year: [records]}} structure into one frame
    all_data=[]
    for club in data:
        for year in data[club]:
            df=pd.DataFrame(data[club][year])
            df['year']=year
            df['club']=club
            df=df.drop('transfer',axis=1)
            all_data.append(df)
    return pd.concat(all_data).reset_index(drop=True) | mit |
cauchycui/scikit-learn | sklearn/preprocessing/__init__.py | 31 | 1235 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
# Public API of sklearn.preprocessing; mirrors the names imported above.
__all__ = [
    'Binarizer',
    'Imputer',
    'KernelCenterer',
    'LabelBinarizer',
    'LabelEncoder',
    'MultiLabelBinarizer',
    'MinMaxScaler',
    'MaxAbsScaler',
    'Normalizer',
    'OneHotEncoder',
    'RobustScaler',
    'StandardScaler',
    'add_dummy_feature',
    'PolynomialFeatures',
    'binarize',
    'normalize',
    'scale',
    'robust_scale',
    'maxabs_scale',
    'minmax_scale',
    'label_binarize',
]
| bsd-3-clause |
ilo10/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
# (estimator class, constructor kwargs) pairs exercised by every test below:
# both propagation variants, each with an RBF and a 2-neighbor k-NN kernel.
ESTIMATORS = [
    (label_propagation.LabelPropagation, {'kernel': 'rbf'}),
    (label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
    (label_propagation.LabelSpreading, {'kernel': 'rbf'}),
    (label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
    """The unlabeled sample (label -1) must be transduced to label 1."""
    X = [[1., 0.], [0., 2.], [1., 3.]]
    y = [0, 1, -1]
    for est_cls, est_params in ESTIMATORS:
        model = est_cls(**est_params).fit(X, y)
        nose.tools.assert_equal(model.transduction_[2], 1)
def test_distribution():
    """label_distributions_ for the unlabeled point should be ~[.5, .5].

    The k-NN kernel variants are skipped: changes in neighbor ordering make
    that check unstable (this skip was already present upstream, but left an
    unreachable assert after the ``continue`` -- removed here).
    """
    samples = [[1., 0.], [0., 1.], [1., 1.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        if parameters['kernel'] == 'knn':
            continue  # unstable test; changes in k-NN ordering break it
        assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
                                  np.array([.5, .5]), 2)
def test_predict():
    """predict() on a new point should return the propagated hard label."""
    X = [[1., 0.], [0., 2.], [1., 3.]]
    y = [0, 1, -1]
    for est_cls, est_params in ESTIMATORS:
        model = est_cls(**est_params).fit(X, y)
        assert_array_equal(model.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
    """A point equidistant from both classes should get ~[.5, .5] probabilities."""
    X = [[1., 0.], [0., 1.], [1., 2.5]]
    y = [0, 1, -1]
    for est_cls, est_params in ESTIMATORS:
        model = est_cls(**est_params).fit(X, y)
        assert_array_almost_equal(model.predict_proba([[1., 1.]]),
                                  np.array([[0.5, 0.5]]))
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/lines_bars_and_markers/scatter_with_legend.py | 1 | 1323 | """
===========================
Scatter plots with a legend
===========================
Also demonstrates how transparency of the markers
can be adjusted by giving ``alpha`` a value between
0 and 1.
"""
import matplotlib.pyplot as plt
from numpy.random import rand
# nodebox section
# NodeBox compatibility shim: when run inside NodeBox the module's __name__
# is '__builtin__', and drawing primitives (size, image, imagesize, W/HEIGHT
# globals -- presumably injected by NodeBox; confirm) are available.  In that
# mode each plot is saved to a temp PNG and placed on the NodeBox canvas;
# otherwise pltshow falls back to plt.show().
if __name__ == '__builtin__':
    # were in nodebox
    import os
    import tempfile
    W = 800
    inset = 20
    size(W, 600)
    plt.cla()
    plt.clf()
    plt.close('all')
    def tempimage():
        # unique temp PNG path; deleted manually after being drawn
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname
    imgx = 20
    imgy = 0
    def pltshow(plt, dpi=150):
        # render the current matplotlib figure onto the NodeBox canvas,
        # stacking successive figures vertically and growing the canvas
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        size(W, HEIGHT+dy+40)
else:
    def pltshow(mplpyplot):
        mplpyplot.show()
# nodebox section end
# Demo body: three overlapping scatter clouds of 750 random points each,
# with per-point marker sizes and 0.3 alpha, labeled by color in the legend.
fig, ax = plt.subplots()
for color in ['red', 'green', 'blue']:
    n = 750
    x, y = rand(2, n)
    scale = 200.0 * rand(n)  # random marker areas up to 200 pt^2
    ax.scatter(x, y, c=color, s=scale, label=color,
               alpha=0.3, edgecolors='none')

ax.legend()
ax.grid(True)

pltshow(plt)
| mit |
nmartensen/pandas | pandas/util/_decorators.py | 3 | 9679 | from pandas.compat import callable, signature
from pandas._libs.lib import cache_readonly # noqa
import types
import warnings
from textwrap import dedent
from functools import wraps, update_wrapper
def deprecate(name, alternative, alt_name=None, klass=None,
              stacklevel=2):
    """
    Return a new function that emits a deprecation warning on use.

    Parameters
    ----------
    name : str
        Name of function to deprecate
    alternative : str
        Name of function to use instead
    alt_name : str, optional
        Name to use in preference of alternative.__name__
    klass : Warning, default FutureWarning
    stacklevel : int, default 2
    """
    shown_name = alt_name or alternative.__name__
    warning_cls = klass or FutureWarning

    def wrapper(*args, **kwargs):
        # warn every call, then delegate unchanged to the replacement
        warnings.warn(
            "{name} is deprecated. Use {alt_name} instead".format(
                name=name, alt_name=shown_name),
            warning_cls, stacklevel=stacklevel)
        return alternative(*args, **kwargs)
    return wrapper
def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
    """
    Decorator to deprecate a keyword argument of a function.

    Parameters
    ----------
    old_arg_name : str
        Name of argument in function to deprecate
    new_arg_name : str
        Name of preferred argument in function
    mapping : dict or callable
        If mapping is present, use it to translate old arguments to
        new arguments. A callable must do its own value checking;
        values not found in a dict will be forwarded unchanged.

    Notes
    -----
    An old keyword passed explicitly as None is treated as "not given" and
    silently dropped (the None sentinel below cannot distinguish the two);
    similarly, a new keyword whose value is None does not trigger the
    "can only specify one" TypeError.

    Examples
    --------
    The following deprecates 'cols', using 'columns' instead

    >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
    ... def f(columns=''):
    ...     print(columns)
    ...
    >>> f(columns='should work ok')
    should work ok
    >>> f(cols='should raise warning')
    FutureWarning: cols is deprecated, use columns instead
      warnings.warn(msg, FutureWarning)
    should raise warning
    >>> f(cols='should error', columns="can\'t pass do both")
    TypeError: Can only specify 'cols' or 'columns', not both
    >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
    ... def f(new=False):
    ...     print('yes!' if new else 'no!')
    ...
    >>> f(old='yes')
    FutureWarning: old='yes' is deprecated, use new=True instead
      warnings.warn(msg, FutureWarning)
    yes!
    """
    # validate `mapping` eagerly, at decoration time
    if mapping is not None and not hasattr(mapping, 'get') and \
            not callable(mapping):
        raise TypeError("mapping from old to new argument values "
                        "must be dict or callable!")

    def _deprecate_kwarg(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # pop so the old name is never forwarded to the wrapped function
            old_arg_value = kwargs.pop(old_arg_name, None)
            if old_arg_value is not None:
                if mapping is not None:
                    # dict-like mappings fall back to the raw value;
                    # callables are trusted to handle any input
                    if hasattr(mapping, 'get'):
                        new_arg_value = mapping.get(old_arg_value,
                                                    old_arg_value)
                    else:
                        new_arg_value = mapping(old_arg_value)
                    msg = ("the {old_name}={old_val!r} keyword is deprecated, "
                           "use {new_name}={new_val!r} instead"
                           ).format(old_name=old_arg_name,
                                    old_val=old_arg_value,
                                    new_name=new_arg_name,
                                    new_val=new_arg_value)
                else:
                    new_arg_value = old_arg_value
                    msg = ("the '{old_name}' keyword is deprecated, "
                           "use '{new_name}' instead"
                           ).format(old_name=old_arg_name,
                                    new_name=new_arg_name)

                warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
                # refuse a call that supplies both spellings (new one non-None)
                if kwargs.get(new_arg_name, None) is not None:
                    msg = ("Can only specify '{old_name}' or '{new_name}', "
                           "not both").format(old_name=old_arg_name,
                                              new_name=new_arg_name)
                    raise TypeError(msg)
                else:
                    kwargs[new_arg_name] = new_arg_value
            return func(*args, **kwargs)
        return wrapper
    return _deprecate_kwarg
# Substitution and Appender are derived from matplotlib.docstring (1.1.0)
# module http://matplotlib.org/users/license.html
class Substitution(object):
    """
    Decorator that %-substitutes values into the target function's docstring.

    Robust to ``func.__doc__ is None`` (e.g. when the interpreter runs with
    -OO).  Construct with either positional or keyword substitution values
    (not both) and apply to a function whose docstring contains matching
    ``%s`` / ``%(name)s`` placeholders:

        sub_author_name = Substitution(author='Jason')

        @sub_author_name
        def some_function(x):
            "%(author)s wrote this function"
        # some_function.__doc__ is now "Jason wrote this function"

        sub_first_last_names = Substitution('Edgar Allen', 'Poe')

        @sub_first_last_names
        def some_function(x):
            "%s %s wrote the Raven"
    """

    def __init__(self, *args, **kwargs):
        # positional and keyword values would be ambiguous together
        if args and kwargs:
            raise AssertionError("Only positional or keyword args are allowed")
        self.params = args or kwargs

    def __call__(self, func):
        # leave a missing/empty docstring untouched
        if func.__doc__:
            func.__doc__ = func.__doc__ % self.params
        return func

    def update(self, *args, **kwargs):
        """
        Update self.params with supplied args.

        If called, we assume self.params is a dict.
        """
        self.params.update(*args, **kwargs)

    @classmethod
    def from_params(cls, params):
        """
        Build a Substitution holding a *reference* to ``params`` rather than
        a copy, so later mutations of the mutable sequence/dict are seen at
        decoration time.
        """
        instance = cls()
        instance.params = params
        return instance
class Appender(object):
    """
    A function decorator that appends an addendum to the docstring of the
    target function.

    Robust even if ``func.__doc__`` is None (for example, if -OO was passed
    to the interpreter).  An optional ``join`` string is used to join the
    original docstring and the addendum.  e.g.::

        add_copyright = Appender("Copyright (c) 2009", join='\n')

        @add_copyright
        def my_dog(has='fleas'):
            "This docstring will have a copyright below"
            pass
    """

    def __init__(self, addendum, join='', indents=0):
        # Optionally re-indent the addendum before storing it.
        if indents > 0:
            addendum = indent(addendum, indents=indents)
        self.addendum = addendum
        self.join = join

    def __call__(self, func):
        original = func.__doc__ or ''
        # Normalize a falsy addendum to '' (mirrors the stored attribute).
        self.addendum = self.addendum or ''
        pieces = [original, self.addendum]
        func.__doc__ = dedent(self.join.join(pieces))
        return func
def indent(text, indents=1):
    """
    Indent every line of *text* after the first by *indents* units.

    Returns '' for empty input or any non-string value.
    """
    if not isinstance(text, str) or not text:
        return ''
    separator = ''.join(['\n'] + [' '] * indents)
    return separator.join(text.split('\n'))
def make_signature(func):
    """
    Build a representation of the arg list of a func call, with any defaults.

    Returns a 2-tuple ``(args, spec.args)``: ``args`` is a list of argument
    strings (defaults rendered as ``name=repr(default)``, plus ``*varargs``
    and ``**keywords`` entries when present), and ``spec.args`` is the list
    of plain positional argument names.

    Examples
    --------
    >>> def f(a,b,c=2) :
    >>> return a*b*c
    >>> print(make_signature(f))
    (['a', 'b', 'c=2'], ['a', 'b', 'c'])
    """
    # NOTE(review): `signature` is assumed to be a getfullargspec-style helper
    # defined elsewhere in this module (attributes .args, .defaults, .varargs,
    # .keywords) rather than inspect.signature -- confirm against the imports.
    spec = signature(func)
    if spec.defaults is None:
        # No defaults at all: pad every position with the '' sentinel.
        n_wo_defaults = len(spec.args)
        defaults = ('',) * n_wo_defaults
    else:
        # Defaults align with the *last* len(spec.defaults) arguments.
        n_wo_defaults = len(spec.args) - len(spec.defaults)
        defaults = ('',) * n_wo_defaults + spec.defaults
    args = []
    for i, (var, default) in enumerate(zip(spec.args, defaults)):
        # '' marks "no default"; otherwise render as name=repr(default).
        args.append(var if default == '' else var + '=' + repr(default))
    if spec.varargs:
        args.append('*' + spec.varargs)
    if spec.keywords:
        args.append('**' + spec.keywords)
    return args, spec.args
class docstring_wrapper(object):
    """
    Decorator to wrap a function and provide
    a dynamically evaluated doc-string.

    Parameters
    ----------
    func : callable
    creator : callable
        return the doc-string
    default : str, optional
        return this doc-string on error
    """

    # Attributes copied from the wrapped function when present.
    _attrs = ['__module__', '__name__',
              '__qualname__', '__annotations__']

    def __init__(self, func, creator, default=None):
        self.func = func
        self.creator = creator
        self.default = default
        present = [attr for attr in self._attrs if hasattr(func, attr)]
        update_wrapper(self, func, present)

    def __get__(self, instance, cls=None):
        # Accessed on the class itself: hand back the descriptor unchanged.
        if instance is None:
            return self
        # Accessed on an instance: bind like an ordinary method.
        return types.MethodType(self, instance)

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)

    @property
    def __doc__(self):
        # Evaluate the doc-string lazily; fall back to `default` (or the
        # error text) if the creator raises.
        try:
            return self.creator()
        except Exception as exc:
            return self.default or str(exc)
| bsd-3-clause |
mehdidc/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 45 | 5463 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
l_x = float(l_x)
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
    """ Compute the tomography design matrix.

    Parameters
    ----------
    l_x : int
        linear size of image array
    n_dir : int
        number of angles at which projections are acquired.

    Returns
    -------
    p : sparse matrix of shape (n_dir l_x, l_x**2)
    """
    X, Y = _generate_center_coordinates(l_x)
    # Projection angles evenly spread over [0, pi).
    angles = np.linspace(0, np.pi, n_dir, endpoint=False)
    data_inds, weights, camera_inds = [], [], []
    # Flat pixel indices, doubled because _weights splits each pixel's
    # contribution between two adjacent detector bins.
    data_unravel_indices = np.arange(l_x ** 2)
    data_unravel_indices = np.hstack((data_unravel_indices,
                                      data_unravel_indices))
    for i, angle in enumerate(angles):
        # Rotate pixel-center x-coordinates into the detector frame.
        Xrot = np.cos(angle) * X - np.sin(angle) * Y
        inds, w = _weights(Xrot, dx=1, orig=X.min())
        # Drop contributions that fall outside the detector.
        mask = np.logical_and(inds >= 0, inds < l_x)
        weights += list(w[mask])
        # Offset detector-bin indices by the per-angle row block.
        camera_inds += list(inds[mask] + i * l_x)
        data_inds += list(data_unravel_indices[mask])
    proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
    return proj_operator
def generate_synthetic_data(l_img=None):
    """ Synthetic binary data: outlines of random smoothed blobs in a disk.

    Parameters
    ----------
    l_img : int, optional
        Linear image size in pixels; defaults to the module-level ``l``,
        preserving the original no-argument behavior.

    Returns
    -------
    ndarray of bool, shape (l_img, l_img)
        Boundary pixels of the blob mask (mask minus its erosion).
    """
    if l_img is None:
        l_img = l  # fall back to the module-level image size
    rs = np.random.RandomState(0)
    # Must be an int: array shapes cannot be floats in modern NumPy
    # (the original used `36.`).
    n_pts = 36
    x, y = np.ogrid[0:l_img, 0:l_img]
    # Circular field of view centered in the image.
    mask_outer = (x - l_img / 2) ** 2 + (y - l_img / 2) ** 2 < (l_img / 2) ** 2
    mask = np.zeros((l_img, l_img))
    points = l_img * rs.rand(2, n_pts)
    # Plain `int`: `np.int` was removed from NumPy.
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
    mask = ndimage.gaussian_filter(mask, sigma=l_img / n_pts)
    res = np.logical_and(mask > mask.mean(), mask_outer)
    # XOR rather than `-`: subtracting boolean arrays is an error in modern
    # NumPy; erosion is a subset of res, so XOR yields the boundary.
    return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128
# NOTE(review): l / 7. passes a float as the number of projection angles;
# np.linspace requires an integer num in modern NumPy -- confirm/int() this.
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
# Add Gaussian noise to the projections.
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
# Plot original image and both reconstructions side by side.
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
                    right=1)
plt.show()
| bsd-3-clause |
nicolas998/Op_Interpolated | 06_Codigos/viejos/Cron_Figura_SimSimple.py | 2 | 1327 | #!/usr/bin/env python
import os
import datetime as dt
import pandas as pd
from multiprocessing import Pool
import numpy as np
# NOTE(review): datetime, pandas and Pool are imported but currently unused
# (the parallel execution block below is commented out).
#-------------------------------------------------------------------
# LOCAL PATHS AND SETTINGS
#-------------------------------------------------------------------
ruta_qsim = '/home/renea998/Simulaciones/'
ruta_ejec = '/home/renea998/scripts/Figuras_Qsim.py'
ruta_Figuras = '/home/renea998/FigSimulaciones/'
servidor = 'torresiata@siata.gov.co:/var/www/nicolas/FigSimulaciones/'
#-------------------------------------------------------------------
# BUILD THE LISTS OF COMMANDS THAT CREATE AND UPLOAD THE FIGURES
#-------------------------------------------------------------------
# Pick 50 random node ids out of 1..290, and always include node 1 first.
Lista = range(1,291)
ListaSelect = np.random.choice(Lista,50)
ListaSelect = ListaSelect.tolist()
ListaSelect.insert(0,1)
Lc1 = []
for i in ListaSelect:
    Lc1.append( ruta_ejec+' '+ruta_qsim+' '+str(i)+' '+ruta_Figuras+'Qsim_nodo_'+str(i)+'.png')
# Run each figure-generation command serially.
for i in Lc1:
    os.system(i)
#-------------------------------------------------------------------
# RUN THE COMMANDS IN PARALLEL (currently disabled; serial loop above is used)
#-------------------------------------------------------------------
#try:
# p = Pool(processes = 10)
# p.map(os.system, Lc1)
#finally:
# p.close()
# p.join()
# Upload the generated figures to the remote server.
comando = 'scp '+ruta_Figuras+'Qsim_nodo_*.png '+servidor
os.system(comando)
| gpl-3.0 |
great-expectations/great_expectations | great_expectations/dataset/sqlalchemy_dataset.py | 1 | 85651 | import inspect
import logging
import traceback
import uuid
import warnings
from datetime import datetime
from functools import wraps
from typing import Dict, Iterable, List
import numpy as np
import pandas as pd
from dateutil.parser import parse
from great_expectations.core.util import (
convert_to_json_serializable,
get_sql_dialect_floating_point_infinity_value,
)
from great_expectations.data_asset import DataAsset
from great_expectations.data_asset.util import DocInherit, parse_result_format
from great_expectations.dataset.util import (
check_sql_engine_dialect,
get_approximate_percentile_disc_sql,
)
from great_expectations.util import import_library_module
from .dataset import Dataset
from .pandas_dataset import PandasDataset
logger = logging.getLogger(__name__)
try:
import sqlalchemy as sa
from sqlalchemy.dialects import registry
from sqlalchemy.engine import reflection
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.exc import DatabaseError, ProgrammingError
from sqlalchemy.sql.elements import Label, TextClause, WithinGroup, quoted_name
from sqlalchemy.sql.expression import BinaryExpression, literal
from sqlalchemy.sql.operators import custom_op
from sqlalchemy.sql.selectable import CTE, Select
except ImportError:
logger.debug(
"Unable to load SqlAlchemy context; install optional sqlalchemy dependency for support"
)
sa = None
registry = None
reflection = None
BinaryExpression = None
literal = None
Select = None
CTE = None
custom_op = None
Label = None
WithinGroup = None
TextClause = None
DefaultDialect = None
ProgrammingError = None
try:
from sqlalchemy.engine.row import Row
except ImportError:
try:
from sqlalchemy.engine.row import RowProxy
Row = RowProxy
except ImportError:
logger.debug(
"Unable to load SqlAlchemy Row class; please upgrade you sqlalchemy installation to the latest version."
)
RowProxy = None
Row = None
try:
import psycopg2
import sqlalchemy.dialects.postgresql.psycopg2 as sqlalchemy_psycopg2
except (ImportError, KeyError):
sqlalchemy_psycopg2 = None
try:
import sqlalchemy_redshift.dialect
except ImportError:
sqlalchemy_redshift = None
try:
import snowflake.sqlalchemy.snowdialect
# Sometimes "snowflake-sqlalchemy" fails to self-register in certain environments, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
registry.register("snowflake", "snowflake.sqlalchemy", "dialect")
except (ImportError, KeyError, AttributeError):
snowflake = None
try:
import pybigquery.sqlalchemy_bigquery
# Sometimes "pybigquery.sqlalchemy_bigquery" fails to self-register in certain environments, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
registry.register("bigquery", "pybigquery.sqlalchemy_bigquery", "BigQueryDialect")
try:
getattr(pybigquery.sqlalchemy_bigquery, "INTEGER")
bigquery_types_tuple = None
except AttributeError:
# In older versions of the pybigquery driver, types were not exported, so we use a hack
logger.warning(
"Old pybigquery driver version detected. Consider upgrading to 0.4.14 or later."
)
from collections import namedtuple
BigQueryTypes = namedtuple(
"BigQueryTypes", sorted(pybigquery.sqlalchemy_bigquery._type_map)
)
bigquery_types_tuple = BigQueryTypes(**pybigquery.sqlalchemy_bigquery._type_map)
except (ImportError, AttributeError):
bigquery_types_tuple = None
pybigquery = None
try:
# SQLAlchemy does not export the "INT" type for the MS SQL Server dialect; however "INT" is supported by the engine.
# Since SQLAlchemy exports the "INTEGER" type for the MS SQL Server dialect, alias "INT" to the "INTEGER" type.
import sqlalchemy.dialects.mssql as mssqltypes
try:
getattr(mssqltypes, "INT")
except AttributeError:
mssqltypes.INT = mssqltypes.INTEGER
except ImportError:
pass
try:
import pyathena.sqlalchemy_athena
except ImportError:
pyathena = None
class SqlAlchemyBatchReference:
    """
    Lightweight reference to a SQL batch: an engine plus either a table
    name, a custom query, or (for BigQuery) both.
    """

    def __init__(self, engine, table_name=None, schema=None, query=None):
        self._engine = engine
        if table_name is None and query is None:
            raise ValueError("Table_name or query must be specified")
        self._table_name = table_name
        self._schema = schema
        self._query = query

    def get_init_kwargs(self):
        """Return keyword arguments suitable for building a dataset."""
        kwargs = {"engine": self._engine}
        if self._table_name and self._query:
            # This is allowed in BigQuery where a temporary table name must be
            # provided *with* the custom sql to execute.
            kwargs["table_name"] = self._table_name
            kwargs["custom_sql"] = self._query
        elif self._table_name:
            kwargs["table_name"] = self._table_name
        else:
            kwargs["custom_sql"] = self._query
        if self._schema:
            kwargs["schema"] = self._schema
        return kwargs
class MetaSqlAlchemyDataset(Dataset):
    """
    Dataset mixin implementing the ``column_map_expectation`` decorator for
    SQLAlchemy-backed data: a decorated expectation returns a SQLAlchemy
    filter expression, and the decorator turns it into count and
    unexpected-value queries plus the formatted result object.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    def column_map_expectation(cls, func):
        """For SqlAlchemy, this decorator allows individual column_map_expectations to simply return the filter
        that describes the expected condition on their data.
        The decorator will then use that filter to obtain unexpected elements, relevant counts, and return the formatted
        object.
        """
        # Argument names of the wrapped expectation, minus `self`.
        argspec = inspect.getfullargspec(func)[0][1:]

        @cls.expectation(argspec)
        @wraps(func)
        def inner_wrapper(
            self, column, mostly=None, result_format=None, *args, **kwargs
        ):
            if self.batch_kwargs.get("use_quoted_name"):
                column = quoted_name(column, quote=True)
            if result_format is None:
                result_format = self.default_expectation_args["result_format"]
            result_format = parse_result_format(result_format)
            if result_format["result_format"] == "COMPLETE":
                warnings.warn(
                    "Setting result format to COMPLETE for a SqlAlchemyDataset can be dangerous because it will not limit the number of returned results."
                )
                unexpected_count_limit = None
            else:
                unexpected_count_limit = result_format["partial_unexpected_count"]
            expected_condition: BinaryExpression = func(self, column, *args, **kwargs)
            # Added to prepare for when an ignore_values argument is added to the expectation
            ignore_values: list = [None]
            if func.__name__ in [
                "expect_column_values_to_not_be_null",
                "expect_column_values_to_be_null",
            ]:
                ignore_values = []
                # Counting the number of unexpected values can be expensive when there is a large
                # number of np.nan values.
                # This only happens on expect_column_values_to_not_be_null expectations.
                # Since there is no reason to look for most common unexpected values in this case,
                # we will instruct the result formatting method to skip this step.
                result_format["partial_unexpected_count"] = 0
            ignore_values_conditions: List[BinaryExpression] = []
            if (
                len(ignore_values) > 0
                and None not in ignore_values
                or len(ignore_values) > 1
                and None in ignore_values
            ):
                ignore_values_conditions += [
                    sa.column(column).in_(
                        [val for val in ignore_values if val is not None]
                    )
                ]
            if None in ignore_values:
                # IN () uses `==`, which never matches NULL; test IS NULL separately.
                ignore_values_conditions += [sa.column(column).is_(None)]
            ignore_values_condition: BinaryExpression
            if len(ignore_values_conditions) > 1:
                ignore_values_condition = sa.or_(*ignore_values_conditions)
            elif len(ignore_values_conditions) == 1:
                ignore_values_condition = ignore_values_conditions[0]
            else:
                # Nothing to ignore: use a constant-false condition so the
                # NOT(ignore) filter below matches every row.
                ignore_values_condition = BinaryExpression(
                    sa.literal(False), sa.literal(True), custom_op("=")
                )
            count_query: Select
            if self.sql_engine_dialect.name.lower() == "mssql":
                count_query = self._get_count_query_mssql(
                    expected_condition=expected_condition,
                    ignore_values_condition=ignore_values_condition,
                )
            else:
                count_query = self._get_count_query_generic_sqlalchemy(
                    expected_condition=expected_condition,
                    ignore_values_condition=ignore_values_condition,
                )
            count_results: dict = dict(self.engine.execute(count_query).fetchone())
            # Handle case of empty table gracefully:
            if (
                "element_count" not in count_results
                or count_results["element_count"] is None
            ):
                count_results["element_count"] = 0
            if "null_count" not in count_results or count_results["null_count"] is None:
                count_results["null_count"] = 0
            if (
                "unexpected_count" not in count_results
                or count_results["unexpected_count"] is None
            ):
                count_results["unexpected_count"] = 0
            # Some engines may return Decimal from count queries (lookin' at you MSSQL)
            # Convert to integers
            count_results["element_count"] = int(count_results["element_count"])
            count_results["null_count"] = int(count_results["null_count"])
            count_results["unexpected_count"] = int(count_results["unexpected_count"])
            # limit doesn't compile properly for oracle so we will append rownum to query string later
            if self.engine.dialect.name.lower() == "oracle":
                raw_query = (
                    sa.select([sa.column(column)])
                    .select_from(self._table)
                    .where(
                        sa.and_(
                            sa.not_(expected_condition),
                            sa.not_(ignore_values_condition),
                        )
                    )
                )
                query = str(
                    raw_query.compile(
                        self.engine, compile_kwargs={"literal_binds": True}
                    )
                )
                query += "\nAND ROWNUM <= %d" % unexpected_count_limit
            else:
                query = (
                    sa.select([sa.column(column)])
                    .select_from(self._table)
                    .where(
                        sa.and_(
                            sa.not_(expected_condition),
                            sa.not_(ignore_values_condition),
                        )
                    )
                    .limit(unexpected_count_limit)
                )
            unexpected_query_results = self.engine.execute(query)
            nonnull_count: int = (
                count_results["element_count"] - count_results["null_count"]
            )
            if "output_strftime_format" in kwargs:
                # Re-render datetime-like unexpected values with the requested format.
                output_strftime_format = kwargs["output_strftime_format"]
                maybe_limited_unexpected_list = []
                for x in unexpected_query_results.fetchall():
                    if isinstance(x[column], str):
                        col = parse(x[column])
                    else:
                        col = x[column]
                    maybe_limited_unexpected_list.append(
                        datetime.strftime(col, output_strftime_format)
                    )
            else:
                maybe_limited_unexpected_list = [
                    x[column] for x in unexpected_query_results.fetchall()
                ]
            success_count = nonnull_count - count_results["unexpected_count"]
            success, percent_success = self._calc_map_expectation_success(
                success_count, nonnull_count, mostly
            )
            return_obj = self._format_map_output(
                result_format,
                success,
                count_results["element_count"],
                nonnull_count,
                count_results["unexpected_count"],
                maybe_limited_unexpected_list,
                None,
            )
            if func.__name__ in [
                "expect_column_values_to_not_be_null",
                "expect_column_values_to_be_null",
            ]:
                # These results are unnecessary for the above expectations
                del return_obj["result"]["unexpected_percent_nonmissing"]
                del return_obj["result"]["missing_count"]
                del return_obj["result"]["missing_percent"]
                try:
                    del return_obj["result"]["partial_unexpected_counts"]
                    del return_obj["result"]["partial_unexpected_list"]
                except KeyError:
                    pass
            return return_obj

        inner_wrapper.__name__ = func.__name__
        inner_wrapper.__doc__ = func.__doc__
        return inner_wrapper

    def _get_count_query_mssql(
        self,
        expected_condition: BinaryExpression,
        ignore_values_condition: BinaryExpression,
    ) -> Select:
        """Build element/null/unexpected counts for MSSQL via a temp table
        (MSSQL cannot evaluate the combined CASE aggregation in one pass)."""
        # mssql expects all temporary table names to have a prefix '#'
        temp_table_name: str = f"#ge_tmp_{str(uuid.uuid4())[:8]}"
        with self.engine.begin():
            metadata: sa.MetaData = sa.MetaData(self.engine)
            temp_table_obj: sa.Table = sa.Table(
                temp_table_name,
                metadata,
                sa.Column("condition", sa.Integer, primary_key=False, nullable=False),
            )
            temp_table_obj.create(self.engine, checkfirst=True)
        # 1 for each row that is unexpected and not ignored, else 0.
        count_case_statement: List[sa.sql.elements.Label] = [
            sa.case(
                [
                    (
                        sa.and_(
                            sa.not_(expected_condition),
                            sa.not_(ignore_values_condition),
                        ),
                        1,
                    )
                ],
                else_=0,
            ).label("condition")
        ]
        inner_case_query: sa.sql.dml.Insert = temp_table_obj.insert().from_select(
            count_case_statement,
            sa.select(count_case_statement).select_from(self._table),
        )
        self.engine.execute(inner_case_query)
        element_count_query: Select = (
            sa.select(
                [
                    sa.func.count().label("element_count"),
                    sa.func.sum(sa.case([(ignore_values_condition, 1)], else_=0)).label(
                        "null_count"
                    ),
                ]
            )
            .select_from(self._table)
            .alias("ElementAndNullCountsSubquery")
        )
        unexpected_count_query: Select = (
            sa.select(
                [
                    sa.func.sum(sa.column("condition")).label("unexpected_count"),
                ]
            )
            .select_from(temp_table_obj)
            .alias("UnexpectedCountSubquery")
        )
        count_query: Select = sa.select(
            [
                element_count_query.c.element_count,
                element_count_query.c.null_count,
                unexpected_count_query.c.unexpected_count,
            ]
        )
        return count_query

    def _get_count_query_generic_sqlalchemy(
        self,
        expected_condition: BinaryExpression,
        ignore_values_condition: BinaryExpression,
    ) -> Select:
        """Single-pass element/null/unexpected count query for dialects that
        support the combined CASE aggregations."""
        return sa.select(
            [
                sa.func.count().label("element_count"),
                sa.func.sum(sa.case([(ignore_values_condition, 1)], else_=0)).label(
                    "null_count"
                ),
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.and_(
                                    sa.not_(expected_condition),
                                    sa.not_(ignore_values_condition),
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("unexpected_count"),
            ]
        ).select_from(self._table)
class SqlAlchemyDataset(MetaSqlAlchemyDataset):
"""
--ge-feature-maturity-info--
id: validation_engine_sqlalchemy
title: Validation Engine - SQLAlchemy
icon:
short_description: Use SQLAlchemy to validate data in a database
description: Use SQLAlchemy to validate data in a database
how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/creating_batches/how_to_load_a_database_table_or_a_query_result_as_a_batch.html
maturity: Production
maturity_details:
api_stability: High
implementation_completeness: Moderate (temp table handling/permissions not universal)
unit_test_coverage: High
integration_infrastructure_test_coverage: N/A
documentation_completeness: Minimal (none)
bug_risk: Low
--ge-feature-maturity-info--"""
@classmethod
def from_dataset(cls, dataset=None):
if isinstance(dataset, SqlAlchemyDataset):
return cls(table_name=str(dataset._table.name), engine=dataset.engine)
else:
raise ValueError("from_dataset requires a SqlAlchemy dataset")
    def __init__(
        self,
        table_name=None,
        engine=None,
        connection_string=None,
        custom_sql=None,
        schema=None,
        *args,
        **kwargs,
    ):
        """
        Create a dataset backed by an existing table, or by a temporary table
        materialized from ``custom_sql``.

        Parameters
        ----------
        table_name : str, optional
            Table to validate; auto-generated when only ``custom_sql`` is given.
        engine : sqlalchemy engine (or connection), optional
            Existing engine; exactly one of ``engine``/``connection_string``
            is required.
        connection_string : str, optional
            Used to create an engine when ``engine`` is not supplied.
        custom_sql : str, optional
            Query whose result is materialized into a temporary table.
        schema : str, optional
            Schema of ``table_name``.

        Raises
        ------
        ValueError
            If no table name can be determined, if neither engine nor
            connection string is given, or for BigQuery custom SQL without a
            dataset.
        """
        if custom_sql and not table_name:
            # NOTE: Eugene 2020-01-31: @James, this is a not a proper fix, but without it the "public" schema
            # was used for a temp table and raising an error
            schema = None
            table_name = f"ge_tmp_{str(uuid.uuid4())[:8]}"
            # mssql expects all temporary table names to have a prefix '#'
            if engine.dialect.name.lower() == "mssql":
                table_name = f"#{table_name}"
            self.generated_table_name = table_name
        else:
            self.generated_table_name = None
        if table_name is None:
            raise ValueError("No table_name provided.")
        if engine is None and connection_string is None:
            raise ValueError("Engine or connection_string must be provided.")
        if engine is not None:
            self.engine = engine
        else:
            try:
                self.engine = sa.create_engine(connection_string)
            except Exception as err:
                # Currently we do no error handling if the engine doesn't work out of the box.
                raise err
        if self.engine.dialect.name.lower() == "bigquery":
            # In BigQuery the table name is already qualified with its schema name
            self._table = sa.Table(table_name, sa.MetaData(), schema=None)
            temp_table_schema_name = None
        else:
            try:
                # use the schema name configured for the datasource
                temp_table_schema_name = self.engine.url.query.get("schema")
            except AttributeError as err:
                # sqlite/mssql dialects use a Connection object instead of Engine and override self.engine
                # retrieve the schema from the Connection object i.e. self.engine
                conn_object = self.engine
                temp_table_schema_name = conn_object.engine.url.query.get("schema")
            self._table = sa.Table(table_name, sa.MetaData(), schema=schema)
        # Get the dialect **for purposes of identifying types**
        if self.engine.dialect.name.lower() in [
            "postgresql",
            "mysql",
            "sqlite",
            "oracle",
            "mssql",
        ]:
            # These are the officially included and supported dialects by sqlalchemy
            self.dialect = import_library_module(
                module_name="sqlalchemy.dialects." + self.engine.dialect.name
            )
        elif self.engine.dialect.name.lower() == "snowflake":
            self.dialect = import_library_module(
                module_name="snowflake.sqlalchemy.snowdialect"
            )
        elif self.engine.dialect.name.lower() == "redshift":
            self.dialect = import_library_module(
                module_name="sqlalchemy_redshift.dialect"
            )
        elif self.engine.dialect.name.lower() == "bigquery":
            self.dialect = import_library_module(
                module_name="pybigquery.sqlalchemy_bigquery"
            )
        elif self.engine.dialect.name.lower() == "awsathena":
            self.dialect = import_library_module(
                module_name="pyathena.sqlalchemy_athena"
            )
        else:
            self.dialect = None
        if engine and engine.dialect.name.lower() in ["sqlite", "mssql", "snowflake"]:
            # sqlite/mssql/snowflake temp tables only persist within a connection so override the engine
            self.engine = engine.connect()
        if schema is not None and custom_sql is not None:
            # temporary table will be written to temp schema, so don't allow
            # a user-defined schema
            # NOTE: 20200306 - JPC - Previously, this would disallow both custom_sql (a query) and a schema, but
            # that is overly restrictive -- snowflake could have had a schema specified, for example, in which to create
            # a temporary table.
            # raise ValueError("Cannot specify both schema and custom_sql.")
            pass
        if custom_sql is not None and self.engine.dialect.name.lower() == "bigquery":
            if (
                self.generated_table_name is not None
                and self.engine.dialect.dataset_id is None
            ):
                raise ValueError(
                    "No BigQuery dataset specified. Use bigquery_temp_table batch_kwarg or a specify a "
                    "default dataset in engine url"
                )
        if custom_sql:
            # Materialize the query so the rest of the dataset can treat it as a table.
            self.create_temporary_table(
                table_name, custom_sql, schema_name=temp_table_schema_name
            )
            if self.generated_table_name is not None:
                if self.engine.dialect.name.lower() == "bigquery":
                    logger.warning(
                        "Created permanent table {table_name}".format(
                            table_name=table_name
                        )
                    )
                if self.engine.dialect.name.lower() == "awsathena":
                    logger.warning(
                        "Created permanent table default.{table_name}".format(
                            table_name=table_name
                        )
                    )
        try:
            insp = reflection.Inspector.from_engine(self.engine)
            self.columns = insp.get_columns(table_name, schema=schema)
        except KeyError:
            # we will get a KeyError for temporary tables, since
            # reflection will not find the temporary schema
            self.columns = self.column_reflection_fallback()
        # Use fallback because for mssql reflection doesn't throw an error but returns an empty list
        if len(self.columns) == 0:
            self.columns = self.column_reflection_fallback()
        # Only call super once connection is established and table_name and columns known to allow autoinspection
        super().__init__(*args, **kwargs)
    @property
    def sql_engine_dialect(self) -> DefaultDialect:
        """Dialect of the underlying engine, used for dialect-specific SQL."""
        return self.engine.dialect

    def attempt_allowing_relative_error(self):
        """Return True when the dialect (Redshift, or Postgres via psycopg2)
        is one for which a relative-error argument may be attempted."""
        detected_redshift: bool = sqlalchemy_redshift is not None and check_sql_engine_dialect(
            actual_sql_engine_dialect=self.sql_engine_dialect,
            candidate_sql_engine_dialect=sqlalchemy_redshift.dialect.RedshiftDialect,
        )
        # noinspection PyTypeChecker
        detected_psycopg2: bool = (
            sqlalchemy_psycopg2 is not None
            and check_sql_engine_dialect(
                actual_sql_engine_dialect=self.sql_engine_dialect,
                candidate_sql_engine_dialect=sqlalchemy_psycopg2.PGDialect_psycopg2,
            )
        )
        return detected_redshift or detected_psycopg2
    def head(self, n=5):
        """Returns a *PandasDataset* with the first *n* rows of the given Dataset"""
        try:
            # Preferred path: chunked pandas read so only n rows are fetched.
            df = next(
                pd.read_sql_table(
                    table_name=self._table.name,
                    schema=self._table.schema,
                    con=self.engine,
                    chunksize=n,
                )
            )
        except (ValueError, NotImplementedError):
            # it looks like MetaData that is used by pd.read_sql_table
            # cannot work on a temp table.
            # If it fails, we are trying to get the data using read_sql
            head_sql_str = "select * from "
            if self._table.schema and self.engine.dialect.name.lower() != "bigquery":
                head_sql_str += self._table.schema + "." + self._table.name
            elif self.engine.dialect.name.lower() == "bigquery":
                head_sql_str += "`" + self._table.name + "`"
            else:
                head_sql_str += self._table.name
            head_sql_str += " limit {:d}".format(n)
            # Limit is unknown in mssql! Use top instead!
            if self.engine.dialect.name.lower() == "mssql":
                head_sql_str = "select top({n}) * from {table}".format(
                    n=n, table=self._table.name
                )
            # Limit doesn't work in oracle either
            if self.engine.dialect.name.lower() == "oracle":
                head_sql_str = "select * from {table} WHERE ROWNUM <= {n}".format(
                    table=self._table.name, n=n
                )
            df = pd.read_sql(head_sql_str, con=self.engine)
        except StopIteration:
            # Empty table: the chunked reader yields nothing.
            df = pd.DataFrame(columns=self.get_table_columns())
        return PandasDataset(
            df,
            expectation_suite=self.get_expectation_suite(
                discard_failed_expectations=False,
                discard_result_format_kwargs=False,
                discard_catch_exceptions_kwargs=False,
                discard_include_config_kwargs=False,
            ),
        )
def get_row_count(self, table_name=None):
if table_name is None:
table_name = self._table
else:
table_name = sa.table(table_name)
count_query = sa.select([sa.func.count()]).select_from(table_name)
return int(self.engine.execute(count_query).scalar())
def get_column_count(self):
return len(self.columns)
def get_table_columns(self) -> List[str]:
return [col["name"] for col in self.columns]
    def get_column_nonnull_count(self, column):
        """Return the count of non-null values in *column*, via a single
        element-count / null-count aggregation query."""
        ignore_values = [None]
        count_query = sa.select(
            [
                sa.func.count().label("element_count"),
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.or_(
                                    sa.column(column).in_(ignore_values),
                                    # Below is necessary b/c sa.in_() uses `==` but None != None
                                    # But we only consider this if None is actually in the list of ignore values
                                    sa.column(column).is_(None)
                                    if None in ignore_values
                                    else False,
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("null_count"),
            ]
        ).select_from(self._table)
        count_results = dict(self.engine.execute(count_query).fetchone())
        # `or 0` guards against NULL aggregates on an empty table.
        element_count = int(count_results.get("element_count") or 0)
        null_count = int(count_results.get("null_count") or 0)
        return element_count - null_count
def get_column_sum(self, column):
return convert_to_json_serializable(
self.engine.execute(
sa.select([sa.func.sum(sa.column(column))]).select_from(self._table)
).scalar()
)
def get_column_max(self, column, parse_strings_as_datetimes=False):
if parse_strings_as_datetimes:
raise NotImplementedError
return convert_to_json_serializable(
self.engine.execute(
sa.select([sa.func.max(sa.column(column))]).select_from(self._table)
).scalar()
)
def get_column_min(self, column, parse_strings_as_datetimes=False):
if parse_strings_as_datetimes:
raise NotImplementedError
return convert_to_json_serializable(
self.engine.execute(
sa.select([sa.func.min(sa.column(column))]).select_from(self._table)
).scalar()
)
def get_column_value_counts(self, column, sort="value", collate=None):
    """Return a pandas Series of value -> count for non-NULL values of `column`.

    Args:
        column: column name to count.
        sort: "value" (order by value, optionally collated), "count"
            (descending count), or "none".
        collate: optional collation name applied when sorting by value.

    Raises:
        ValueError: if `sort` is not one of the three accepted values.
    """
    if sort not in ["value", "count", "none"]:
        raise ValueError("sort must be either 'value', 'count', or 'none'")
    query = (
        sa.select(
            [
                sa.column(column).label("value"),
                sa.func.count(sa.column(column)).label("count"),
            ]
        )
        .where(sa.column(column) != None)
        .group_by(sa.column(column))
    )
    if sort == "value":
        # NOTE: depending on the way the underlying database collates columns,
        # ordering can vary. postgresql collate "C" matches default sort
        # for python and most other systems, but is not universally supported,
        # so we use the default sort for the system, unless specifically overridden
        if collate is not None:
            query = query.order_by(sa.column(column).collate(collate))
        else:
            query = query.order_by(sa.column(column))
    elif sort == "count":
        query = query.order_by(sa.column("count").desc())
    results = self.engine.execute(query.select_from(self._table)).fetchall()
    series = pd.Series(
        [row[1] for row in results],
        index=pd.Index(data=[row[0] for row in results], name="value"),
        name="count",
    )
    return series
def get_column_mean(self, column):
    """Return AVG(column) as a JSON-serializable value."""
    # column * 1.0 needed for correct calculation of avg in MSSQL
    return convert_to_json_serializable(
        self.engine.execute(
            sa.select([sa.func.avg(sa.column(column) * 1.0)]).select_from(
                self._table
            )
        ).scalar()
    )
def get_column_unique_count(self, column):
    """Return COUNT(DISTINCT column) as a JSON-serializable value."""
    return convert_to_json_serializable(
        self.engine.execute(
            sa.select(
                [sa.func.count(sa.func.distinct(sa.column(column)))]
            ).select_from(self._table)
        ).scalar()
    )
def get_column_median(self, column):
    """Return the median of the non-NULL values of `column`, JSON-serializable.

    On AWS Athena the native approx_percentile function is used; everywhere
    else the one or two center rows are fetched with OFFSET/LIMIT and the
    median is computed in Python.
    """
    # AWS Athena and presto have an special function that can be used to retrieve the median
    if self.sql_engine_dialect.name.lower() == "awsathena":
        element_values = self.engine.execute(
            f"SELECT approx_percentile({column}, 0.5) FROM {self._table}"
        )
        return convert_to_json_serializable(element_values.fetchone()[0])
    else:
        nonnull_count = self.get_column_nonnull_count(column)
        # Fetch the (up to) two rows straddling the center of the sorted column.
        element_values = self.engine.execute(
            sa.select([sa.column(column)])
            .order_by(sa.column(column))
            .where(sa.column(column) != None)
            .offset(max(nonnull_count // 2 - 1, 0))
            .limit(2)
            .select_from(self._table)
        )
        column_values = list(element_values.fetchall())
        if len(column_values) == 0:
            # No non-null values at all.
            column_median = None
        elif nonnull_count % 2 == 0:
            # An even number of column values: take the average of the two center values
            column_median = (
                float(
                    column_values[0][0]
                    + column_values[1][0]  # left center value # right center value
                )
                / 2.0
            )  # Average center values
        elif len(column_values) == 1:
            # BUGFIX: with exactly one non-null row the offset clamps to 0 and
            # the query returns a single row, so the center is at index 0;
            # indexing [1] here previously raised IndexError.
            column_median = column_values[0][0]
        else:
            # An odd number of column values, we can just take the center value
            column_median = column_values[1][0]  # True center value
        return convert_to_json_serializable(column_median)
def get_column_quantiles(
    self, column: str, quantiles: Iterable, allow_relative_error: bool = False
) -> list:
    """Return the values of `column` at the requested quantiles.

    Dispatches to dialect-specific implementations for mssql, awsathena,
    bigquery, mysql and snowflake; all other dialects use the generic
    SQLAlchemy percentile_disc path.

    NOTE(review): only the final generic branch wraps its result in
    convert_to_json_serializable; the dialect-specific branches return the
    raw list — confirm whether that asymmetry is intentional.
    """
    if self.sql_engine_dialect.name.lower() == "mssql":
        return self._get_column_quantiles_mssql(column=column, quantiles=quantiles)
    elif self.sql_engine_dialect.name.lower() == "awsathena":
        return self._get_column_quantiles_awsathena(
            column=column, quantiles=quantiles
        )
    elif self.sql_engine_dialect.name.lower() == "bigquery":
        return self._get_column_quantiles_bigquery(
            column=column, quantiles=quantiles
        )
    elif self.sql_engine_dialect.name.lower() == "mysql":
        return self._get_column_quantiles_mysql(column=column, quantiles=quantiles)
    elif self.sql_engine_dialect.name.lower() == "snowflake":
        # NOTE: 20201216 - JPC - snowflake has a representation/precision limitation
        # in its percentile_disc implementation that causes an error when we do
        # not round. It is unclear to me *how* the call to round affects the behavior --
        # the binary representation should be identical before and after, and I do
        # not observe a type difference. However, the issue is replicable in the
        # snowflake console and directly observable in side-by-side comparisons with
        # and without the call to round()
        quantiles = [round(x, 10) for x in quantiles]
        return self._get_column_quantiles_generic_sqlalchemy(
            column=column,
            quantiles=quantiles,
            allow_relative_error=allow_relative_error,
        )
    else:
        return convert_to_json_serializable(
            self._get_column_quantiles_generic_sqlalchemy(
                column=column,
                quantiles=quantiles,
                allow_relative_error=allow_relative_error,
            )
        )
@classmethod
def _treat_quantiles_exception(cls, pe):
    """Log a descriptive message for a failed quantile query, then re-raise."""
    trace: str = traceback.format_exc()
    message: str = (
        "An SQL syntax Exception occurred."
        f'{type(pe).__name__}: "{str(pe)}". Traceback: "{trace}".'
    )
    logger.error(message)
    raise pe
def _get_column_quantiles_mssql(self, column: str, quantiles: Iterable) -> list:
    """Compute quantiles on MSSQL using PERCENTILE_DISC ... WITHIN GROUP ... OVER()."""
    # mssql requires over(), so we add an empty over() clause
    selects: List[WithinGroup] = [
        sa.func.percentile_disc(quantile)
        .within_group(sa.column(column).asc())
        .over()
        for quantile in quantiles
    ]
    quantiles_query: Select = sa.select(selects).select_from(self._table)
    try:
        quantiles_results: Row = self.engine.execute(quantiles_query).fetchone()
        return list(quantiles_results)
    except ProgrammingError as pe:
        # _treat_quantiles_exception logs and always re-raises.
        self._treat_quantiles_exception(pe)
def _get_column_quantiles_awsathena(self, column: str, quantiles: Iterable) -> list:
    """Compute quantiles on AWS Athena via its native approx_percentile function.

    NOTE(review): the column and table names are interpolated directly into a
    raw SQL string; safe only as long as they originate from trusted
    configuration — confirm callers never pass untrusted input here.
    """
    import ast
    quantiles_list = list(quantiles)
    quantiles_query = (
        f"SELECT approx_percentile({column}, ARRAY{str(quantiles_list)}) as quantiles "
        f"from (SELECT {column} from {self._table})"
    )
    try:
        quantiles_results = self.engine.execute(quantiles_query).fetchone()[0]
        # Athena returns the array as its string repr; parse it back into a list.
        quantiles_results_list = ast.literal_eval(quantiles_results)
        return quantiles_results_list
    except ProgrammingError as pe:
        self._treat_quantiles_exception(pe)
def _get_column_quantiles_bigquery(self, column: str, quantiles: Iterable) -> list:
    """Compute quantiles on BigQuery using PERCENTILE_DISC(col, q) OVER()."""
    # BigQuery does not support "WITHIN", so we need a special case for it
    selects: List[WithinGroup] = [
        sa.func.percentile_disc(sa.column(column), quantile).over()
        for quantile in quantiles
    ]
    quantiles_query: Select = sa.select(selects).select_from(self._table)
    try:
        quantiles_results = self.engine.execute(quantiles_query).fetchone()
        return list(quantiles_results)
    except ProgrammingError as pe:
        self._treat_quantiles_exception(pe)
def _get_column_quantiles_mysql(self, column: str, quantiles: Iterable) -> list:
    """Compute quantiles on MySQL via a PERCENT_RANK CTE and FIRST_VALUE windows."""
    # MySQL does not support "percentile_disc", so we implement it as a compound query.
    # Please see https://stackoverflow.com/questions/19770026/calculate-percentile-value-using-mysql for reference.
    percent_rank_query: CTE = (
        sa.select(
            [
                sa.column(column),
                sa.cast(
                    sa.func.percent_rank().over(order_by=sa.column(column).asc()),
                    sa.dialects.mysql.DECIMAL(18, 15),
                ).label("p"),
            ]
        )
        .order_by(sa.column("p").asc())
        .select_from(self._table)
        .cte("t")
    )
    selects: List[WithinGroup] = []
    for idx, quantile in enumerate(quantiles):
        # pymysql cannot handle conversion of numpy float64 to float; convert just in case
        # NOTE(review): np.float_ was removed in NumPy 2.0; np.floating would be
        # the forward-compatible spelling — confirm the project's numpy pin.
        if np.issubdtype(type(quantile), np.float_):
            quantile = float(quantile)
        # For each quantile, pick the first column value whose percent_rank
        # does not exceed it (the CASE pushes non-matching rows to the end).
        quantile_column: Label = (
            sa.func.first_value(sa.column(column))
            .over(
                order_by=sa.case(
                    [
                        (
                            percent_rank_query.c.p
                            <= sa.cast(quantile, sa.dialects.mysql.DECIMAL(18, 15)),
                            percent_rank_query.c.p,
                        )
                    ],
                    else_=None,
                ).desc()
            )
            .label(f"q_{idx}")
        )
        selects.append(quantile_column)
    quantiles_query: Select = (
        sa.select(selects).distinct().order_by(percent_rank_query.c.p.desc())
    )
    try:
        quantiles_results: Row = self.engine.execute(quantiles_query).fetchone()
        return list(quantiles_results)
    except ProgrammingError as pe:
        self._treat_quantiles_exception(pe)
# Support for computing the quantiles column for PostgreSQL and Redshift is included in the same method as that for
# the generic sqlalchemy-compatible DBMS engine, because users often use the postgresql driver to connect to Redshift.
# The key functional difference is that Redshift does not support the aggregate function
# "percentile_disc", but does support the approximate percentile_disc or percentile_cont function version instead.
def _get_column_quantiles_generic_sqlalchemy(
    self, column: str, quantiles: Iterable, allow_relative_error: bool
) -> list:
    """Compute quantiles with standard percentile_disc, falling back to an
    approximate variant (e.g. on Redshift) when the exact form is rejected.

    Raises:
        ValueError: if the dialect needs (or forbids) approximation in a way
            that conflicts with `allow_relative_error`.
    """
    selects: List[WithinGroup] = [
        sa.func.percentile_disc(quantile).within_group(sa.column(column).asc())
        for quantile in quantiles
    ]
    quantiles_query: Select = sa.select(selects).select_from(self._table)
    try:
        quantiles_results: Row = self.engine.execute(quantiles_query).fetchone()
        return list(quantiles_results)
    except ProgrammingError:
        # ProgrammingError: (psycopg2.errors.SyntaxError) Aggregate function "percentile_disc" is not supported;
        # use approximate percentile_disc or percentile_cont instead.
        if self.attempt_allowing_relative_error():
            # Redshift does not have a percentile_disc method, but does support an approximate version.
            sql_approx: str = get_approximate_percentile_disc_sql(
                selects=selects, sql_engine_dialect=self.sql_engine_dialect
            )
            selects_approx: List[TextClause] = [sa.text(sql_approx)]
            quantiles_query_approx: Select = sa.select(selects_approx).select_from(
                self._table
            )
            if allow_relative_error:
                try:
                    quantiles_results: Row = self.engine.execute(
                        quantiles_query_approx
                    ).fetchone()
                    return list(quantiles_results)
                except ProgrammingError as pe:
                    self._treat_quantiles_exception(pe)
            else:
                raise ValueError(
                    f'The SQL engine dialect "{str(self.sql_engine_dialect)}" does not support computing quantiles '
                    "without approximation error; set allow_relative_error to True to allow approximate quantiles."
                )
        else:
            raise ValueError(
                f'The SQL engine dialect "{str(self.sql_engine_dialect)}" does not support computing quantiles with '
                "approximation error; set allow_relative_error to False to disable approximate quantiles."
            )
def get_column_stdev(self, column):
    """Return the sample standard deviation of the non-NULL values of `column`."""
    if self.sql_engine_dialect.name.lower() == "mssql":
        # Note: "stdev_samp" is not a recognized built-in function name (but "stdev" does exist for "mssql").
        # This function is used to compute statistical standard deviation from sample data (per the reference in
        # https://sqlserverrider.wordpress.com/2013/03/06/standard-deviation-functions-stdev-and-stdevp-sql-server).
        res = self.engine.execute(
            sa.select([sa.func.stdev(sa.column(column))])
            .select_from(self._table)
            # BUGFIX: was `sa.column(column) is not None`, a Python identity
            # test that always evaluates to True and never reaches SQL.
            # `!= None` compiles to "IS NOT NULL", matching the idiom used
            # throughout this file (e.g. get_column_value_counts).
            .where(sa.column(column) != None)
        ).fetchone()
    else:
        res = self.engine.execute(
            sa.select([sa.func.stddev_samp(sa.column(column))])
            .select_from(self._table)
            .where(sa.column(column) != None)
        ).fetchone()
    return float(res[0])
def get_column_hist(self, column, bins):
    """Return a list of counts corresponding to bins.

    Builds one SUM(CASE ...) expression per bin so the whole histogram is
    computed in a single query over non-NULL values.

    Args:
        column: the name of the column for which to get the histogram
        bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching
    """
    case_conditions = []
    idx = 0
    bins = list(bins)
    # If we have an infinite lower bound, don't express that in sql
    if (
        bins[0]
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_np", negative=True
        )
    ) or (
        bins[0]
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_cast", negative=True
        )
    ):
        # First bin is (-inf, bins[1]): only an upper bound in SQL.
        case_conditions.append(
            sa.func.sum(
                sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0)
            ).label("bin_" + str(idx))
        )
        idx += 1
    # Interior bins are half-open: [bins[idx], bins[idx + 1]).
    for idx in range(idx, len(bins) - 2):
        case_conditions.append(
            sa.func.sum(
                sa.case(
                    [
                        (
                            sa.and_(
                                sa.column(column) >= bins[idx],
                                sa.column(column) < bins[idx + 1],
                            ),
                            1,
                        )
                    ],
                    else_=0,
                )
            ).label("bin_" + str(idx))
        )
    if (
        bins[-1]
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_np", negative=False
        )
    ) or (
        bins[-1]
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_cast", negative=False
        )
    ):
        # Last bin is [bins[-2], +inf): only a lower bound in SQL.
        case_conditions.append(
            sa.func.sum(
                sa.case([(sa.column(column) >= bins[-2], 1)], else_=0)
            ).label("bin_" + str(len(bins) - 1))
        )
    else:
        # Last finite bin is closed on both sides: [bins[-2], bins[-1]].
        case_conditions.append(
            sa.func.sum(
                sa.case(
                    [
                        (
                            sa.and_(
                                sa.column(column) >= bins[-2],
                                sa.column(column) <= bins[-1],
                            ),
                            1,
                        )
                    ],
                    else_=0,
                )
            ).label("bin_" + str(len(bins) - 1))
        )
    query = (
        sa.select(case_conditions)
        .where(
            sa.column(column) != None,
        )
        .select_from(self._table)
    )
    # Run the data through convert_to_json_serializable to ensure we do not have Decimal types
    hist = convert_to_json_serializable(list(self.engine.execute(query).fetchone()))
    return hist
def get_column_count_in_range(
    self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
):
    """Return the count of non-NULL values of `column` inside [min_val, max_val].

    Args:
        column: column name to count.
        min_val: lower bound, or None for no lower bound.
        max_val: upper bound, or None for no upper bound.
        strict_min: use > instead of >= for the lower bound.
        strict_max: use < instead of <= for the upper bound (default True).

    Raises:
        ValueError: if both bounds are None, or min_val > max_val.
    """
    if min_val is None and max_val is None:
        raise ValueError("Must specify either min or max value")
    if min_val is not None and max_val is not None and min_val > max_val:
        raise ValueError("Min value must be <= to max value")
    # Translate API-level +/- infinity sentinels into the dialect's own
    # representation of floating-point infinity.
    if (
        min_val
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_np", negative=True
        )
    ) or (
        min_val
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_cast", negative=True
        )
    ):
        min_val = get_sql_dialect_floating_point_infinity_value(
            schema=self.sql_engine_dialect.name.lower(), negative=True
        )
    if (
        min_val
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_np", negative=False
        )
    ) or (
        min_val
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_cast", negative=False
        )
    ):
        min_val = get_sql_dialect_floating_point_infinity_value(
            schema=self.sql_engine_dialect.name.lower(), negative=False
        )
    if (
        max_val
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_np", negative=True
        )
    ) or (
        max_val
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_cast", negative=True
        )
    ):
        max_val = get_sql_dialect_floating_point_infinity_value(
            schema=self.sql_engine_dialect.name.lower(), negative=True
        )
    if (
        max_val
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_np", negative=False
        )
    ) or (
        max_val
        == get_sql_dialect_floating_point_infinity_value(
            schema="api_cast", negative=False
        )
    ):
        max_val = get_sql_dialect_floating_point_infinity_value(
            schema=self.sql_engine_dialect.name.lower(), negative=False
        )
    min_condition = None
    max_condition = None
    if min_val is not None:
        if strict_min:
            min_condition = sa.column(column) > min_val
        else:
            min_condition = sa.column(column) >= min_val
    if max_val is not None:
        if strict_max:
            max_condition = sa.column(column) < max_val
        else:
            max_condition = sa.column(column) <= max_val
    if min_condition is not None and max_condition is not None:
        condition = sa.and_(min_condition, max_condition)
    elif min_condition is not None:
        condition = min_condition
    else:
        condition = max_condition
    query = (
        sa.select([sa.func.count(sa.column(column))])
        .where(sa.and_(sa.column(column) != None, condition))
        .select_from(self._table)
    )
    return convert_to_json_serializable(self.engine.execute(query).scalar())
def create_temporary_table(self, table_name, custom_sql, schema_name=None):
    """
    Create Temporary table based on sql query. This will be used as a basis for executing expectations.
    WARNING: this feature is new in v0.4.
    It hasn't been tested in all SQL dialects, and may change based on community feedback.
    :param table_name: name of the temporary table/view to create
    :param custom_sql: the query whose result populates the temporary table
    :param schema_name: optional schema qualifier (only applied on snowflake)
    """
    ###
    # NOTE: 20200310 - The update to support snowflake transient table creation revealed several
    # import cases that are not fully handled.
    # The snowflake-related change updated behavior to allow both custom_sql and schema to be specified. But
    # the underlying incomplete handling of schema remains.
    #
    # Several cases we need to consider:
    #
    # 1. Distributed backends (e.g. Snowflake and BigQuery) often use a `<database>.<schema>.<table>`
    # syntax, but currently we are biased towards only allowing schema.table
    #
    # 2. In the wild, we see people using several ways to declare the schema they want to use:
    # a. In the connection string, the original RFC only specifies database, but schema is supported by some
    # backends (Snowflake) as a query parameter.
    # b. As a default for a user (the equivalent of USE SCHEMA being provided at the beginning of a session)
    # c. As part of individual queries.
    #
    # 3. We currently don't make it possible to select from a table in one query, but create a temporary table in
    # another schema, except for with BigQuery and (now) snowflake, where you can specify the table name (and
    # potentially triple of database, schema, table) in the batch_kwargs.
    #
    # The SqlAlchemyDataset interface essentially predates the batch_kwargs concept and so part of what's going
    # on, I think, is a mismatch between those. I think we should rename custom_sql -> "temp_table_query" or
    # similar, for example.
    ###
    engine_dialect = self.sql_engine_dialect.name.lower()
    # handle cases where dialect.name.lower() returns a byte string (e.g. databricks)
    if isinstance(engine_dialect, bytes):
        engine_dialect = str(engine_dialect, "utf-8")
    if engine_dialect == "bigquery":
        stmt = "CREATE OR REPLACE VIEW `{table_name}` AS {custom_sql}".format(
            table_name=table_name, custom_sql=custom_sql
        )
    elif engine_dialect == "databricks":
        stmt = "CREATE OR REPLACE VIEW `{table_name}` AS {custom_sql}".format(
            table_name=table_name, custom_sql=custom_sql
        )
    elif engine_dialect == "snowflake":
        table_type = "TEMPORARY" if self.generated_table_name else "TRANSIENT"
        logger.info("Creating temporary table %s" % table_name)
        if schema_name is not None:
            table_name = schema_name + "." + table_name
        stmt = "CREATE OR REPLACE {table_type} TABLE {table_name} AS {custom_sql}".format(
            table_type=table_type, table_name=table_name, custom_sql=custom_sql
        )
    elif self.sql_engine_dialect.name == "mysql":
        # Note: We can keep the "MySQL" clause separate for clarity, even though it is the same as the generic case.
        stmt = "CREATE TEMPORARY TABLE {table_name} AS {custom_sql}".format(
            table_name=table_name, custom_sql=custom_sql
        )
    elif self.sql_engine_dialect.name == "mssql":
        # Insert "into #{table_name}" in the custom sql query right before the "from" clause
        # Split is case sensitive so detect case.
        # Note: transforming custom_sql to uppercase/lowercase has uninteded consequences (i.e., changing column names), so this is not an option!
        # NOTE(review): this splits on the first literal "from"/"FROM", which
        # could also match a substring inside an identifier or subquery text —
        # confirm callers only pass simple SELECT statements.
        if "from" in custom_sql:
            strsep = "from"
        else:
            strsep = "FROM"
        custom_sqlmod = custom_sql.split(strsep, maxsplit=1)
        stmt = (
            custom_sqlmod[0] + "into {table_name} from" + custom_sqlmod[1]
        ).format(table_name=table_name)
    elif engine_dialect == "awsathena":
        stmt = "CREATE TABLE {table_name} AS {custom_sql}".format(
            table_name=table_name, custom_sql=custom_sql
        )
    elif engine_dialect == "oracle":
        # oracle 18c introduced PRIVATE temp tables which are transient objects
        stmt_1 = "CREATE PRIVATE TEMPORARY TABLE {table_name} ON COMMIT PRESERVE DEFINITION AS {custom_sql}".format(
            table_name=table_name, custom_sql=custom_sql
        )
        # prior to oracle 18c only GLOBAL temp tables existed and only the data is transient
        # this means an empty table will persist after the db session
        stmt_2 = "CREATE GLOBAL TEMPORARY TABLE {table_name} ON COMMIT PRESERVE ROWS AS {custom_sql}".format(
            table_name=table_name, custom_sql=custom_sql
        )
    else:
        stmt = 'CREATE TEMPORARY TABLE "{table_name}" AS {custom_sql}'.format(
            table_name=table_name, custom_sql=custom_sql
        )
    if engine_dialect == "oracle":
        # Try the 18c PRIVATE form first; fall back to the pre-18c GLOBAL form.
        try:
            self.engine.execute(stmt_1)
        except DatabaseError:
            self.engine.execute(stmt_2)
    else:
        self.engine.execute(stmt)
def column_reflection_fallback(self):
    """If we can't reflect the table, use a query to at least get column names.

    Returns a list of dicts; on mssql each dict has "name" and "type",
    elsewhere only "name".
    """
    col_info_dict_list: List[Dict]
    if self.sql_engine_dialect.name.lower() == "mssql":
        type_module = self._get_dialect_type_module()
        # Get column names and types from the database
        # StackOverflow to the rescue: https://stackoverflow.com/a/38634368
        col_info_query: TextClause = sa.text(
            f"""
SELECT
cols.NAME, ty.NAME
FROM
tempdb.sys.columns AS cols
JOIN
sys.types AS ty
ON
cols.user_type_id = ty.user_type_id
WHERE
object_id = OBJECT_ID('tempdb..{self._table}')
"""
        )
        col_info_tuples_list = self.engine.execute(col_info_query).fetchall()
        col_info_dict_list = [
            {"name": col_name, "type": getattr(type_module, col_type.upper())()}
            for col_name, col_type in col_info_tuples_list
        ]
    else:
        # Select a single row only to discover the result's column names.
        query: Select = sa.select([sa.text("*")]).select_from(self._table).limit(1)
        col_names: list = self.engine.execute(query).keys()
        col_info_dict_list = [{"name": col_name} for col_name in col_names]
    return col_info_dict_list
###
###
###
#
# Table Expectation Implementations
#
###
###
###
# noinspection PyUnusedLocal
@DocInherit
@MetaSqlAlchemyDataset.expectation(["other_table_name"])
def expect_table_row_count_to_equal_other_table(
    self,
    other_table_name,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect the number of rows in this table to equal the number of rows in a different table.
    This is an :func:`expectation \
    <great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
    ``column_map_expectation`` or ``column_aggregate_expectation``.
    Args:
        other_table_name (str): \
            The name of the other table to which to compare.
    Other Parameters:
        result_format (string or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
            modification. For more detail, see :ref:`meta`.
    Returns:
        An ExpectationSuiteValidationResult
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
    See Also:
        expect_table_row_count_to_be_between
    """
    self_count = self.get_row_count()
    other_count = self.get_row_count(table_name=other_table_name)
    observed = {
        "self": self_count,
        "other": other_count,
    }
    return {
        "success": self_count == other_count,
        "result": {"observed_value": observed},
    }
###
###
###
#
# Compound Column Expectation Implementations
#
###
###
###
@DocInherit
@MetaSqlAlchemyDataset.expectation(["column_list", "ignore_row_if"])
def expect_compound_columns_to_be_unique(
    self,
    column_list,
    ignore_row_if="all_values_are_missing",
    result_format=None,
    row_condition=None,
    condition_parser=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect that the combination of values across `column_list` is unique per row.

    Groups by the listed columns and looks for any group with more than one
    row; `ignore_row_if` controls how rows with NULLs are filtered.

    Raises:
        ValueError: if `ignore_row_if` is not one of the recognized options.
    """
    columns = [
        sa.column(col["name"]) for col in self.columns if col["name"] in column_list
    ]
    # Any group returned by this query is a duplicated column combination.
    query = (
        sa.select([sa.func.count()])
        .group_by(*columns)
        .having(sa.func.count() > 1)
        .select_from(self._table)
    )
    if ignore_row_if == "all_values_are_missing":
        query = query.where(sa.and_(*[col != None for col in columns]))
    elif ignore_row_if == "any_value_is_missing":
        query = query.where(sa.or_(*[col != None for col in columns]))
    elif ignore_row_if == "never":
        pass
    else:
        raise ValueError(
            "ignore_row_if was set to an unexpected value: %s" % ignore_row_if
        )
    unexpected_count = self.engine.execute(query).fetchone()
    if unexpected_count is None:
        # This can happen when the condition filters out all rows
        unexpected_count = 0
    else:
        unexpected_count = unexpected_count[0]
    total_count_query = sa.select([sa.func.count()]).select_from(self._table)
    total_count = self.engine.execute(total_count_query).fetchone()[0]
    if total_count > 0:
        unexpected_percent = 100.0 * unexpected_count / total_count
    else:
        # If no rows, then zero percent are unexpected.
        unexpected_percent = 0
    return {
        "success": unexpected_count == 0,
        "result": {"unexpected_percent": unexpected_percent},
    }
###
###
###
#
# Column Map Expectation Implementations
#
###
###
###
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_be_null(
    self,
    column,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column values to be NULL; `== None` compiles to `column IS NULL`."""
    return sa.column(column) == None
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_be_null(
    self,
    column,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column values to be non-NULL; `!= None` compiles to `column IS NOT NULL`."""
    return sa.column(column) != None
def _get_dialect_type_module(self):
    """Return the module holding this dialect's column types.

    Falls back to top-level sqlalchemy types when no dialect is known, and
    special-cases Redshift and (patched) BigQuery type lookups.
    """
    if self.dialect is None:
        logger.warning(
            "No sqlalchemy dialect found; relying in top-level sqlalchemy types."
        )
        return sa
    try:
        # Redshift does not (yet) export types to top level; only recognize base SA types
        if isinstance(
            self.sql_engine_dialect, sqlalchemy_redshift.dialect.RedshiftDialect
        ):
            return self.dialect.sa
    except (TypeError, AttributeError):
        # Driver not installed (name is None) or structure differs; try the next case.
        pass
    # Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple
    try:
        if (
            isinstance(
                self.sql_engine_dialect,
                pybigquery.sqlalchemy_bigquery.BigQueryDialect,
            )
            and bigquery_types_tuple is not None
        ):
            return bigquery_types_tuple
    except (TypeError, AttributeError):
        pass
    return self.dialect
@DocInherit
@DataAsset.expectation(["column", "type_", "mostly"])
def expect_column_values_to_be_of_type(
    self,
    column,
    type_,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect the reflected database type of `column` to be a subclass of `type_`.

    This is an aggregate check against table metadata, so per-row `mostly`
    semantics are not supported.

    Raises:
        ValueError: if mostly is given, the column is unknown, its type
            metadata is missing, or `type_` is not recognized by the driver.
    """
    if mostly is not None:
        raise ValueError(
            "SqlAlchemyDataset does not support column map semantics for column types"
        )
    try:
        col_data = [col for col in self.columns if col["name"] == column][0]
        col_type = type(col_data["type"])
    except IndexError:
        raise ValueError("Unrecognized column: %s" % column)
    except KeyError:
        raise ValueError("No database type data available for column: %s" % column)
    try:
        # Our goal is to be as explicit as possible. We will match the dialect
        # if that is possible. If there is no dialect available, we *will*
        # match against a top-level SqlAlchemy type if that's possible.
        #
        # This is intended to be a conservative approach.
        #
        # In particular, we *exclude* types that would be valid under an ORM
        # such as "float" for postgresql with this approach
        if type_ is None:
            # vacuously true
            success = True
        else:
            type_module = self._get_dialect_type_module()
            success = issubclass(col_type, getattr(type_module, type_))
        return {"success": success, "result": {"observed_value": col_type.__name__}}
    except AttributeError:
        raise ValueError("Type not recognized by current driver: %s" % type_)
@DocInherit
@DataAsset.expectation(["column", "type_list", "mostly"])
def expect_column_values_to_be_in_type_list(
    self,
    column,
    type_list,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect the reflected database type of `column` to match one of `type_list`.

    Unrecognized type names are logged and skipped; an empty resolved type
    tuple means the check fails (issubclass against () is False).

    Raises:
        ValueError: if mostly is given, the column is unknown, or its type
            metadata is missing.
    """
    if mostly is not None:
        raise ValueError(
            "SqlAlchemyDataset does not support column map semantics for column types"
        )
    try:
        col_data = [col for col in self.columns if col["name"] == column][0]
        col_type = type(col_data["type"])
    except IndexError:
        raise ValueError("Unrecognized column: %s" % column)
    except KeyError:
        raise ValueError("No database type data available for column: %s" % column)
    # Our goal is to be as explicit as possible. We will match the dialect
    # if that is possible. If there is no dialect available, we *will*
    # match against a top-level SqlAlchemy type.
    #
    # This is intended to be a conservative approach.
    #
    # In particular, we *exclude* types that would be valid under an ORM
    # such as "float" for postgresql with this approach
    if type_list is None:
        success = True
    else:
        types = []
        type_module = self._get_dialect_type_module()
        for type_ in type_list:
            try:
                type_class = getattr(type_module, type_)
                types.append(type_class)
            except AttributeError:
                logger.debug("Unrecognized type: %s" % type_)
        if len(types) == 0:
            logger.warning(
                "No recognized sqlalchemy types in type_list for current dialect."
            )
        types = tuple(types)
        success = issubclass(col_type, types)
    return {"success": success, "result": {"observed_value": col_type.__name__}}
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_be_in_set(
    self,
    column,
    value_set,
    mostly=None,
    parse_strings_as_datetimes=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect each value of `column` to be a member of `value_set`.

    A `value_set` of None imposes no constraint (vacuously true). When
    `parse_strings_as_datetimes` is set, the value set is parsed into
    datetimes before building the SQL membership test.
    """
    if value_set is None:
        # vacuously true
        return True
    parsed_value_set = (
        self._parse_value_set(value_set)
        if parse_strings_as_datetimes
        else value_set
    )
    return sa.column(column).in_(tuple(parsed_value_set))
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_be_in_set(
    self,
    column,
    value_set,
    mostly=None,
    parse_strings_as_datetimes=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect each value of `column` to be absent from `value_set`.

    When `parse_strings_as_datetimes` is set, the value set is parsed into
    datetimes before building the SQL NOT IN test.
    """
    parsed_value_set = (
        self._parse_value_set(value_set)
        if parse_strings_as_datetimes
        else value_set
    )
    return sa.column(column).notin_(tuple(parsed_value_set))
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_be_between(
    self,
    column,
    min_value=None,
    max_value=None,
    strict_min=False,
    strict_max=False,
    allow_cross_type_comparisons=None,
    parse_strings_as_datetimes=None,
    output_strftime_format=None,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect column values to lie between `min_value` and `max_value`.

    `strict_min`/`strict_max` switch the corresponding comparison from
    inclusive (>=, <=) to exclusive (>, <). With
    `parse_strings_as_datetimes`, truthy string bounds are parsed into
    datetimes first.

    Raises:
        ValueError: if min_value > max_value, or both bounds are None.
    """
    if parse_strings_as_datetimes:
        if min_value:
            min_value = parse(min_value)
        if max_value:
            max_value = parse(max_value)
    if min_value is not None and max_value is not None and min_value > max_value:
        raise ValueError("min_value cannot be greater than max_value")
    if min_value is None and max_value is None:
        raise ValueError("min_value and max_value cannot both be None")
    if min_value is None:
        # Upper bound only.
        if strict_max:
            return sa.column(column) < max_value
        return sa.column(column) <= max_value
    if max_value is None:
        # Lower bound only.
        if strict_min:
            return sa.column(column) > min_value
        return sa.column(column) >= min_value
    # Both bounds present: combine the two comparisons.
    lower_condition = (
        sa.column(column) > min_value
        if strict_min
        else sa.column(column) >= min_value
    )
    upper_condition = (
        sa.column(column) < max_value
        if strict_max
        else sa.column(column) <= max_value
    )
    return sa.and_(lower_condition, upper_condition)
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_value_lengths_to_equal(
    self,
    column,
    value,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect the string length of each value in `column` to equal `value`."""
    return sa.func.length(sa.column(column)) == value
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_value_lengths_to_be_between(
    self,
    column,
    min_value=None,
    max_value=None,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect the string length of each value in `column` to fall in
    [min_value, max_value] (inclusive); either bound may be None.

    Raises:
        ValueError: if both bounds are None, or a given bound is not a
            whole number.
    """
    if min_value is None and max_value is None:
        raise ValueError("min_value and max_value cannot both be None")
    # Each provided bound must be interpretable as an integer.
    try:
        for bound in (min_value, max_value):
            if bound is not None and not float(bound).is_integer():
                raise ValueError("min_value and max_value must be integers")
    except ValueError:
        raise ValueError("min_value and max_value must be integers")
    length_expr = sa.func.length(sa.column(column))
    if min_value is None:
        return length_expr <= max_value
    if max_value is None:
        return length_expr >= min_value
    return sa.and_(length_expr >= min_value, length_expr <= max_value)
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_be_unique(
    self,
    column,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect each value in `column` to appear at most once.

    Builds a NOT IN against a subquery of duplicated values; MySQL needs a
    materialized temp table because it cannot reference the same temp table
    twice in one query.
    """
    # Duplicates are found by filtering a group by query
    dup_query = (
        sa.select([sa.column(column)])
        .select_from(self._table)
        .group_by(sa.column(column))
        .having(sa.func.count(sa.column(column)) > 1)
    )
    # Will - 20210126
    # This is a special case that needs to be handled for mysql, where you cannot refer to a temp_table
    # more than once in the same query. So instead of passing dup_query as-is, a second temp_table is created with
    # just the column we will be performing the expectation on, and the query is performed against it.
    if self.sql_engine_dialect.name.lower() == "mysql":
        temp_table_name = f"ge_tmp_{str(uuid.uuid4())[:8]}"
        temp_table_stmt = "CREATE TEMPORARY TABLE {new_temp_table} AS SELECT tmp.{column_name} FROM {source_table} tmp".format(
            new_temp_table=temp_table_name,
            source_table=self._table,
            column_name=column,
        )
        self.engine.execute(temp_table_stmt)
        dup_query = (
            sa.select([sa.column(column)])
            .select_from(sa.text(temp_table_name))
            .group_by(sa.column(column))
            .having(sa.func.count(sa.column(column)) > 1)
        )
    return sa.column(column).notin_(dup_query)
def _get_dialect_regex_expression(self, column, regex, positive=True):
    """Build a dialect-specific SQL expression matching `column` against
    `regex` (negated when ``positive=False``).

    Returns None when the active dialect has no regex operator we know
    how to emit; each dialect module is probed defensively because the
    corresponding driver may not be installed.
    """
    try:
        # postgres
        if isinstance(self.sql_engine_dialect, sa.dialects.postgresql.dialect):
            operator = custom_op("~" if positive else "!~")
            return BinaryExpression(sa.column(column), literal(regex), operator)
    except AttributeError:
        pass

    try:
        # redshift
        if isinstance(
            self.sql_engine_dialect, sqlalchemy_redshift.dialect.RedshiftDialect
        ):
            operator = custom_op("~" if positive else "!~")
            return BinaryExpression(sa.column(column), literal(regex), operator)
    except (
        AttributeError,
        TypeError,
    ):  # TypeError can occur if the driver was not installed and so is None
        pass

    try:
        # MySQL
        if isinstance(self.sql_engine_dialect, sa.dialects.mysql.dialect):
            operator = custom_op("REGEXP" if positive else "NOT REGEXP")
            return BinaryExpression(sa.column(column), literal(regex), operator)
    except AttributeError:
        pass

    try:
        # Snowflake
        if isinstance(
            self.sql_engine_dialect,
            snowflake.sqlalchemy.snowdialect.SnowflakeDialect,
        ):
            operator = custom_op("RLIKE" if positive else "NOT RLIKE")
            return BinaryExpression(sa.column(column), literal(regex), operator)
    except (
        AttributeError,
        TypeError,
    ):  # TypeError can occur if the driver was not installed and so is None
        pass

    try:
        # Bigquery has no regex operator; it exposes a function instead.
        if isinstance(
            self.sql_engine_dialect, pybigquery.sqlalchemy_bigquery.BigQueryDialect
        ):
            contains = sa.func.REGEXP_CONTAINS(sa.column(column), literal(regex))
            return contains if positive else sa.not_(contains)
    except (
        AttributeError,
        TypeError,
    ):  # TypeError can occur if the driver was not installed and so is None
        pass

    # No known regex support for this dialect.
    return None
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_regex(
    self,
    column,
    regex,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect values in `column` to match `regex`.

    Raises NotImplementedError (after logging) when the active dialect
    has no regex support known to _get_dialect_regex_expression.
    """
    expression = self._get_dialect_regex_expression(column, regex)
    if expression is not None:
        return expression
    logger.warning(
        "Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
    )
    raise NotImplementedError
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_regex(
    self,
    column,
    regex,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect values in `column` to NOT match `regex`.

    Raises NotImplementedError (after logging) when the active dialect
    has no regex support known to _get_dialect_regex_expression.
    """
    expression = self._get_dialect_regex_expression(column, regex, positive=False)
    if expression is not None:
        return expression
    logger.warning(
        "Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
    )
    raise NotImplementedError
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_regex_list(
    self,
    column,
    regex_list,
    match_on="any",
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect values in `column` to match at least one ("any") or every
    ("all") regex in `regex_list`.

    Raises
    ------
    ValueError
        If `match_on` is not "any"/"all" or `regex_list` is empty.
    NotImplementedError
        If the active dialect has no regex support (logged first).
    """
    if match_on not in ["any", "all"]:
        raise ValueError("match_on must be any or all")
    if len(regex_list) == 0:
        raise ValueError("At least one regex must be supplied in the regex_list.")
    # Build each per-regex expression exactly once. The original probed the
    # dialect with regex_list[0], discarded the result, and then rebuilt
    # every expression a second time inside the combinator.
    regex_expressions = [
        self._get_dialect_regex_expression(column, regex) for regex in regex_list
    ]
    if regex_expressions[0] is None:
        logger.warning(
            "Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
        )
        raise NotImplementedError
    if match_on == "any":
        return sa.or_(*regex_expressions)
    return sa.and_(*regex_expressions)
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_regex_list(
    self,
    column,
    regex_list,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect values in `column` to match none of the regexes in
    `regex_list`.

    Raises
    ------
    ValueError
        If `regex_list` is empty.
    NotImplementedError
        If the active dialect has no regex support (logged first).
    """
    if len(regex_list) == 0:
        raise ValueError("At least one regex must be supplied in the regex_list.")
    # Build each negated expression exactly once instead of constructing
    # the first one, discarding it, and rebuilding all of them (as the
    # original did).
    negated_expressions = [
        self._get_dialect_regex_expression(column, regex, positive=False)
        for regex in regex_list
    ]
    if negated_expressions[0] is None:
        logger.warning(
            "Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
        )
        raise NotImplementedError
    return sa.and_(*negated_expressions)
def _get_dialect_like_pattern_expression(self, column, like_pattern, positive=True):
    """Build a SQL LIKE (or NOT LIKE) expression for `column`.

    Returns None when the active dialect is not one we know supports
    LIKE with a literal pattern; dialect modules are probed defensively
    because their drivers may not be installed.
    """
    supported: bool = False

    try:
        # Bigquery
        if isinstance(
            self.sql_engine_dialect, pybigquery.sqlalchemy_bigquery.BigQueryDialect
        ):
            supported = True
    except (
        AttributeError,
        TypeError,
    ):  # TypeError can occur if the driver was not installed and so is None
        pass

    # Core sqlalchemy dialects are always importable, so no guard needed.
    builtin_dialects = (
        sa.dialects.sqlite.dialect,
        sa.dialects.postgresql.dialect,
        sa.dialects.mysql.dialect,
        sa.dialects.mssql.dialect,
    )
    if isinstance(self.sql_engine_dialect, builtin_dialects):
        supported = True

    try:
        if isinstance(
            self.sql_engine_dialect, sqlalchemy_redshift.dialect.RedshiftDialect
        ):
            supported = True
    except (AttributeError, TypeError):
        pass

    if not supported:
        return None

    try:
        matches = sa.column(column).like(literal(like_pattern))
        return matches if positive else sa.not_(matches)
    except AttributeError:
        pass
    return None
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_like_pattern(
    self,
    column,
    like_pattern,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect values in `column` to match the SQL LIKE `like_pattern`.

    Raises NotImplementedError (after logging) when the active dialect
    is not known to support LIKE patterns.
    """
    expression = self._get_dialect_like_pattern_expression(column, like_pattern)
    if expression is not None:
        return expression
    logger.warning(
        "Like patterns are not supported for dialect %s"
        % str(self.sql_engine_dialect)
    )
    raise NotImplementedError
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_like_pattern(
    self,
    column,
    like_pattern,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect values in `column` to NOT match the SQL LIKE `like_pattern`.

    Raises NotImplementedError (after logging) when the active dialect
    is not known to support LIKE patterns.
    """
    expression = self._get_dialect_like_pattern_expression(
        column, like_pattern, positive=False
    )
    if expression is not None:
        return expression
    logger.warning(
        "Like patterns are not supported for dialect %s"
        % str(self.sql_engine_dialect)
    )
    raise NotImplementedError
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_like_pattern_list(
    self,
    column,
    like_pattern_list,
    match_on="any",
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect values in `column` to match at least one ("any") or every
    ("all") SQL LIKE pattern in `like_pattern_list`.

    Raises
    ------
    ValueError
        If `match_on` is not "any"/"all" or the pattern list is empty.
    NotImplementedError
        If the active dialect has no LIKE support (logged first).
    """
    if match_on not in ["any", "all"]:
        raise ValueError("match_on must be any or all")
    if len(like_pattern_list) == 0:
        raise ValueError(
            "At least one like_pattern must be supplied in the like_pattern_list."
        )
    # Build each per-pattern expression exactly once. The original built
    # the first pattern's expression, discarded it, then rebuilt every
    # expression a second time inside the combinator.
    like_pattern_expressions = [
        self._get_dialect_like_pattern_expression(column, like_pattern)
        for like_pattern in like_pattern_list
    ]
    if like_pattern_expressions[0] is None:
        logger.warning(
            "Like patterns are not supported for dialect %s"
            % str(self.sql_engine_dialect)
        )
        raise NotImplementedError
    if match_on == "any":
        return sa.or_(*like_pattern_expressions)
    return sa.and_(*like_pattern_expressions)
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_like_pattern_list(
    self,
    column,
    like_pattern_list,
    mostly=None,
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
):
    """Expect values in `column` to match none of the SQL LIKE patterns
    in `like_pattern_list`.

    Raises
    ------
    ValueError
        If the pattern list is empty.
    NotImplementedError
        If the active dialect has no LIKE support (logged first).
    """
    if len(like_pattern_list) == 0:
        raise ValueError(
            "At least one like_pattern must be supplied in the like_pattern_list."
        )
    # Build each negated expression exactly once instead of constructing
    # the first one, discarding it, and rebuilding all of them (as the
    # original did).
    negated_expressions = [
        self._get_dialect_like_pattern_expression(
            column, like_pattern, positive=False
        )
        for like_pattern in like_pattern_list
    ]
    if negated_expressions[0] is None:
        logger.warning(
            "Like patterns are not supported for dialect %s"
            % str(self.sql_engine_dialect)
        )
        raise NotImplementedError
    return sa.and_(*negated_expressions)
| apache-2.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/numpy/lib/function_base.py | 30 | 124613 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from ._compiled_base import _insert, add_docstring
from ._compiled_base import digitize, bincount, interp as compiled_interp
from ._compiled_base import add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
    range = xrange

# Explicit public API: `from numpy.lib import *` re-exports exactly these
# names.
__all__ = [
    'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
    'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
    'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
    'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
    'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
    'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
    'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
        Input object.

    Returns
    -------
    b : {0, 1}
        Return 1 if the object has an iterator method or is a sequence,
        and 0 otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    1
    >>> np.iterable(2)
    0

    """
    try:
        iter(y)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt raised while probing `y`; catching Exception
        # keeps the "anything non-iterable -> 0" contract without hiding
        # interpreter-exit signals.
        return 0
    return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
              density=None):
    """
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a sequence,
        it defines the bin edges, including the rightmost edge, allowing
        for non-uniform bin widths.
    range : (float, float), optional
        The lower and upper range of the bins.  If not provided, range
        is simply ``(a.min(), a.max())``.  Values outside the range are
        ignored.
    normed : bool, optional
        This keyword is deprecated in Numpy 1.6 due to confusing/buggy
        behavior. It will be removed in Numpy 2.0. Use the density keyword
        instead.
        If False, the result will contain the number of samples
        in each bin.  If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that this latter behavior is
        known to be buggy with unequal bin widths; use `density` instead.
    weights : array_like, optional
        An array of weights, of the same shape as `a`.  Each value in `a`
        only contributes its associated weight towards the bin count
        (instead of 1).  If `normed` is True, the weights are normalized,
        so that the integral of the density over the range remains 1
    density : bool, optional
        If False, the result will contain the number of samples
        in each bin.  If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
        Overrides the `normed` keyword if given.

    Returns
    -------
    hist : array
        The values of the histogram. See `normed` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    histogramdd, bincount, searchsorted, digitize

    Notes
    -----
    All but the last (righthand-most) bin is half-open.  In other words, if
    `bins` is::

      [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
    second ``[2, 3)``.  The last bin, however, is ``[3, 4]``, which *includes*
    4.

    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([ 0.25,  0.25,  0.25,  0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([ 0.5,  0. ,  0.5,  0. ,  0. ,  0.5,  0. ,  0.5,  0. ,  0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist*np.diff(bin_edges))
    1.0

    """
    a = asarray(a)
    if weights is not None:
        weights = asarray(weights)
        if np.any(weights.shape != a.shape):
            raise ValueError(
                'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()
    if (range is not None):
        mn, mx = range
        if (mn > mx):
            raise AttributeError(
                'max must be larger than min in range parameter.')
    if not iterable(bins):
        # `bins` is a bin count: derive equal-width edges over `range`.
        if np.isscalar(bins) and bins < 1:
            raise ValueError(
                '`bins` should be a positive integer.')
        if range is None:
            if a.size == 0:
                # handle empty arrays. Can't determine range, so use 0-1.
                range = (0, 1)
            else:
                range = (a.min(), a.max())
        mn, mx = [mi + 0.0 for mi in range]
        if mn == mx:
            # Degenerate range: widen by +-0.5 so linspace produces distinct
            # edges instead of a zero-width histogram.
            mn -= 0.5
            mx += 0.5
        bins = linspace(mn, mx, bins + 1, endpoint=True)
    else:
        # `bins` is an explicit edge sequence; edges must be non-decreasing.
        bins = asarray(bins)
        if (np.diff(bins) < 0).any():
            raise AttributeError(
                'bins must increase monotonically.')
    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = int
    else:
        ntype = weights.dtype
    n = np.zeros(bins.shape, ntype)
    # Process the data in 64K-element chunks so the per-chunk sort/argsort
    # keeps memory use bounded for large inputs.
    block = 65536
    if weights is None:
        for i in arange(0, len(a), block):
            sa = sort(a[i:i+block])
            # Cumulative counts at each edge; 'left'/'right' sides make the
            # last bin closed on the right, all others half-open.
            n += np.r_[sa.searchsorted(bins[:-1], 'left'),
                       sa.searchsorted(bins[-1], 'right')]
    else:
        zero = array(0, dtype=ntype)
        for i in arange(0, len(a), block):
            tmp_a = a[i:i+block]
            tmp_w = weights[i:i+block]
            sorting_index = np.argsort(tmp_a)
            sa = tmp_a[sorting_index]
            sw = tmp_w[sorting_index]
            # Prefix sums of the sorted weights let us read off the total
            # weight below each edge with a single index per edge.
            cw = np.concatenate(([zero, ], sw.cumsum()))
            bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
                              sa.searchsorted(bins[-1], 'right')]
            n += cw[bin_index]
    # Difference of cumulative counts at consecutive edges = per-bin counts.
    n = np.diff(n)
    if density is not None:
        if density:
            db = array(np.diff(bins), float)
            return n/db/n.sum(), bins
        else:
            return n, bins
    else:
        # deprecated, buggy behavior. Remove for Numpy 2.0
        if normed:
            db = array(np.diff(bins), float)
            return n/(n*db).sum(), bins
        else:
            return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : array_like
        The data to be histogrammed. It must be an (N,D) array or data
        that can be converted to such. The rows of the resulting array
        are the coordinates of points in a D dimensional polytope.
    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the bin edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).

    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    normed : bool, optional
        If False, returns the number of samples in each bin. If True,
        returns the bin density ``bin_count / sample_count / bin_volume``.
    weights : array_like (N,), optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if normed is True. If normed is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See normed and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram

    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)

    """
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = atleast_2d(sample).T
        N, D = sample.shape
    nbin = empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = asarray(weights)
    try:
        M = len(bins)
        if M != D:
            raise AttributeError(
                'The dimension of bins must be equal to the dimension of the '
                ' sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]
    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        # Handle empty input. Range can't be determined in that case, use 0-1.
        if N == 0:
            smin = zeros(D)
            smax = ones(D)
        else:
            smin = atleast_1d(array(sample.min(0), float))
            smax = atleast_1d(array(sample.max(0), float))
    else:
        smin = zeros(D)
        smax = zeros(D)
        for i in arange(D):
            smin[i], smax[i] = range[i]
    # Make sure the bins have a finite width.
    for i in arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5
    # avoid rounding issues for comparisons when dealing with inexact types
    if np.issubdtype(sample.dtype, np.inexact):
        edge_dt = sample.dtype
    else:
        edge_dt = float
    # Create edge arrays
    for i in arange(D):
        if isscalar(bins[i]):
            if bins[i] < 1:
                raise ValueError(
                    "Element at index %s in `bins` should be a positive "
                    "integer." % i)
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
        else:
            edges[i] = asarray(bins[i], edge_dt)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = diff(edges[i])
        if np.any(np.asarray(dedges[i]) <= 0):
            raise ValueError(
                "Found bin edge of size <= 0. Did you specify `bins` with"
                "non-monotonic sequence?")
    nbin = asarray(nbin)
    # Handle empty input.
    if N == 0:
        return np.zeros(nbin-2), edges
    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in arange(D):
        Ncount[i] = digitize(sample[:, i], edges[i])
    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in arange(D):
        # Rounding precision
        mindiff = dedges[i].min()
        if not np.isinf(mindiff):
            decimal = int(-log10(mindiff)) + 6
            # Find which points are on the rightmost edge.
            not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
            on_edge = (around(sample[:, i], decimal) ==
                       around(edges[i][-1], decimal))
            # Shift these points one bin to the left.
            Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
    # Flattened histogram matrix (1D)
    # Reshape is used so that overlarge arrays
    # will raise an error.
    hist = zeros(nbin, float).reshape(-1)
    # Compute the sample indices in the flattened histogram matrix.
    # Axes are processed from smallest to largest bin count (argsort order)
    # and recombined below via swapaxes.
    ni = nbin.argsort()
    xy = zeros(N, int)
    for i in arange(0, D-1):
        xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
    xy += Ncount[ni[-1]]
    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    if len(xy) == 0:
        return zeros(nbin-2, int), edges
    flatcount = bincount(xy, weights)
    a = arange(len(flatcount))
    hist[a] = flatcount
    # Shape into a proper matrix
    hist = hist.reshape(sort(nbin))
    for i in arange(nbin.size):
        j = ni.argsort()[i]
        hist = hist.swapaxes(i, j)
        ni[i], ni[j] = ni[j], ni[i]
    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*[slice(1, -1)]
    hist = hist[core]
    # Normalize if normed is True
    if normed:
        s = hist.sum()
        for i in arange(D):
            shape = ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s
    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
def average(a, axis=None, weights=None, returned=False):
    """
    Compute the weighted average along the specified axis.

    Parameters
    ----------
    a : array_like
        Data to average. Converted to an ndarray unless it is already an
        `np.matrix` (matrix semantics are preserved).
    axis : int, optional
        Axis along which to average; ``None`` averages the flattened array.
    weights : array_like, optional
        Per-value weights, either the same shape as `a` or 1-D with length
        ``a.shape[axis]``. ``None`` gives every value weight one.
    returned : bool, optional
        When True, return the tuple ``(average, sum_of_weights)`` instead
        of just the average. With ``weights=None``, `sum_of_weights` is the
        number of averaged elements.

    Returns
    -------
    average, [sum_of_weights] : {array_type, double}
        The average along `axis` (and, optionally, the broadcast weight
        totals). The result is float for integer input, otherwise it keeps
        the type of `a`.

    Raises
    ------
    ZeroDivisionError
        When all weights along an axis are zero. See `numpy.ma.average`
        for a version robust to this type of error.
    TypeError
        When the length of 1D `weights` is not the same as the shape of
        `a` along axis.

    See Also
    --------
    mean
    ma.average : average for masked arrays -- useful if your data contains
                 "missing" values
    """
    if not isinstance(a, np.matrix):
        a = np.asarray(a)

    if weights is None:
        avg = a.mean(axis)
        # Element count contributing to each averaged value.
        scl = avg.dtype.type(a.size/avg.size)
    else:
        a = a + 0.0  # promote integers to float before weighting
        w = np.array(weights, dtype=a.dtype, copy=0)
        if a.shape != w.shape:
            # Only 1-D weights matched against an explicit axis are allowed
            # when the shapes differ.
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights "
                    "differ.")
            if w.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ.")
            if w.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis.")
            # Pad the weights to a.ndim and line them up with `axis` so they
            # broadcast against `a`.
            w = np.array(w, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
        scl = w.sum(axis=axis)
        if (scl == 0.0).any():
            raise ZeroDivisionError(
                "Weights sum to zero, can't be normalized")
        avg = np.multiply(a, w).sum(axis)/scl

    if not returned:
        return avg
    # Broadcast the weight totals up to the shape of the average.
    return avg, np.multiply(avg, 0) + scl
def asarray_chkfinite(a, dtype=None, order=None):
    """
    Convert the input to an array, rejecting NaNs and Infs.

    Parameters
    ----------
    a : array_like
        Anything convertible to an ndarray (lists, tuples, nested
        sequences, ndarrays). Conversion succeeds only if the data
        contains no NaN or Inf values.
    dtype : data-type, optional
        Target dtype; inferred from the input when omitted.
    order : {'C', 'F'}, optional
        Row-major ('C', default) or column-major ('FORTRAN') memory
        layout.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`. No copy is performed if the input
        is already an ndarray; subclasses are flattened to a base-class
        ndarray.

    Raises
    ------
    ValueError
        If `a` contains NaN (Not a Number) or Inf (Infinity).

    See Also
    --------
    asarray, asanyarray, ascontiguousarray, asfarray, asfortranarray,
    fromiter, fromfunction

    Examples
    --------
    >>> np.asarray_chkfinite([1, 2], dtype=float)
    array([1., 2.])
    """
    out = asarray(a, dtype=dtype, order=order)
    # Only floating (and complex) dtypes can hold NaN/Inf, so the finite
    # check is skipped for everything else.
    holds_floats = out.dtype.char in typecodes['AllFloat']
    if holds_floats and not np.isfinite(out).all():
        raise ValueError(
            "array must not contain infs or NaNs")
    return out
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluate a piecewise-defined function.

    Given a set of conditions and corresponding functions, evaluate each
    function on the input data wherever its condition is true.

    Parameters
    ----------
    x : ndarray
        The input domain.
    condlist : list of bool arrays
        Each boolean array corresponds to a function in `funclist`.  Wherever
        `condlist[i]` is True, `funclist[i](x)` is used as the output value.

        Each boolean array in `condlist` selects a piece of `x`,
        and should therefore be of the same shape as `x`.

        The length of `condlist` must correspond to that of `funclist`.
        If one extra function is given, i.e. if
        ``len(funclist) - len(condlist) == 1``, then that extra function
        is the default value, used wherever all conditions are false.
    funclist : list of callables, f(x,*args,**kw), or scalars
        Each function is evaluated over `x` wherever its corresponding
        condition is True.  It should take an array as input and give an array
        or a scalar value as output.  If, instead of a callable,
        a scalar is provided then a constant function (``lambda x: scalar``) is
        assumed.
    args : tuple, optional
        Any further arguments given to `piecewise` are passed to the functions
        upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
        each function is called as ``f(x, 1, 'a')``.
    kw : dict, optional
        Keyword arguments used in calling `piecewise` are passed to the
        functions upon execution, i.e., if called
        ``piecewise(..., ..., lambda=1)``, then each function is called as
        ``f(x, lambda=1)``.

    Returns
    -------
    out : ndarray
        The output is the same shape and type as x and is found by
        calling the functions in `funclist` on the appropriate portions of `x`,
        as defined by the boolean arrays in `condlist`.  Portions not covered
        by any condition have a default value of 0.

    See Also
    --------
    choose, select, where

    Notes
    -----
    This is similar to choose or select, except that functions are
    evaluated on elements of `x` that satisfy the corresponding condition from
    `condlist`.

    The result is::

            |--
            |funclist[0](x[condlist[0]])
      out = |funclist[1](x[condlist[1]])
            |...
            |funclist[n2](x[condlist[n2]])
            |--

    Examples
    --------
    Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.

    >>> x = np.linspace(-2.5, 2.5, 6)
    >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
    array([-1., -1., -1.,  1.,  1.,  1.])

    Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
    ``x >= 0``.

    >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
    array([ 2.5,  1.5,  0.5,  0.5,  1.5,  2.5])

    """
    x = asanyarray(x)
    n2 = len(funclist)
    # A single bare condition (scalar or one boolean array) is wrapped so the
    # rest of the code can treat condlist uniformly as a list of conditions.
    if (isscalar(condlist) or not (isinstance(condlist[0], list) or
                                   isinstance(condlist[0], ndarray))):
        condlist = [condlist]
    condlist = array(condlist, dtype=bool)
    n = len(condlist)
    # This is a hack to work around problems with NumPy's
    #  handling of 0-d arrays and boolean indexing with
    #  numpy.bool_ scalars
    zerod = False
    if x.ndim == 0:
        x = x[None]
        zerod = True
        if condlist.shape[-1] != 1:
            condlist = condlist.T
    if n == n2 - 1:  # compute the "otherwise" condition.
        # The extra function is the default: it applies wherever no listed
        # condition held, so append the complement of their union.
        totlist = np.logical_or.reduce(condlist, axis=0)
        condlist = np.vstack([condlist, ~totlist])
        n += 1
    if (n != n2):
        raise ValueError(
            "function list and condition list must be the same")
    y = zeros(x.shape, x.dtype)
    for k in range(n):
        item = funclist[k]
        if not isinstance(item, collections.Callable):
            # Scalar entries act as constant functions.
            y[condlist[k]] = item
        else:
            vals = x[condlist[k]]
            if vals.size > 0:
                y[condlist[k]] = item(vals, *args, **kw)
    if zerod:
        # Undo the 0-d -> 1-d promotion performed above.
        y = y.squeeze()
    return y
def select(condlist, choicelist, default=0):
    """
    Return an array drawn from elements in choicelist, depending on conditions.

    Parameters
    ----------
    condlist : list of bool ndarrays
        The list of conditions which determine from which array in `choicelist`
        the output elements are taken. When multiple conditions are satisfied,
        the first one encountered in `condlist` is used.
    choicelist : list of ndarrays
        The list of arrays from which the output elements are taken. It has
        to be of the same length as `condlist`.
    default : scalar, optional
        The element inserted in `output` when all conditions evaluate to False.

    Returns
    -------
    output : ndarray
        The output at position m is the m-th element of the array in
        `choicelist` where the m-th element of the corresponding array in
        `condlist` is True.

    See Also
    --------
    where : Return elements from one of two arrays depending on condition.
    take, choose, compress, diag, diagonal

    Examples
    --------
    >>> x = np.arange(10)
    >>> condlist = [x<3, x>5]
    >>> choicelist = [x, x**2]
    >>> np.select(condlist, choicelist)
    array([ 0,  1,  2,  0,  0,  0, 36, 49, 64, 81])

    """
    # Check the size of condlist and choicelist are the same, or abort.
    if len(condlist) != len(choicelist):
        raise ValueError(
            'list of cases must be same length as list of conditions')

    # Now that the dtype is known, handle the deprecated select([], []) case
    if len(condlist) == 0:
        warnings.warn("select with an empty condition list is not possible"
                      "and will be deprecated",
                      DeprecationWarning)
        return np.asarray(default)[()]

    choicelist = [np.asarray(choice) for choice in choicelist]
    # The default participates in dtype resolution like any other choice.
    choicelist.append(np.asarray(default))

    # need to get the result type before broadcasting for correct scalar
    # behaviour
    dtype = np.result_type(*choicelist)

    # Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it seperatly optimizes
    # for example when all choices are scalars.
    condlist = np.broadcast_arrays(*condlist)
    choicelist = np.broadcast_arrays(*choicelist)

    # If cond array is not an ndarray in boolean format or scalar bool, abort.
    deprecated_ints = False
    for i in range(len(condlist)):
        cond = condlist[i]
        if cond.dtype.type is not np.bool_:
            if np.issubdtype(cond.dtype, np.integer):
                # A previous implementation accepted int ndarrays accidentally.
                # Supported here deliberately, but deprecated.
                condlist[i] = condlist[i].astype(bool)
                deprecated_ints = True
            else:
                raise ValueError(
                    'invalid entry in choicelist: should be boolean ndarray')

    if deprecated_ints:
        msg = "select condlists containing integer ndarrays is deprecated " \
            "and will be removed in the future. Use `.astype(bool)` to " \
            "convert to bools."
        warnings.warn(msg, DeprecationWarning)

    if choicelist[0].ndim == 0:
        # This may be common, so avoid the call.
        result_shape = condlist[0].shape
    else:
        result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape

    result = np.full(result_shape, choicelist[-1], dtype)

    # Use np.copyto to burn each choicelist array onto result, using the
    # corresponding condlist as a boolean mask. This is done in reverse
    # order since the first choice should take precedence.
    choicelist = choicelist[-2::-1]
    condlist = condlist[::-1]
    for choice, cond in zip(choicelist, condlist):
        np.copyto(result, choice, where=cond)

    return result
def copy(a, order='K'):
    """
    Return an array copy of the given object.

    Parameters
    ----------
    a : array_like
        Input data.
    order : {'C', 'F', 'A', 'K'}, optional
        Memory layout of the copy: 'C' (C order), 'F' (Fortran order),
        'A' ('F' if `a` is Fortran contiguous, 'C' otherwise), or 'K'
        (match the layout of `a` as closely as possible).  Note that
        this function and :meth:`ndarray.copy` are very similar but use
        different defaults for ``order``.

    Returns
    -------
    arr : ndarray
        Array interpretation of `a`.

    Notes
    -----
    Equivalent to ``np.array(a, copy=True)``.

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> y = x
    >>> z = np.copy(x)

    Modifying ``x`` is reflected in the reference ``y`` but not in the
    copy ``z``:

    >>> x[0] = 10
    >>> x[0] == y[0]
    True
    >>> x[0] == z[0]
    False
    """
    # Delegate to array() with copying forced on; only the layout is
    # caller-controlled.
    return array(a, copy=True, order=order)
# Basic operations
def gradient(f, *varargs, **kwargs):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using second order accurate central differences
    in the interior and either first differences or second order accurate
    one-sides (forward or backwards) differences at the boundaries. The
    returned gradient hence has the same shape as the input array.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.
    varargs : list of scalar, optional
        N scalars specifying the sample distances for each dimension,
        i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
    edge_order : {1, 2}, optional
        Gradient is calculated using N\\ :sup:`th` order accurate differences
        at the boundaries. Default: 1.

    Returns
    -------
    gradient : ndarray
        N arrays of the same shape as `f` giving the derivative of `f` with
        respect to each dimension.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
    >>> np.gradient(x)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
    >>> np.gradient(x, 2)
    array([ 0.5 ,  0.75,  1.25,  1.75,  2.25,  2.5 ])
    """
    f = np.asanyarray(f)
    N = len(f.shape)  # number of dimensions
    n = len(varargs)
    if n == 0:
        dx = [1.0]*N
    elif n == 1:
        # one sample distance, reused along every axis
        dx = [varargs[0]]*N
    elif n == N:
        dx = list(varargs)
    else:
        # kept as SyntaxError for backward compatibility with callers that
        # catch it, even though TypeError would be more conventional
        raise SyntaxError(
            "invalid number of arguments")
    edge_order = kwargs.pop('edge_order', 1)
    if kwargs:
        raise TypeError('"{}" are not valid keyword arguments.'.format(
                        '", "'.join(kwargs.keys())))
    if edge_order > 2:
        raise ValueError("'edge_order' greater than 2 not supported")

    # use central differences on interior and one-sided differences on the
    # endpoints. This preserves second order-accuracy over the full domain.
    outvals = []

    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N
    slice4 = [slice(None)]*N

    otype = f.dtype.char
    if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
        otype = 'd'

    # Difference of datetime64 elements results in timedelta64
    if otype == 'M':
        # Need to use the full dtype name because it contains unit information
        otype = f.dtype.name.replace('datetime', 'timedelta')
    elif otype == 'm':
        # Needs to keep the specific units, can't be a general unit
        otype = f.dtype

    # Convert datetime64 data into ints. Make dummy variable `y`
    # that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
    if f.dtype.char in ["M", "m"]:
        y = f.view('int64')
    else:
        y = f

    for axis in range(N):
        if y.shape[axis] < 2:
            raise ValueError(
                "Shape of array too small to calculate a numerical gradient, "
                "at least two elements are required.")

        # Numerical differentiation: 1st order edges, 2nd order interior
        if y.shape[axis] == 2 or edge_order == 1:
            # Use first order differences for time data
            out = np.empty_like(y, dtype=otype)

            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(2, None)
            slice3[axis] = slice(None, -2)
            # FIX: index with tuples — ndarray indexing with a *list* of
            # slices was deprecated in NumPy 1.15 and later removed.
            # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])/2.0

            slice1[axis] = 0
            slice2[axis] = 1
            slice3[axis] = 0
            # 1D equivalent -- out[0] = (y[1] - y[0])
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])

            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            # 1D equivalent -- out[-1] = (y[-1] - y[-2])
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])

        # Numerical differentiation: 2nd order edges, 2nd order interior
        else:
            # Use second order differences where possible
            out = np.empty_like(y, dtype=otype)

            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(2, None)
            slice3[axis] = slice(None, -2)
            # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])/2.0

            slice1[axis] = 0
            slice2[axis] = 0
            slice3[axis] = 1
            slice4[axis] = 2
            # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
            out[tuple(slice1)] = -(3.0*y[tuple(slice2)] - 4.0*y[tuple(slice3)]
                                   + y[tuple(slice4)])/2.0

            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            slice4[axis] = -3
            # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / 2.0
            out[tuple(slice1)] = (3.0*y[tuple(slice2)] - 4.0*y[tuple(slice3)]
                                  + y[tuple(slice4)])/2.0

        # divide by step size
        out /= dx[axis]
        outvals.append(out)

        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)
        slice4[axis] = slice(None)

    if N == 1:
        return outvals[0]
    else:
        return outvals
def diff(a, n=1, axis=-1):
    """
    Calculate the n-th order discrete difference along given axis.

    The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
    the given axis; higher orders are obtained by repeating the operation.

    Parameters
    ----------
    a : array_like
        Input array
    n : int, optional
        The number of times values are differenced.
    axis : int, optional
        The axis along which the difference is taken, default is the last axis.

    Returns
    -------
    diff : ndarray
        The `n` order differences. The shape of the output is the same as `a`
        except along `axis` where the dimension is smaller by `n`.

    See Also
    --------
    gradient, ediff1d, cumsum

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])
    """
    if n == 0:
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))
    a = asanyarray(a)
    ndim = len(a.shape)
    # Build the two shifted views once; differencing preserves ndim, so the
    # same index tuples can be reused for every order.
    upper = [slice(None)]*ndim
    lower = [slice(None)]*ndim
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    upper = tuple(upper)
    lower = tuple(lower)
    # Iterative formulation of the original recursive definition.
    for _ in range(n):
        a = a[upper] - a[lower]
    return a
def interp(x, xp, fp, left=None, right=None):
    """
    One-dimensional linear interpolation.

    Returns the one-dimensional piecewise linear interpolant to a function
    with given values at discrete data-points.

    Parameters
    ----------
    x : array_like
        The x-coordinates of the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points, must be increasing.
    fp : 1-D sequence of floats
        The y-coordinates of the data points, same length as `xp`.
    left : float, optional
        Value to return for `x < xp[0]`, default is `fp[0]`.
    right : float, optional
        Value to return for `x > xp[-1]`, default is `fp[-1]`.

    Returns
    -------
    y : {float, ndarray}
        The interpolated values, same shape as `x`.

    Raises
    ------
    ValueError
        If `xp` and `fp` have different length

    Notes
    -----
    Does not check that the x-coordinate sequence `xp` is increasing;
    if it is not, the results are nonsense.  A simple check is
    ``np.all(np.diff(xp) > 0)``.

    Examples
    --------
    >>> xp = [1, 2, 3]
    >>> fp = [3, 2, 0]
    >>> np.interp(2.5, xp, fp)
    1.0
    """
    # Scalar inputs (Python numbers and 0-d arrays) are wrapped in a list so
    # the C routine always sees a 1-D array, then unwrapped with .item().
    is_py_scalar = isinstance(x, (float, int, number))
    is_0d_array = isinstance(x, np.ndarray) and x.ndim == 0
    if is_py_scalar or is_0d_array:
        return compiled_interp([x], xp, fp, left, right).item()
    return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
    """
    Return the angle of the complex argument.

    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return angle in degrees if True, radians if False (default).

    Returns
    -------
    angle : {ndarray, scalar}
        The counterclockwise angle from the positive real axis on
        the complex plane, with dtype as numpy.float64.

    See Also
    --------
    arctan2
    absolute

    Examples
    --------
    >>> np.angle([1.0, 1.0j, 1+1j])               # in radians
    array([ 0.        ,  1.57079633,  0.78539816])
    >>> np.angle(1+1j, deg=True)                  # in degrees
    45.0
    """
    scale = 180/pi if deg else 1.0
    z = asarray(z)
    # Real inputs have no imaginary component, so arctan2 gets a literal 0.
    if issubclass(z.dtype.type, _nx.complexfloating):
        imag_part, real_part = z.imag, z.real
    else:
        imag_part, real_part = 0, z
    return arctan2(imag_part, real_part) * scale
def unwrap(p, discont=pi, axis=-1):
    """
    Unwrap by changing deltas between values to 2*pi complement.

    Unwrap radian phase `p` by changing absolute jumps greater than
    `discont` to their 2*pi complement along the given axis.

    Parameters
    ----------
    p : array_like
        Input array.
    discont : float, optional
        Maximum discontinuity between values, default is ``pi``.
    axis : int, optional
        Axis along which unwrap will operate, default is the last axis.

    Returns
    -------
    out : ndarray
        Output array.

    See Also
    --------
    rad2deg, deg2rad

    Notes
    -----
    If the discontinuity in `p` is smaller than ``pi``, but larger than
    `discont`, no unwrapping is done because taking the 2*pi complement
    would only make the discontinuity larger.

    Examples
    --------
    >>> phase = np.linspace(0, np.pi, num=5)
    >>> phase[3:] += np.pi
    >>> np.unwrap(phase)
    array([ 0.        ,  0.78539816,  1.57079633, -0.78539816,  0.        ])
    """
    p = asarray(p)
    nd = len(p.shape)
    dd = diff(p, axis=axis)
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    # FIX: indexing an ndarray with a *list* of slices was deprecated in
    # NumPy 1.15 and later removed; a tuple is the supported form.
    slice1 = tuple(slice1)
    # Map deltas into (-pi, pi], preferring +pi for positive jumps of
    # exactly pi so the correction is symmetric.
    ddmod = mod(dd + pi, 2*pi) - pi
    _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
    ph_correct = ddmod - dd
    # Jumps smaller than `discont` need no correction.
    _nx.copyto(ph_correct, 0, where=abs(dd) < discont)
    up = array(p, copy=True, dtype='d')
    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
    return up
def sort_complex(a):
    """
    Sort a complex array using the real part first, then the imaginary part.

    Parameters
    ----------
    a : array_like
        Input array

    Returns
    -------
    out : complex ndarray
        Always returns a sorted complex array.

    Examples
    --------
    >>> np.sort_complex([5, 3, 6, 2, 1])
    array([ 1.+0.j,  2.+0.j,  3.+0.j,  5.+0.j,  6.+0.j])
    """
    sorted_copy = array(a, copy=True)
    sorted_copy.sort()
    if issubclass(sorted_copy.dtype.type, _nx.complexfloating):
        return sorted_copy
    # Promote real results to a complex dtype wide enough for the input:
    # small ints -> csingle, long double -> clongdouble, everything else
    # -> cdouble.
    kind = sorted_copy.dtype.char
    if kind in 'bhBH':
        target = 'F'
    elif kind == 'g':
        target = 'G'
    else:
        target = 'D'
    return sorted_copy.astype(target)
def trim_zeros(filt, trim='fb'):
    """
    Trim the leading and/or trailing zeros from a 1-D array or sequence.

    Parameters
    ----------
    filt : 1-D array or sequence
        Input array.
    trim : str, optional
        A string with 'f' representing trim from front and 'b' to trim from
        back. Default is 'fb', trim zeros from both front and back of the
        array.

    Returns
    -------
    trimmed : 1-D array or sequence
        The result of trimming the input. The input data type is preserved.

    Examples
    --------
    >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
    >>> np.trim_zeros(a)
    array([1, 2, 3, 0, 2, 1])
    >>> np.trim_zeros([0, 1, 2, 0])
    [1, 2]
    """
    trim = trim.upper()
    # Walk from the front until the first non-zero entry.
    start = 0
    if 'F' in trim:
        for element in filt:
            if element != 0.:
                break
            start += 1
    # Walk from the back until the last non-zero entry.
    stop = len(filt)
    if 'B' in trim:
        for element in filt[::-1]:
            if element != 0.:
                break
            stop -= 1
    # Slicing preserves the input container type (list in -> list out).
    return filt[start:stop]
@deprecate
def unique(x):
    """
    This function is deprecated. Use numpy.lib.arraysetops.unique()
    instead.
    """
    # Fast path for ndarray-like inputs: sort a flattened copy and keep
    # only the entries that differ from their predecessor.
    try:
        flat = x.flatten()
        if flat.size == 0:
            return flat
        flat.sort()
        keep = concatenate(([True], flat[1:] != flat[:-1]))
        return flat[keep]
    except AttributeError:
        # Generic sequences: deduplicate via a set and sort.
        return asarray(sorted(set(x)))
def extract(condition, arr):
    """
    Return the elements of an array that satisfy some condition.

    This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
    `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.

    Parameters
    ----------
    condition : array_like
        An array whose nonzero or True entries indicate the elements of `arr`
        to extract.
    arr : array_like
        Input array of the same size as `condition`.

    Returns
    -------
    extract : ndarray
        Rank 1 array of values from `arr` where `condition` is True.

    See Also
    --------
    take, put, copyto, compress

    Examples
    --------
    >>> arr = np.arange(12).reshape((3, 4))
    >>> condition = np.mod(arr, 3)==0
    >>> np.extract(condition, arr)
    array([0, 3, 6, 9])
    """
    # Flatten both operands, then gather the positions where the condition
    # is nonzero.
    selected = nonzero(ravel(condition))[0]
    return _nx.take(ravel(arr), selected)
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.
    Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
    `place` uses the first N elements of `vals`, where N is the number of
    True values in `mask`, while `copyto` uses the elements where `mask`
    is True.
    Note that `extract` does the exact opposite of `place`.
    Parameters
    ----------
    arr : array_like
        Array to put data into.
    mask : array_like
        Boolean mask array. Must have the same size as `a`.
    vals : 1-D sequence
        Values to put into `a`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N it will be repeated.
    See Also
    --------
    copyto, put, take, extract
    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0, 1, 2],
           [44, 55, 44]])
    """
    # Thin delegation: `_insert` is presumably the C-implemented helper
    # imported at module top (numpy.core.multiarray._insert) — confirm
    # against the file's import block. Per the docstring/example it mutates
    # `arr` in place, cycling through `vals` when fewer values than True
    # mask entries are given.
    return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
    """
    Display a message on a device.

    Parameters
    ----------
    mesg : str
        Message to display.
    device : object
        Device to write message. If None, defaults to ``sys.stdout`` which is
        very similar to ``print``. `device` needs to have ``write()`` and
        ``flush()`` methods.
    linefeed : bool, optional
        Option whether to print a line feed or not. Defaults to True.

    Raises
    ------
    AttributeError
        If `device` does not have a ``write()`` or ``flush()`` method.

    Examples
    --------
    Besides ``sys.stdout``, a file-like object can also be used as it has
    both required methods:

    >>> from StringIO import StringIO
    >>> buf = StringIO()
    >>> np.disp('"Display" in a file', device=buf)
    >>> buf.getvalue()
    '"Display" in a file\\n'
    """
    # Default to standard output when no device is supplied.
    sink = sys.stdout if device is None else device
    template = '%s\n' if linefeed else '%s'
    sink.write(template % mesg)
    sink.flush()
    return
class vectorize(object):
    """
    vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
    Generalized function class.
    Define a vectorized function which takes a nested sequence
    of objects or numpy arrays as inputs and returns a
    numpy array as output. The vectorized function evaluates `pyfunc` over
    successive tuples of the input arrays like the python map function,
    except it uses the broadcasting rules of numpy.
    The data type of the output of `vectorized` is determined by calling
    the function with the first element of the input. This can be avoided
    by specifying the `otypes` argument.
    Parameters
    ----------
    pyfunc : callable
        A python function or method.
    otypes : str or list of dtypes, optional
        The output data type. It must be specified as either a string of
        typecode characters or a list of data type specifiers. There should
        be one data type specifier for each output.
    doc : str, optional
        The docstring for the function. If `None`, the docstring will be the
        ``pyfunc.__doc__``.
    excluded : set, optional
        Set of strings or integers representing the positional or keyword
        arguments for which the function will not be vectorized. These will be
        passed directly to `pyfunc` unmodified.
        .. versionadded:: 1.7.0
    cache : bool, optional
        If `True`, then cache the first function call that determines the number
        of outputs if `otypes` is not provided.
        .. versionadded:: 1.7.0
    Returns
    -------
    vectorized : callable
        Vectorized function.
    Examples
    --------
    >>> def myfunc(a, b):
    ...     "Return a-b if a>b, otherwise return a+b"
    ...     if a > b:
    ...         return a - b
    ...     else:
    ...         return a + b
    >>> vfunc = np.vectorize(myfunc)
    >>> vfunc([1, 2, 3, 4], 2)
    array([3, 4, 1, 2])
    The docstring is taken from the input function to `vectorize` unless it
    is specified
    >>> vfunc.__doc__
    'Return a-b if a>b, otherwise return a+b'
    >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
    >>> vfunc.__doc__
    'Vectorized `myfunc`'
    The output type is determined by evaluating the first element of the input,
    unless it is specified
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.int32'>
    >>> vfunc = np.vectorize(myfunc, otypes=[np.float])
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.float64'>
    The `excluded` argument can be used to prevent vectorizing over certain
    arguments. This can be useful for array-like arguments of a fixed length
    such as the coefficients for a polynomial as in `polyval`:
    >>> def mypolyval(p, x):
    ...     _p = list(p)
    ...     res = _p.pop(0)
    ...     while _p:
    ...         res = res*x + _p.pop(0)
    ...     return res
    >>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
    >>> vpolyval(p=[1, 2, 3], x=[0, 1])
    array([3, 6])
    Positional arguments may also be excluded by specifying their position:
    >>> vpolyval.excluded.add(0)
    >>> vpolyval([1, 2, 3], x=[0, 1])
    array([3, 6])
    Notes
    -----
    The `vectorize` function is provided primarily for convenience, not for
    performance. The implementation is essentially a for loop.
    If `otypes` is not specified, then a call to the function with the
    first argument will be used to determine the number of outputs. The
    results of this call will be cached if `cache` is `True` to prevent
    calling the function twice. However, to implement the cache, the
    original function must be wrapped which will slow down subsequent
    calls, so only do this if your function is expensive.
    The new keyword argument interface and `excluded` argument support
    further degrades performance.
    """
    def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
                 cache=False):
        self.pyfunc = pyfunc
        self.cache = cache
        self._ufunc = None    # Caching to improve default performance
        if doc is None:
            self.__doc__ = pyfunc.__doc__
        else:
            self.__doc__ = doc
        # Validate/normalize otypes into a string of dtype typecode chars.
        if isinstance(otypes, str):
            self.otypes = otypes
            for char in self.otypes:
                if char not in typecodes['All']:
                    raise ValueError(
                        "Invalid otype specified: %s" % (char,))
        elif iterable(otypes):
            self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
        else:
            raise ValueError(
                "Invalid otype specification")
        # Excluded variable support
        if excluded is None:
            excluded = set()
        self.excluded = set(excluded)
    def __call__(self, *args, **kwargs):
        """
        Return arrays with the results of `pyfunc` broadcast (vectorized) over
        `args` and `kwargs` not in `excluded`.
        """
        excluded = self.excluded
        if not kwargs and not excluded:
            # Fast path: nothing to exclude, vectorize over all positionals.
            func = self.pyfunc
            vargs = args
        else:
            # The wrapper accepts only positional arguments: we use `names` and
            # `inds` to mutate `the_args` and `kwargs` to pass to the original
            # function.
            nargs = len(args)
            # Keyword/positional arguments that ARE vectorized over; excluded
            # ones stay fixed in `the_args`/`kwargs` via closure.
            names = [_n for _n in kwargs if _n not in excluded]
            inds = [_i for _i in range(nargs) if _i not in excluded]
            the_args = list(args)
            def func(*vargs):
                # NOTE: mutates the closed-over `the_args`/`kwargs` on every
                # element evaluation — order of positions then keywords must
                # match how `vargs` is assembled below.
                for _n, _i in enumerate(inds):
                    the_args[_i] = vargs[_n]
                kwargs.update(zip(names, vargs[len(inds):]))
                return self.pyfunc(*the_args, **kwargs)
            vargs = [args[_i] for _i in inds]
            vargs.extend([kwargs[_n] for _n in names])
        return self._vectorize_call(func=func, args=vargs)
    def _get_ufunc_and_otypes(self, func, args):
        """Return (ufunc, otypes)."""
        # frompyfunc will fail if args is empty
        if not args:
            raise ValueError('args can not be empty')
        if self.otypes:
            otypes = self.otypes
            nout = len(otypes)
            # Note logic here: We only *use* self._ufunc if func is self.pyfunc
            # even though we set self._ufunc regardless.
            if func is self.pyfunc and self._ufunc is not None:
                ufunc = self._ufunc
            else:
                ufunc = self._ufunc = frompyfunc(func, len(args), nout)
        else:
            # Get number of outputs and output types by calling the function on
            # the first entries of args. We also cache the result to prevent
            # the subsequent call when the ufunc is evaluated.
            # Assumes that ufunc first evaluates the 0th elements in the input
            # arrays (the input values are not checked to ensure this)
            inputs = [asarray(_a).flat[0] for _a in args]
            outputs = func(*inputs)
            # Performance note: profiling indicates that -- for simple
            # functions at least -- this wrapping can almost double the
            # execution time.
            # Hence we make it optional.
            if self.cache:
                _cache = [outputs]
                def _func(*vargs):
                    # First evaluation already happened above: replay its
                    # cached result once, then fall through to `func`.
                    if _cache:
                        return _cache.pop()
                    else:
                        return func(*vargs)
            else:
                _func = func
            if isinstance(outputs, tuple):
                nout = len(outputs)
            else:
                nout = 1
                outputs = (outputs,)
            otypes = ''.join([asarray(outputs[_k]).dtype.char
                              for _k in range(nout)])
            # Performance note: profiling indicates that creating the ufunc is
            # not a significant cost compared with wrapping so it seems not
            # worth trying to cache this.
            ufunc = frompyfunc(_func, len(args), nout)
        return ufunc, otypes
    def _vectorize_call(self, func, args):
        """Vectorized call to `func` over positional `args`."""
        if not args:
            _res = func()
        else:
            ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
            # Convert args to object arrays first
            inputs = [array(_a, copy=False, subok=True, dtype=object)
                      for _a in args]
            outputs = ufunc(*inputs)
            # frompyfunc always yields object arrays; cast back to the
            # determined otypes.
            if ufunc.nout == 1:
                _res = array(outputs,
                             copy=False, subok=True, dtype=otypes[0])
            else:
                _res = tuple([array(_x, copy=False, subok=True, dtype=_t)
                              for _x, _t in zip(outputs, otypes)])
        return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
    """
    Estimate a covariance matrix, given data.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element :math:`C_{ij}` is the covariance of
    :math:`x_i` and :math:`x_j`, and :math:`C_{ii}` the variance of
    :math:`x_i`.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        form as that of `m`.
    rowvar : int, optional
        If non-zero (default), each row represents a variable with
        observations in the columns; otherwise the relationship is
        transposed.
    bias : int, optional
        Default normalization is by ``(N - 1)`` (unbiased estimate). If
        `bias` is 1, normalization is by ``N``. Overridden by ``ddof``.
    ddof : int, optional
        If not ``None``, normalization is by ``(N - ddof)``; this overrides
        the value implied by ``bias``. Default ``None``.

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Examples
    --------
    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])
    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError(
            "ddof must be integer")
    # Result dtype is at least float64; complex input promotes further.
    m = np.asarray(m)
    if y is None:
        dtype = np.result_type(m, np.float64)
    else:
        y = np.asarray(y)
        dtype = np.result_type(m, y, np.float64)
    X = array(m, ndmin=2, dtype=dtype)
    # A single row is always treated as one variable.
    if X.shape[0] == 1:
        rowvar = 1
    axis = 0 if rowvar else 1
    N = X.shape[1 - axis]           # number of observations
    # ddof defaults: 1 (unbiased) unless bias requests plain 1/N.
    if ddof is None:
        ddof = 1 if bias == 0 else 0
    fact = float(N - ddof)
    if fact <= 0:
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
        fact = 0.0
    if y is not None:
        y = array(y, copy=False, ndmin=2, dtype=dtype)
        X = concatenate((X, y), axis)
    # Center each variable on its mean across observations.
    X -= X.mean(axis=1-axis, keepdims=True)
    if rowvar:
        prod = dot(X, X.T.conj())
    else:
        prod = dot(X.T, X.conj())
    return (prod / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
    """
    Return correlation coefficients.

    Please refer to the documentation for `cov` for more detail. The
    relationship between the correlation coefficient matrix, `P`, and the
    covariance matrix, `C`, is

    .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }

    The values of `P` are between -1 and 1, inclusive.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `m`.
    rowvar : int, optional
        If non-zero (default), each row represents a variable with
        observations in the columns; otherwise transposed.
    bias : int, optional
        Default normalization is by ``(N - 1)``; if 1, by ``N``.
        Overridden by ``ddof``.
    ddof : {None, int}, optional
        If not ``None``, normalization is by ``(N - ddof)``.

    Returns
    -------
    out : ndarray
        The correlation coefficient matrix of the variables.

    See Also
    --------
    cov : Covariance matrix
    """
    c = cov(x, y, rowvar, bias, ddof)
    try:
        variances = diag(c)
    except ValueError:
        # Scalar (0-d) covariance: nan for degenerate values (nan, inf, 0),
        # 1 otherwise.
        return c / c
    denom = sqrt(multiply.outer(variances, variances))
    return c / denom
def blackman(M):
    """
    Return the Blackman window.

    The Blackman window is a taper formed by using the first three terms of
    a summation of cosines. It was designed to have close to the minimal
    leakage possible; it is close to optimal, only slightly worse than a
    Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value one
        appears only if the number of samples is odd).

    See Also
    --------
    bartlett, hamming, hanning, kaiser

    Notes
    -----
    The Blackman window is defined as

    .. math::  w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)

    It is also known as an apodization ("removing the foot", i.e. smoothing
    discontinuities at the beginning and end of the sampled signal) or
    tapering function.

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
    Dover Publications, New York.
    Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
    Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.

    Examples
    --------
    >>> np.blackman(12)
    array([ -1.38777878e-17,   3.26064346e-02,   1.59903635e-01,
             4.14397981e-01,   7.36045180e-01,   9.67046769e-01,
             9.67046769e-01,   7.36045180e-01,   4.14397981e-01,
             1.59903635e-01,   3.26064346e-02,  -1.38777878e-17])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    k = arange(0, M)
    # Three-term cosine sum evaluated over the M sample positions.
    return 0.42 - 0.5*cos(2.0*pi*k/(M-1)) + 0.08*cos(4.0*pi*k/(M-1))
def bartlett(M):
    """
    Return the Bartlett window.

    The Bartlett window is very similar to a triangular window, except
    that the end points are at zero. It is often used in signal
    processing for tapering a signal, without generating too much
    ripple in the frequency domain.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : array
        The triangular window, with the maximum value normalized to one
        (the value one appears only if the number of samples is odd), with
        the first and last samples equal to zero.

    See Also
    --------
    blackman, hamming, hanning, kaiser

    Notes
    -----
    The Bartlett window is defined as

    .. math:: w(n) = \\frac{2}{M-1} \\left(
              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
              \\right)

    Convolution with this window produces linear interpolation. The
    Fourier transform of the Bartlett window is the product of two sinc
    functions.

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.bartlett(12)
    array([ 0.        ,  0.18181818,  0.36363636,  0.54545455,  0.72727273,
            0.90909091,  0.90909091,  0.72727273,  0.54545455,  0.36363636,
            0.18181818,  0.        ])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    k = arange(0, M)
    # Rising ramp up to the midpoint, falling ramp afterwards.
    rising = 2.0*k/(M-1)
    falling = 2.0 - 2.0*k/(M-1)
    return where(less_equal(k, (M-1)/2.0), rising, falling)
def hanning(M):
    """
    Return the Hanning window.

    The Hanning (or Hann) window is a taper formed by using a weighted
    cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is odd).

    See Also
    --------
    bartlett, blackman, hamming, kaiser

    Notes
    -----
    The Hanning window is defined as

    .. math::  w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    The window was named for Julius van Hann, an Austrian meteorologist.
    It is also known as the Cosine Bell; some authors prefer "Hann
    window" to avoid confusion with the very similar Hamming window.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 106-108.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hanning(12)
    array([ 0.        ,  0.07937323,  0.29229249,  0.57115742,  0.82743037,
            0.97974649,  0.97974649,  0.82743037,  0.57115742,  0.29229249,
            0.07937323,  0.        ])
    """
    # Degenerate sizes first: no samples at all, then a single unity sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    # Raised-cosine taper evaluated at sample indices 0 .. M-1.
    samples = arange(0, M)
    return 0.5 - 0.5 * cos(2.0 * pi * samples / (M - 1))
def hamming(M):
    """
    Return the Hamming window.

    The Hamming window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hanning, kaiser

    Notes
    -----
    The Hamming window is defined as

    .. math::  w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    Named for R. W. Hamming, an associate of J. W. Tukey, and described
    in Blackman and Tukey. It was recommended for smoothing the
    truncated autocovariance function in the time domain.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hamming(12)
    array([ 0.08      ,  0.15302337,  0.34890909,  0.60546483,  0.84123594,
            0.98136677,  0.98136677,  0.84123594,  0.60546483,  0.34890909,
            0.15302337,  0.08      ])
    """
    # Handle the degenerate sizes up front.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    # Weighted-cosine taper over sample indices 0 .. M-1.
    samples = arange(0, M)
    return 0.54 - 0.46 * cos(2.0 * pi * samples / (M - 1))
## Code from cephes for i0
# Chebyshev series coefficients used by _i0_1 for the small-argument branch
# (consumed by _chbevl, highest-order coefficient first; see the Cephes
# library's i0 implementation, from which these tables are taken).
_i0A = [
    -4.41534164647933937950E-18,
    3.33079451882223809783E-17,
    -2.43127984654795469359E-16,
    1.71539128555513303061E-15,
    -1.16853328779934516808E-14,
    7.67618549860493561688E-14,
    -4.85644678311192946090E-13,
    2.95505266312963983461E-12,
    -1.72682629144155570723E-11,
    9.67580903537323691224E-11,
    -5.18979560163526290666E-10,
    2.65982372468238665035E-9,
    -1.30002500998624804212E-8,
    6.04699502254191894932E-8,
    -2.67079385394061173391E-7,
    1.11738753912010371815E-6,
    -4.41673835845875056359E-6,
    1.64484480707288970893E-5,
    -5.75419501008210370398E-5,
    1.88502885095841655729E-4,
    -5.76375574538582365885E-4,
    1.63947561694133579842E-3,
    -4.32430999505057594430E-3,
    1.05464603945949983183E-2,
    -2.37374148058994688156E-2,
    4.93052842396707084878E-2,
    -9.49010970480476444210E-2,
    1.71620901522208775349E-1,
    -3.04682672343198398683E-1,
    6.76795274409476084995E-1
    ]
# Chebyshev series coefficients used by _i0_2 for the large-argument branch
# (the series is evaluated in the transformed variable 32/x - 2).
_i0B = [
    -7.23318048787475395456E-18,
    -4.83050448594418207126E-18,
    4.46562142029675999901E-17,
    3.46122286769746109310E-17,
    -2.82762398051658348494E-16,
    -3.42548561967721913462E-16,
    1.77256013305652638360E-15,
    3.81168066935262242075E-15,
    -9.55484669882830764870E-15,
    -4.15056934728722208663E-14,
    1.54008621752140982691E-14,
    3.85277838274214270114E-13,
    7.18012445138366623367E-13,
    -1.79417853150680611778E-12,
    -1.32158118404477131188E-11,
    -3.14991652796324136454E-11,
    1.18891471078464383424E-11,
    4.94060238822496958910E-10,
    3.39623202570838634515E-9,
    2.26666899049817806459E-8,
    2.04891858946906374183E-7,
    2.89137052083475648297E-6,
    6.88975834691682398426E-5,
    3.36911647825569408990E-3,
    8.04490411014108831608E-1
    ]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
    # Small-argument branch of i0 (x in [0, 8]): Chebyshev series in the
    # transformed variable x/2 - 2, rescaled by exp(x).
    return _chbevl(x/2.0 - 2, _i0A) * exp(x)
def _i0_2(x):
    # Large-argument branch of i0 (x > 8): Chebyshev series in 32/x - 2,
    # rescaled by exp(x)/sqrt(x).
    return _chbevl(32.0/x - 2.0, _i0B) * exp(x) / sqrt(x)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.
    Usually denoted :math:`I_0`.  This function does broadcast, but will *not*
    "up-cast" int dtype arguments unless accompanied by at least one float or
    complex dtype argument (see Raises below).
    Parameters
    ----------
    x : array_like, dtype float or complex
        Argument of the Bessel function.
    Returns
    -------
    out : ndarray, shape = x.shape, dtype = x.dtype
        The modified Bessel function evaluated at each of the elements of `x`.
    Raises
    ------
    TypeError: array cannot be safely cast to required type
        If argument consists exclusively of int dtypes.
    See Also
    --------
    scipy.special.iv, scipy.special.ive
    Notes
    -----
    We use the algorithm published by Clenshaw [1]_ and referenced by
    Abramowitz and Stegun [2]_, for which the function domain is
    partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
    polynomial expansions are employed in each interval. Relative error on
    the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
    peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
    References
    ----------
    .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
           *National Physical Laboratory Mathematical Tables*, vol. 5, London:
           Her Majesty's Stationery Office, 1962.
    .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
           Functions*, 10th printing, New York: Dover, 1964, pp. 379.
           http://www.math.sfu.ca/~cbm/aands/page_379.htm
    .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
    Examples
    --------
    >>> np.i0([0.])
    array(1.0)
    >>> np.i0([0., 1. + 2j])
    array([ 1.00000000+0.j  ,  0.18785373+0.64616944j])
    """
    # Copy so the reflection below never mutates the caller's array.
    x = atleast_1d(x).copy()
    y = empty_like(x)
    # I0 is an even function: fold negative arguments onto |x| in place.
    # NOTE(review): the `x < 0` comparison presumes a real dtype; the complex
    # support mentioned in the docstring would not order this way -- verify.
    ind = (x < 0)
    x[ind] = -x[ind]
    # Piecewise evaluation: one Chebyshev fit on [0, 8], another on (8, inf).
    ind = (x <= 8.0)
    y[ind] = _i0_1(x[ind])
    ind2 = ~ind
    y[ind2] = _i0_2(x[ind2])
    # Drop the dimension added by atleast_1d for scalar input.
    return y.squeeze()
def kaiser(M, beta):
    """
    Return the Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    beta : float
        Shape parameter for window.

    Returns
    -------
    out : array
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hamming, hanning

    Notes
    -----
    The Kaiser window is defined as

    .. math::  w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
               \\right)/I_0(\\beta)

    with

    .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},

    where :math:`I_0` is the modified zeroth-order Bessel function.

    The Kaiser window is a very good approximation to the Digital Prolate
    Spheroidal Sequence (Slepian) window and can approximate many other
    windows by varying `beta` (0: rectangular, 5: similar to Hamming,
    6: similar to Hanning, 8.6: similar to Blackman).  A beta of 14 is a
    good starting point.  As beta gets large the window narrows; `M` must
    be large enough to sample the narrow spike, otherwise NaNs are
    returned.

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
           John Wiley and Sons, New York, (1966).
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 177-178.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.kaiser(12, 14)
    array([  7.72686684e-06,   3.46009194e-03,   4.65200189e-02,
             2.29737120e-01,   5.99885316e-01,   9.45674898e-01,
             9.45674898e-01,   5.99885316e-01,   2.29737120e-01,
             4.65200189e-02,   3.46009194e-03,   7.72686684e-06])
    """
    # Fix: the former ``from numpy.dual import i0`` is removed here --
    # ``numpy.dual`` is deprecated (and removed in NumPy >= 1.26), and the
    # local import also shadowed the module-level `i0` defined in this file.
    # Using the module-level `i0` gives the same Cephes-based algorithm.
    if M == 1:
        return np.array([1.])
    n = arange(0, M)
    alpha = (M-1)/2.0
    # Bessel taper normalized by I0(beta); the sqrt argument sweeps the
    # half-ellipse 1 - ((n - alpha)/alpha)**2 over the sample indices.
    return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
    """
    Return the sinc function.

    The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.

    Parameters
    ----------
    x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.

    Returns
    -------
    out : ndarray
        ``sinc(x)``, which has the same shape as the input.

    Notes
    -----
    ``sinc(0)`` is the limit value 1.

    The name sinc is short for "sine cardinal" or "sinus cardinalis".
    The sinc function is used in various signal processing applications,
    including anti-aliasing, the construction of a Lanczos resampling
    filter, and interpolation.  For bandlimited interpolation of
    discrete-time signals, the ideal interpolation kernel is
    proportional to the sinc function.

    References
    ----------
    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
           Resource. http://mathworld.wolfram.com/SincFunction.html
    .. [2] Wikipedia, "Sinc function",
           http://en.wikipedia.org/wiki/Sinc_function
    """
    x = np.asanyarray(x)
    # Substitute a tiny value where x == 0 so the ratio below evaluates
    # to 1.0 at the origin instead of dividing by zero.
    arg = pi * where(x == 0, 1.0e-20, x)
    return sin(arg) / arg
def msort(a):
    """
    Return a copy of an array sorted along the first axis.

    Parameters
    ----------
    a : array_like
        Array to be sorted.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    sort

    Notes
    -----
    ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
    """
    # Copy (preserving subclasses), then sort that copy in place.
    result = array(a, subok=True, copy=True)
    result.sort(axis=0)
    return result
def _ureduce(a, func, **kwargs):
    """
    Internal Function.
    Call `func` with `a` as first argument swapping the axes to use extended
    axis on functions that don't support it natively.
    Returns result and a.shape with axis dims set to 1.
    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
    kwargs : keyword arguments
        additional keyword arguments to pass to `func`.
    Returns
    -------
    result : tuple
        Result of func(a, **kwargs) and a.shape with axis dims set to 1
        which can be used to reshape the result to the same shape a ufunc with
        keepdims=True would produce.
    """
    a = np.asanyarray(a)
    axis = kwargs.get('axis', None)
    if axis is not None:
        # `keepdim` records the keepdims=True output shape for the caller.
        keepdim = list(a.shape)
        nd = a.ndim
        try:
            # Fast path: a single integer axis (operator.index raises
            # TypeError for sequences, which selects the branch below).
            axis = operator.index(axis)
            if axis >= nd or axis < -nd:
                raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
            keepdim[axis] = 1
        except TypeError:
            # Extended-axis path: validate the axis sequence, then collapse
            # all reduced axes into one trailing axis so `func` can reduce
            # over a single axis=-1.
            sax = set()
            for x in axis:
                if x >= nd or x < -nd:
                    raise IndexError("axis %d out of bounds (%d)" % (x, nd))
                if x in sax:
                    raise ValueError("duplicate value in axis")
                sax.add(x % nd)
                keepdim[x] = 1
            keep = sax.symmetric_difference(frozenset(range(nd)))
            nkeep = len(keep)
            # swap axis that should not be reduced to front
            for i, s in enumerate(sorted(keep)):
                a = a.swapaxes(i, s)
            # merge reduced axis
            a = a.reshape(a.shape[:nkeep] + (-1,))
            kwargs['axis'] = -1
    else:
        # Full reduction: the keepdims shape is all-ones.
        keepdim = [1] * a.ndim
    r = func(a, **kwargs)
    return r, keepdim
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int or sequence of int, optional
        Axis along which the medians are computed. The default (axis=None)
        is to compute the median along a flattened version of the array.
        A sequence of axes is supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and buffer length as the expected output, but the
        type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow use of memory of input array (a) for
        calculations. The input array will be modified by the call to
        median. This will save memory when you do not need to preserve the
        contents of the input array. Treat the input as undefined, but it
        will probably be fully or partially sorted. Default is False. Note
        that, if `overwrite_input` is True and the input is not already an
        ndarray, an error will be raised.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.

        .. versionadded:: 1.9.0

    Returns
    -------
    median : ndarray
        A new array holding the result (unless `out` is specified, in which
        case that array is returned instead). If the input contains
        integers, or floats of smaller precision than 64, then the output
        data-type is float64. Otherwise, the output data-type is the same
        as that of the input.

    See Also
    --------
    mean, percentile

    Notes
    -----
    Given a vector V of length N, the median of V is the middle value of
    a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
    odd. When N is even, it is the average of the two middle values of
    ``V_sorted``.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.median(a)
    3.5
    >>> np.median(a, axis=0)
    array([ 6.5,  4.5,  2.5])
    >>> np.median(a, axis=1)
    array([ 7.,  2.])
    """
    # Delegate to _median through _ureduce, which handles sequence-of-axes
    # reductions and reports the keepdims=True shape.
    result, keep_shape = _ureduce(a, func=_median, axis=axis, out=out,
                                  overwrite_input=overwrite_input)
    return result.reshape(keep_shape) if keepdims else result
def _median(a, axis=None, out=None, overwrite_input=False):
    # Core median computation; helper for `median` (invoked via _ureduce).
    # can't be reasonably be implemented in terms of percentile as we have to
    # call mean to not break astropy
    a = np.asanyarray(a)
    if axis is not None and axis >= a.ndim:
        raise IndexError(
            "axis %d out of bounds (%d)" % (axis, a.ndim))
    if overwrite_input:
        # Destructive path: partition the caller's array in place.
        if axis is None:
            part = a.ravel()
            sz = part.size
            if sz % 2 == 0:
                # Even count: place both middle elements with one partition.
                szh = sz // 2
                part.partition((szh - 1, szh))
            else:
                part.partition((sz - 1) // 2)
        else:
            sz = a.shape[axis]
            if sz % 2 == 0:
                szh = sz // 2
                a.partition((szh - 1, szh), axis=axis)
            else:
                a.partition((sz - 1) // 2, axis=axis)
            part = a
    else:
        # Non-destructive path: `partition` returns a partitioned copy.
        if axis is None:
            sz = a.size
        else:
            sz = a.shape[axis]
        if sz % 2 == 0:
            part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis)
        else:
            part = partition(a, (sz - 1) // 2, axis=axis)
    if part.shape == ():
        # make 0-D arrays work
        return part.item()
    if axis is None:
        axis = 0
    # Select the middle element (odd) or the two middle elements (even).
    indexer = [slice(None)] * part.ndim
    index = part.shape[axis] // 2
    if part.shape[axis] % 2 == 1:
        # index with slice to allow mean (below) to work
        indexer[axis] = slice(index, index+1)
    else:
        indexer[axis] = slice(index-1, index+1)
    # Use mean in odd and even case to coerce data type
    # and check, use out array.
    return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
               overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the qth percentile of the data along the specified axis.

    Returns the qth percentile of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : float in range of [0,100] (or sequence of floats)
        Percentile to compute which must be between 0 and 100 inclusive.
    axis : int or sequence of int, optional
        Axis along which the percentiles are computed. The default (None)
        is to compute the percentiles along a flattened version of the
        array.  A sequence of axes is supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow use of memory of input array `a` for
        calculations. The input array will be modified by the call to
        percentile; treat its contents as undefined afterwards. Default
        is False.  Has no effect if `a` is not already an ndarray.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method to use when the desired quantile lies
        between two data points `i` and `j`:

        * linear: `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j` whichever is nearest.
        * midpoint: (`i` + `j`) / 2.

        .. versionadded:: 1.9.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.

        .. versionadded:: 1.9.0

    Returns
    -------
    percentile : scalar or ndarray
        If a single percentile `q` is given and axis=None a scalar is
        returned.  If multiple percentiles `q` are given an array holding
        the result is returned, listed in the first axis.  (If `out` is
        specified, that array is returned instead.)  If the input contains
        integers, or floats of smaller precision than 64, the output
        data-type is float64; otherwise it matches the input.

    See Also
    --------
    mean, median

    Notes
    -----
    Given a vector V of length N, the q-th percentile of V is the q-th
    ranked value in a sorted copy of V.  The values and distances of the
    two nearest neighbors together with `interpolation` determine the
    percentile when the normalized ranking does not match q exactly.
    ``q=50`` is the median, ``q=0`` the minimum, ``q=100`` the maximum.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.percentile(a, 50)
    array([ 3.5])
    >>> np.percentile(a, 50, axis=0)
    array([[ 6.5,  4.5,  2.5]])
    """
    # Copy q as float64 so the validation/scaling in _percentile can
    # safely mutate it.
    q = array(q, dtype=np.float64, copy=True)
    result, keep_shape = _ureduce(a, func=_percentile, q=q, axis=axis,
                                  out=out,
                                  overwrite_input=overwrite_input,
                                  interpolation=interpolation)
    if not keepdims:
        return result
    # Scalar q reshapes to the keepdims shape; array q keeps its own
    # leading percentile axis in front of it.
    if q.ndim == 0:
        return result.reshape(keep_shape)
    return result.reshape([len(q)] + keep_shape)
def _percentile(a, q, axis=None, out=None,
                overwrite_input=False, interpolation='linear', keepdims=False):
    # Core percentile computation; helper for `percentile` (via _ureduce).
    # `q` must already be a mutable float64 array (the caller copies it).
    a = asarray(a)
    if q.ndim == 0:
        # Do not allow 0-d arrays because following code fails for scalar
        zerod = True
        q = q[None]
    else:
        zerod = False
    # avoid expensive reductions, relevant for arrays with < O(1000) elements
    if q.size < 10:
        for i in range(q.size):
            if q[i] < 0. or q[i] > 100.:
                raise ValueError("Percentiles must be in the range [0,100]")
            q[i] /= 100.
    else:
        # faster than any()
        if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
            raise ValueError("Percentiles must be in the range [0,100]")
        q /= 100.
    # prepare a for partioning
    if overwrite_input:
        if axis is None:
            ap = a.ravel()
        else:
            ap = a
    else:
        if axis is None:
            ap = a.flatten()
        else:
            ap = a.copy()
    if axis is None:
        axis = 0
    # Fractional positions of each percentile in the sorted data.
    Nx = ap.shape[axis]
    indices = q * (Nx - 1)
    # round fractional indices according to interpolation method
    if interpolation == 'lower':
        indices = floor(indices).astype(intp)
    elif interpolation == 'higher':
        indices = ceil(indices).astype(intp)
    elif interpolation == 'midpoint':
        indices = floor(indices) + 0.5
    elif interpolation == 'nearest':
        indices = around(indices).astype(intp)
    elif interpolation == 'linear':
        pass  # keep index as fraction and interpolate
    else:
        raise ValueError(
            "interpolation can only be 'linear', 'lower' 'higher', "
            "'midpoint', or 'nearest'")
    if indices.dtype == intp:  # take the points along axis
        # Integer indices: a single partition places every needed element.
        ap.partition(indices, axis=axis)
        # ensure axis with qth is first
        ap = np.rollaxis(ap, axis, 0)
        axis = 0
        if zerod:
            indices = indices[0]
        r = take(ap, indices, axis=axis, out=out)
    else:  # weight the points above and below the indices
        # Fractional indices: interpolate between the two neighbors,
        # clamping the upper neighbor at the last valid index.
        indices_below = floor(indices).astype(intp)
        indices_above = indices_below + 1
        indices_above[indices_above > Nx - 1] = Nx - 1
        weights_above = indices - indices_below
        weights_below = 1.0 - weights_above
        weights_shape = [1, ] * ap.ndim
        weights_shape[axis] = len(indices)
        weights_below.shape = weights_shape
        weights_above.shape = weights_shape
        ap.partition(concatenate((indices_below, indices_above)), axis=axis)
        x1 = take(ap, indices_below, axis=axis) * weights_below
        x2 = take(ap, indices_above, axis=axis) * weights_above
        # ensure axis with qth is first
        x1 = np.rollaxis(x1, axis, 0)
        x2 = np.rollaxis(x2, axis, 0)
        if zerod:
            x1 = x1.squeeze(0)
            x2 = x2.squeeze(0)
        if out is not None:
            r = add(x1, x2, out=out)
        else:
            r = add(x1, x2)
    return r
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrate along the given axis using the composite trapezoidal rule.

    Integrate `y` (`x`) along given axis.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        If `x` is None, then spacing between all `y` elements is `dx`.
    dx : scalar, optional
        If `x` is None, spacing given by `dx` is assumed. Default is 1.
    axis : int, optional
        Specify the axis.

    Returns
    -------
    trapz : float
        Definite integral as approximated by trapezoidal rule.

    See Also
    --------
    sum, cumsum

    Notes
    -----
    Image [2]_ illustrates trapezoidal rule -- y-axis locations of points
    will be taken from `y` array, by default x-axis distances between
    points will be 1.0, alternatively they can be provided with `x` array
    or with `dx` scalar.  Return value will be equal to combined area under
    the red lines.

    References
    ----------
    .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
    .. [2] Illustration image:
           http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png

    Examples
    --------
    >>> np.trapz([1,2,3])
    4.0
    >>> np.trapz([1,2,3], x=[4,6,8])
    8.0
    >>> np.trapz([1,2,3], dx=2)
    8.0
    >>> a = np.arange(6).reshape(2, 3)
    >>> np.trapz(a, axis=0)
    array([ 1.5,  2.5,  3.5])
    >>> np.trapz(a, axis=1)
    array([ 2.,  8.])
    """
    y = asanyarray(y)
    if x is None:
        # Uniform spacing: a scalar step broadcasts against y below.
        d = dx
    else:
        x = asanyarray(x)
        if x.ndim == 1:
            d = diff(x)
            # reshape to correct shape so d broadcasts along `axis` only
            shape = [1] * y.ndim
            shape[axis] = d.shape[0]
            d = d.reshape(shape)
        else:
            d = diff(x, axis=axis)
    nd = y.ndim
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    # Fix: index with tuples -- indexing an ndarray with a *list* of slices
    # is deprecated and raises in modern NumPy; tuples behave identically.
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
    except ValueError:
        # Operations didn't work (e.g. masked/odd subclasses), cast to ndarray
        d = np.asarray(d)
        y = np.asarray(y)
        ret = add.reduce(d * (y[slice1] + y[slice2]) / 2.0, axis)
    return ret
#always succeed
def add_newdoc(place, obj, doc):
    """Add documentation to `obj`, which lives in module `place`.

    If `doc` is a string it is attached to `obj` as a docstring.
    If `doc` is a tuple, the first element names an attribute of `obj`
    and the second is the docstring, i.e. ``(method, docstring)``.
    If `doc` is a list, each element must be such a length-two sequence:
    ``[(method1, docstring1), (method2, docstring2), ...]``.

    This routine never raises an error.  It also cannot modify
    read-only docstrings, as appear in new-style classes or built-in
    functions; because it never raises, the caller must check manually
    that the docstrings were changed.
    """
    try:
        target = getattr(__import__(place, globals(), {}, [obj]), obj)
        if isinstance(doc, str):
            add_docstring(target, doc.strip())
        elif isinstance(doc, tuple):
            add_docstring(getattr(target, doc[0]), doc[1].strip())
        elif isinstance(doc, list):
            for item in doc:
                add_docstring(getattr(target, item[0]), item[1].strip())
    except:
        # Deliberately swallow every failure: this helper must always succeed.
        pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
    """
    Return coordinate matrices from coordinate vectors.

    Make N-D coordinate arrays for vectorized evaluations of
    N-D scalar/vector fields over N-D grids, given
    one-dimensional coordinate arrays x1, x2,..., xn.

    .. versionchanged:: 1.9
       1-D and 0-D cases are allowed.

    Parameters
    ----------
    x1, x2,..., xn : array_like
        1-D arrays representing the coordinates of a grid.
    indexing : {'xy', 'ij'}, optional
        Cartesian ('xy', default) or matrix ('ij') indexing of output.
        See Notes for more details.
    sparse : bool, optional
        If True a sparse grid is returned in order to conserve memory.
        Default is False.
    copy : bool, optional
        If False, a view into the original arrays are returned in order to
        conserve memory.  Default is True.  Please note that
        ``sparse=False, copy=False`` will likely return non-contiguous
        arrays, and more than one element of a broadcast array may refer
        to a single memory location.  If you need to write to the arrays,
        make copies first.

    Returns
    -------
    X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)``,
        return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
        or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
        with the elements of `xi` repeated to fill the matrix along
        the first dimension for `x1`, the second for `x2` and so on.

    Notes
    -----
    With inputs of length M and N, 'xy' indexing produces (N, M) outputs
    while 'ij' produces (M, N); in 3-D with lengths M, N, P the shapes are
    (N, M, P) and (M, N, P) respectively.  In the 1-D and 0-D case, the
    indexing and sparse keywords have no effect.

    See Also
    --------
    index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
                         using indexing notation.
    index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
                         using indexing notation.

    Examples
    --------
    >>> nx, ny = (3, 2)
    >>> x = np.linspace(0, 1, nx)
    >>> y = np.linspace(0, 1, ny)
    >>> xv, yv = meshgrid(x, y)
    >>> xv
    array([[ 0. ,  0.5,  1. ],
           [ 0. ,  0.5,  1. ]])
    >>> yv
    array([[ 0.,  0.,  0.],
           [ 1.,  1.,  1.]])
    """
    ndim = len(xi)
    copy_ = kwargs.pop('copy', True)
    sparse = kwargs.pop('sparse', False)
    indexing = kwargs.pop('indexing', 'xy')
    if kwargs:
        raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
                        % (list(kwargs)[0],))
    if indexing not in ['xy', 'ij']:
        raise ValueError(
            "Valid values for `indexing` are 'xy' and 'ij'.")
    # Give each input a singleton extent on every dimension except its own.
    unit = (1,) * ndim
    output = []
    for pos, axis_vals in enumerate(xi):
        arr = np.asanyarray(axis_vals)
        output.append(arr.reshape(unit[:pos] + (-1,) + unit[pos + 1:]))
    shape = [arr.size for arr in output]
    if indexing == 'xy' and ndim > 1:
        # Cartesian indexing swaps the roles of the first two axes.
        output[0].shape = (1, -1) + (1,) * (ndim - 2)
        output[1].shape = (-1, 1) + (1,) * (ndim - 2)
        shape[0], shape[1] = shape[1], shape[0]
    if sparse:
        return [arr.copy() for arr in output] if copy_ else output
    # Dense output: expand every 1-D grid to the full N-D shape.
    if copy_:
        mult_fact = np.ones(shape, dtype=int)
        return [arr * mult_fact for arr in output]
    return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    `arr[obj]`.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int or array of ints
        Indicate which sub-arrays to remove.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    append : Append elements at the end of an array.

    Notes
    -----
    Often it is preferable to use a boolean mask. For example:

    >>> mask = np.ones(len(arr), dtype=bool)
    >>> mask[[0,2,4]] = False
    >>> result = arr[mask,...]

    Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
    use of `mask`.

    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> arr
    array([[ 1,  2,  3,  4],
           [ 5,  6,  7,  8],
           [ 9, 10, 11, 12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1,  2,  3,  4],
           [ 9, 10, 11, 12]])
    >>> np.delete(arr, np.s_[::2], 1)
    array([[ 2,  4],
           [ 6,  8],
           [10, 12]])
    >>> np.delete(arr, [1,3,5], None)
    array([ 1,  3,  5,  7,  8,  9, 10, 11, 12])

    """
    # Remember the subclass' __array_wrap__ (if any) so the result can be
    # re-wrapped into the caller's ndarray subclass on the way out.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass

    arr = asarray(arr)
    ndim = arr.ndim
    if axis is None:
        # No axis given: operate on the flattened array, deleting along
        # the last (only) axis of the 1-D result.
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    if ndim == 0:
        # Deprecated: 0-d input is currently returned unchanged (a copy);
        # a future numpy raises an error here instead.
        warnings.warn(
            "in the future the special handling of scalars will be removed "
            "from delete and raise an error", DeprecationWarning)
        if wrap:
            return wrap(arr)
        else:
            return arr.copy()

    slobj = [slice(None)]*ndim
    # N is the length of the axis being edited.
    N = arr.shape[axis]
    newshape = list(arr.shape)

    if isinstance(obj, slice):
        # Fast path for slices: copy the kept pieces chunk-wise instead of
        # building an explicit index array.
        start, stop, step = obj.indices(N)
        xr = range(start, stop, step)
        numtodel = len(xr)

        if numtodel <= 0:
            # Empty slice: nothing to delete, return a plain copy.
            if wrap:
                return wrap(arr.copy())
            else:
                return arr.copy()

        # Invert if step is negative:
        if step < 0:
            step = -step
            start = xr[-1]
            stop = xr[0] + 1

        newshape[axis] -= numtodel
        # arr.flags.fnc preserves Fortran order for Fortran-only inputs.
        new = empty(newshape, arr.dtype, arr.flags.fnc)
        # copy initial chunk
        if start == 0:
            pass
        else:
            slobj[axis] = slice(None, start)
            new[slobj] = arr[slobj]
        # copy end chunck
        if stop == N:
            pass
        else:
            slobj[axis] = slice(stop-numtodel, None)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(stop, None)
            new[slobj] = arr[slobj2]
        # copy middle pieces
        if step == 1:
            pass
        else:  # use array indexing.
            # Keep everything in [start, stop) that the slice did not hit.
            keep = ones(stop-start, dtype=bool)
            keep[:stop-start:step] = False
            slobj[axis] = slice(start, stop-numtodel)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(start, stop)
            arr = arr[slobj2]
            slobj2[axis] = keep
            new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        else:
            return new

    # Non-slice obj: normalize to an array, keeping the original around to
    # distinguish scalars / lists / ndarrays in the deprecation checks below.
    _obj = obj
    obj = np.asarray(obj)
    # After removing the special handling of booleans and out of
    # bounds values, the conversion to the array can be removed.
    if obj.dtype == bool:
        warnings.warn(
            "in the future insert will treat boolean arrays and array-likes "
            "as boolean index instead of casting it to integer", FutureWarning)
        obj = obj.astype(intp)
    if isinstance(_obj, (int, long, integer)):
        # optimization for a single value
        obj = obj.item()
        if (obj < -N or obj >= N):
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (obj < 0):
            obj += N
        newshape[axis] -= 1
        new = empty(newshape, arr.dtype, arr.flags.fnc)
        # Copy everything before the deleted index, then everything after.
        slobj[axis] = slice(None, obj)
        new[slobj] = arr[slobj]
        slobj[axis] = slice(obj, None)
        slobj2 = [slice(None)]*ndim
        slobj2[axis] = slice(obj+1, None)
        new[slobj] = arr[slobj2]
    else:
        if obj.size == 0 and not isinstance(_obj, np.ndarray):
            # An empty list/tuple gives a float64 array; cast is safe.
            obj = obj.astype(intp)
        if not np.can_cast(obj, intp, 'same_kind'):
            # obj.size = 1 special case always failed and would just
            # give superfluous warnings.
            warnings.warn(
                "using a non-integer array as obj in delete will result in an "
                "error in the future", DeprecationWarning)
            obj = obj.astype(intp)
        keep = ones(N, dtype=bool)

        # Test if there are out of bound indices, this is deprecated
        inside_bounds = (obj < N) & (obj >= -N)
        if not inside_bounds.all():
            warnings.warn(
                "in the future out of bounds indices will raise an error "
                "instead of being ignored by `numpy.delete`.",
                DeprecationWarning)
            obj = obj[inside_bounds]
        positive_indices = obj >= 0
        if not positive_indices.all():
            warnings.warn(
                "in the future negative indices will not be ignored by "
                "`numpy.delete`.", FutureWarning)
            obj = obj[positive_indices]

        # Boolean-mask out the deleted positions in one fancy-index pass.
        keep[obj, ] = False
        slobj[axis] = keep
        new = arr[slobj]

    if wrap:
        return wrap(new)
    else:
        return new
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : int, slice or sequence of ints
        Object that defines the index or indices before which `values` is
        inserted.

        .. versionadded:: 1.8.0

        Support for multiple insertions when `obj` is a single scalar or a
        sequence with one element (similar to calling insert multiple
        times).
    values : array_like
        Values to insert into `arr`. If the type of `values` is different
        from that of `arr`, `values` is converted to the type of `arr`.
        `values` should be shaped so that ``arr[...,obj,...] = values``
        is legal.
    axis : int, optional
        Axis along which to insert `values`. If `axis` is None then `arr`
        is flattened first.

    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted. Note that `insert`
        does not occur in-place: a new array is returned. If
        `axis` is None, `out` is a flattened array.

    See Also
    --------
    append : Append elements at the end of an array.
    concatenate : Join a sequence of arrays together.
    delete : Delete elements from an array.

    Notes
    -----
    Note that for higher dimensional inserts `obj=0` behaves very different
    from `obj=[0]` just like `arr[:,0,:] = values` is different from
    `arr[:,[0],:] = values`.

    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> a
    array([[1, 1],
           [2, 2],
           [3, 3]])
    >>> np.insert(a, 1, 5)
    array([1, 5, 1, 2, 2, 3, 3])
    >>> np.insert(a, 1, 5, axis=1)
    array([[1, 5, 1],
           [2, 5, 2],
           [3, 5, 3]])

    Difference between sequence and scalars:

    >>> np.insert(a, [1], [[1],[2],[3]], axis=1)
    array([[1, 1, 1],
           [2, 2, 2],
           [3, 3, 3]])
    >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
    ...                np.insert(a, [1], [[1],[2],[3]], axis=1))
    True
    >>> b = a.flatten()
    >>> b
    array([1, 1, 2, 2, 3, 3])
    >>> np.insert(b, [2, 2], [5, 6])
    array([1, 1, 5, 6, 2, 2, 3, 3])
    >>> np.insert(b, slice(2, 4), [5, 6])
    array([1, 1, 5, 2, 6, 2, 3, 3])
    >>> np.insert(b, [2, 2], [7.13, False]) # type casting
    array([1, 1, 7, 0, 2, 2, 3, 3])
    >>> x = np.arange(8).reshape(2, 4)
    >>> idx = (1, 3)
    >>> np.insert(x, idx, 999, axis=1)
    array([[  0, 999,   1,   2, 999,   3],
           [  4, 999,   5,   6, 999,   7]])

    """
    # Remember the subclass' __array_wrap__ (if any) so the result can be
    # re-wrapped into the caller's ndarray subclass on the way out.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass

    arr = asarray(arr)
    ndim = arr.ndim
    if axis is None:
        # No axis given: insert into the flattened array.
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    else:
        if ndim > 0 and (axis < -ndim or axis >= ndim):
            raise IndexError(
                "axis %i is out of bounds for an array of "
                "dimension %i" % (axis, ndim))
        if (axis < 0):
            axis += ndim
    if (ndim == 0):
        # Deprecated: 0-d input currently has `values` assigned into a
        # copy; a future numpy raises an error here instead.
        warnings.warn(
            "in the future the special handling of scalars will be removed "
            "from insert and raise an error", DeprecationWarning)
        arr = arr.copy()
        arr[...] = values
        if wrap:
            return wrap(arr)
        else:
            return arr

    slobj = [slice(None)]*ndim
    # N is the length of the axis being edited.
    N = arr.shape[axis]
    newshape = list(arr.shape)

    if isinstance(obj, slice):
        # turn it into a range object
        indices = arange(*obj.indices(N), **{'dtype': intp})
    else:
        # need to copy obj, because indices will be changed in-place
        indices = np.array(obj)
        if indices.dtype == bool:
            # See also delete
            warnings.warn(
                "in the future insert will treat boolean arrays and "
                "array-likes as a boolean index instead of casting it to "
                "integer", FutureWarning)
            indices = indices.astype(intp)
            # Code after warning period:
            #if obj.ndim != 1:
            #    raise ValueError('boolean array argument obj to insert '
            #                     'must be one dimensional')
            #indices = np.flatnonzero(obj)
        elif indices.ndim > 1:
            raise ValueError(
                "index array argument obj to insert must be one dimensional "
                "or scalar")

    if indices.size == 1:
        # Single insertion point: copy before/at/after in three slices.
        index = indices.item()
        if index < -N or index > N:
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (index < 0):
            index += N

        # There are some object array corner cases here, but we cannot avoid
        # that:
        values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
        if indices.ndim == 0:
            # broadcasting is very different here, since a[:,0,:] = ... behaves
            # very different from a[:,[0],:] = ...! This changes values so that
            # it works likes the second case. (here a[:,0:1,:])
            values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
        numnew = values.shape[axis]
        newshape[axis] += numnew
        # arr.flags.fnc preserves Fortran order for Fortran-only inputs.
        new = empty(newshape, arr.dtype, arr.flags.fnc)
        slobj[axis] = slice(None, index)
        new[slobj] = arr[slobj]
        slobj[axis] = slice(index, index+numnew)
        new[slobj] = values
        slobj[axis] = slice(index+numnew, None)
        slobj2 = [slice(None)] * ndim
        slobj2[axis] = slice(index, None)
        new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        return new
    elif indices.size == 0 and not isinstance(obj, np.ndarray):
        # Can safely cast the empty list to intp
        indices = indices.astype(intp)

    if not np.can_cast(indices, intp, 'same_kind'):
        warnings.warn(
            "using a non-integer array as obj in insert will result in an "
            "error in the future", DeprecationWarning)
        indices = indices.astype(intp)

    # Multiple insertion points: shift each index by the number of earlier
    # insertions (stable sort keeps insertion order for equal indices),
    # then scatter `values` and the original data with boolean masks.
    indices[indices < 0] += N

    numnew = len(indices)
    order = indices.argsort(kind='mergesort')   # stable sort
    indices[order] += np.arange(numnew)

    newshape[axis] += numnew
    old_mask = ones(newshape[axis], dtype=bool)
    old_mask[indices] = False

    new = empty(newshape, arr.dtype, arr.flags.fnc)
    slobj2 = [slice(None)]*ndim
    slobj[axis] = indices
    slobj2[axis] = old_mask
    new[slobj] = values
    new[slobj2] = arr

    if wrap:
        return wrap(new)
    return new
def append(arr, values, axis=None):
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : array_like
        Values are appended to a copy of this array.
    values : array_like
        These values are appended to a copy of `arr`. It must be of the
        correct shape (the same shape as `arr`, excluding `axis`). If
        `axis` is not specified, `values` can be any shape and will be
        flattened before use.
    axis : int, optional
        The axis along which `values` are appended. If `axis` is not
        given, both `arr` and `values` are flattened before use.

    Returns
    -------
    append : ndarray
        A copy of `arr` with `values` appended to `axis`. Note that
        `append` does not occur in-place: a new array is allocated and
        filled. If `axis` is None, `out` is a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    delete : Delete elements from an array.

    Examples
    --------
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])

    When `axis` is specified, `values` must have the correct shape.

    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
    Traceback (most recent call last):
    ...
    ValueError: arrays must have same number of dimensions

    """
    arr = asanyarray(arr)
    if axis is not None:
        # Axis-aligned append: shapes must already be compatible.
        return concatenate((arr, values), axis=axis)
    # No axis given: flatten both operands and join end-to-end.
    flat = arr if arr.ndim == 1 else arr.ravel()
    return concatenate((flat, ravel(values)), axis=flat.ndim - 1)
| mit |
KrishnaswamyLab/PHATE | Python/phate/phate.py | 1 | 39723 | """
Potential of Heat-diffusion for Affinity-based Trajectory Embedding (PHATE)
"""
# author: Daniel Burkhardt <daniel.burkhardt@yale.edu>
# (C) 2017 Krishnaswamy Lab GPLv2
from __future__ import print_function, division, absolute_import
import numpy as np
import graphtools
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from scipy import sparse
import warnings
import tasklogger
import matplotlib.pyplot as plt
from . import utils, vne, mds
try:
import anndata
except ImportError:
# anndata not installed
pass
try:
import pygsp
except ImportError:
    # pygsp not installed
pass
_logger = tasklogger.get_tasklogger("graphtools")
class PHATE(BaseEstimator):
"""PHATE operator which performs dimensionality reduction.
Potential of Heat-diffusion for Affinity-based Trajectory Embedding
(PHATE) embeds high dimensional single-cell data into two or three
dimensions for visualization of biological progressions as described
in Moon et al, 2017 [1]_.
Parameters
----------
n_components : int, optional, default: 2
number of dimensions in which the data will be embedded
knn : int, optional, default: 5
number of nearest neighbors on which to build kernel
decay : int, optional, default: 40
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used
n_landmark : int, optional, default: 2000
number of landmarks to use in fast PHATE
t : int, optional, default: 'auto'
power to which the diffusion operator is powered.
This sets the level of diffusion. If 'auto', t is selected
according to the knee point in the Von Neumann Entropy of
the diffusion operator
gamma : float, optional, default: 1
Informational distance constant between -1 and 1.
`gamma=1` gives the PHATE log potential, `gamma=0` gives
a square root potential.
n_pca : int, optional, default: 100
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
roughly log(n_samples) time.
mds_solver : {'sgd', 'smacof'}, optional (default: 'sgd')
which solver to use for metric MDS. SGD is substantially faster,
but produces slightly less optimal results. Note that SMACOF was used
for all figures in the PHATE paper.
knn_dist : string, optional, default: 'euclidean'
recommended values: 'euclidean', 'cosine', 'precomputed'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph. Custom distance
functions of form `f(x, y) = d` are also accepted. If 'precomputed',
`data` should be an n_samples x n_samples distance or
affinity matrix. Distance matrices are assumed to have zeros
down the diagonal, while affinity matrices are assumed to have
non-zero values down the diagonal. This is detected automatically using
`data[0,0]`. You can override this detection with
`knn_dist='precomputed_distance'` or `knn_dist='precomputed_affinity'`.
knn_max : int, optional, default: None
Maximum number of neighbors for which alpha decaying kernel
is computed for each point. For very large datasets, setting `knn_max`
to a small multiple of `knn` can speed up computation significantly.
mds_dist : string, optional, default: 'euclidean'
Distance metric for MDS. Recommended values: 'euclidean' and 'cosine'
Any metric from `scipy.spatial.distance` can be used. Custom distance
functions of form `f(x, y) = d` are also accepted
mds : string, optional, default: 'metric'
choose from ['classic', 'metric', 'nonmetric'].
Selects which MDS algorithm is used for dimensionality reduction
n_jobs : integer, optional, default: 1
The number of jobs to use for the computation.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
n_jobs = -2, all CPUs but one are used
random_state : integer or numpy.RandomState, optional, default: None
The generator used to initialize SMACOF (metric, nonmetric) MDS
If an integer is given, it fixes the seed
Defaults to the global `numpy` random number generator
verbose : `int` or `boolean`, optional (default: 1)
If `True` or `> 0`, print status messages
potential_method : deprecated.
Use `gamma=1` for log transformation and `gamma=0` for square root
transformation.
alpha_decay : deprecated.
Use `decay=None` to disable alpha decay
njobs : deprecated.
Use n_jobs to match `sklearn` standards
k : Deprecated for `knn`
a : Deprecated for `decay`
kwargs : additional arguments for `graphtools.Graph`
Attributes
----------
X : array-like, shape=[n_samples, n_dimensions]
embedding : array-like, shape=[n_samples, n_components]
Stores the position of the dataset in the embedding space
graph : graphtools.base.BaseGraph
The graph built on the input data
optimal_t : int
The automatically selected t, when t = 'auto'.
When t is given, optimal_t is None.
Examples
--------
>>> import phate
>>> import matplotlib.pyplot as plt
>>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=100, n_branch=20,
... branch_length=100)
>>> tree_data.shape
(2000, 100)
>>> phate_operator = phate.PHATE(knn=5, decay=20, t=150)
>>> tree_phate = phate_operator.fit_transform(tree_data)
>>> tree_phate.shape
(2000, 2)
>>> phate.plot.scatter2d(tree_phate, c=tree_clusters)
References
----------
.. [1] Moon KR, van Dijk D, Zheng W, *et al.* (2017),
*PHATE: A Dimensionality Reduction Method for Visualizing Trajectory
Structures in High-Dimensional Biological Data*,
`BioRxiv <http://biorxiv.org/content/early/2017/03/24/120378>`_.
"""
    def __init__(
        self,
        n_components=2,
        knn=5,
        decay=40,
        n_landmark=2000,
        t="auto",
        gamma=1,
        n_pca=100,
        mds_solver="sgd",
        knn_dist="euclidean",
        knn_max=None,
        mds_dist="euclidean",
        mds="metric",
        n_jobs=1,
        random_state=None,
        verbose=1,
        potential_method=None,
        alpha_decay=None,
        njobs=None,
        k=None,
        a=None,
        **kwargs
    ):
        """Construct a PHATE operator.

        All parameters are documented in the class docstring. The
        deprecated aliases (``k``, ``a``, ``njobs``, ``potential_method``,
        ``alpha_decay``) are translated to their modern equivalents here,
        emitting ``FutureWarning`` where appropriate.
        """
        # Deprecated aliases take precedence when explicitly supplied.
        if k is not None:
            knn = k
        if a is not None:
            decay = a
        self.n_components = n_components
        self.decay = decay
        self.knn = knn
        self.t = t
        self.n_landmark = n_landmark
        self.mds = mds
        self.n_pca = n_pca
        self.knn_dist = knn_dist
        self.knn_max = knn_max
        self.mds_dist = mds_dist
        self.mds_solver = mds_solver
        self.random_state = random_state
        # Extra keyword arguments are forwarded to graphtools.Graph later.
        self.kwargs = kwargs

        # Fitted state; populated by fit().
        self.graph = None
        self._diff_potential = None
        self.embedding = None
        self.X = None
        self.optimal_t = None

        # Warn only when alpha_decay disagrees with what `decay` implies.
        if (alpha_decay is True and decay is None) or (
            alpha_decay is False and decay is not None
        ):
            warnings.warn(
                "alpha_decay is deprecated. Use `decay=None`"
                " to disable alpha decay in future.",
                FutureWarning,
            )
            if not alpha_decay:
                self.decay = None

        if njobs is not None:
            warnings.warn(
                "njobs is deprecated. Please use n_jobs in future.", FutureWarning
            )
            n_jobs = njobs
        self.n_jobs = n_jobs

        if potential_method is not None:
            # Map the legacy potential_method onto the equivalent gamma.
            if potential_method == "log":
                gamma = 1
            elif potential_method == "sqrt":
                gamma = 0
            else:
                raise ValueError(
                    "potential_method {} not recognized. Please "
                    "use gamma between -1 and 1".format(potential_method)
                )
            warnings.warn(
                "potential_method is deprecated. "
                "Setting gamma to {} to achieve"
                " {} transformation.".format(gamma, potential_method),
                FutureWarning,
            )
        elif gamma > 0.99 and gamma < 1:
            # Values just below 1 are numerically unstable in the potential
            # computation; clamp down to 0.99.
            warnings.warn(
                "0.99 < gamma < 1 is numerically unstable. " "Setting gamma to 0.99",
                RuntimeWarning,
            )
            gamma = 0.99
        self.gamma = gamma

        # Normalize boolean verbosity to the integer levels the logger uses.
        if verbose is True:
            verbose = 1
        elif verbose is False:
            verbose = 0
        self.verbose = verbose
        self._check_params()
        _logger.set_level(verbose)
@property
def diff_op(self):
"""diff_op : array-like, shape=[n_samples, n_samples] or [n_landmark, n_landmark]
The diffusion operator built from the graph
"""
if self.graph is not None:
if isinstance(self.graph, graphtools.graphs.LandmarkGraph):
diff_op = self.graph.landmark_op
else:
diff_op = self.graph.diff_op
if sparse.issparse(diff_op):
diff_op = diff_op.toarray()
return diff_op
else:
raise NotFittedError(
"This PHATE instance is not fitted yet. Call "
"'fit' with appropriate arguments before "
"using this method."
)
@property
def diff_potential(self):
"""Interpolates the PHATE potential to one entry per cell
This is equivalent to calculating infinite-dimensional PHATE,
or running PHATE without the MDS step.
Returns
-------
diff_potential : ndarray, shape=[n_samples, min(n_landmark, n_samples)]
"""
diff_potential = self._calculate_potential()
if isinstance(self.graph, graphtools.graphs.LandmarkGraph):
diff_potential = self.graph.interpolate(diff_potential)
return diff_potential
def _check_params(self):
"""Check PHATE parameters
This allows us to fail early - otherwise certain unacceptable
parameter choices, such as mds='mmds', would only fail after
minutes of runtime.
Raises
------
ValueError : unacceptable choice of parameters
"""
utils.check_positive(n_components=self.n_components, knn=self.knn)
utils.check_int(
n_components=self.n_components, knn=self.knn, n_jobs=self.n_jobs
)
utils.check_between(-1, 1, gamma=self.gamma)
utils.check_if_not(None, utils.check_positive, decay=self.decay)
utils.check_if_not(
None,
utils.check_positive,
utils.check_int,
n_landmark=self.n_landmark,
n_pca=self.n_pca,
knn_max=self.knn_max,
)
utils.check_if_not("auto", utils.check_positive, utils.check_int, t=self.t)
if not callable(self.knn_dist):
utils.check_in(
[
"euclidean",
"precomputed",
"cosine",
"correlation",
"cityblock",
"l1",
"l2",
"manhattan",
"braycurtis",
"canberra",
"chebyshev",
"dice",
"hamming",
"jaccard",
"kulsinski",
"mahalanobis",
"matching",
"minkowski",
"rogerstanimoto",
"russellrao",
"seuclidean",
"sokalmichener",
"sokalsneath",
"sqeuclidean",
"yule",
"precomputed_affinity",
"precomputed_distance",
],
knn_dist=self.knn_dist,
)
if not callable(self.mds_dist):
utils.check_in(
[
"euclidean",
"cosine",
"correlation",
"braycurtis",
"canberra",
"chebyshev",
"cityblock",
"dice",
"hamming",
"jaccard",
"kulsinski",
"mahalanobis",
"matching",
"minkowski",
"rogerstanimoto",
"russellrao",
"seuclidean",
"sokalmichener",
"sokalsneath",
"sqeuclidean",
"yule",
],
mds_dist=self.mds_dist,
)
utils.check_in(["classic", "metric", "nonmetric"], mds=self.mds)
utils.check_in(["sgd", "smacof"], mds_solver=self.mds_solver)
    def _set_graph_params(self, **params):
        """Forward parameter updates to the underlying graphtools graph.

        Silently does nothing when no graph has been built yet
        (``self.graph`` is ``None``).
        """
        try:
            self.graph.set_params(**params)
        except AttributeError:
            # graph not defined
            pass
    def _reset_graph(self):
        # Discard the graph; the potential and embedding are derived from
        # it, so invalidate them too via the cascade below.
        self.graph = None
        self._reset_potential()

    def _reset_potential(self):
        # Discard the diffusion potential and the embedding built from it.
        self._diff_potential = None
        self._reset_embedding()

    def _reset_embedding(self):
        # Discard only the cached MDS embedding.
        self.embedding = None
    def set_params(self, **params):
        """Set the parameters on this estimator.

        Any parameters not given as named arguments will be left at their
        current value.

        Parameters
        ----------

        n_components : int, optional, default: 2
            number of dimensions in which the data will be embedded

        knn : int, optional, default: 5
            number of nearest neighbors on which to build kernel

        decay : int, optional, default: 40
            sets decay rate of kernel tails.
            If None, alpha decaying kernel is not used

        n_landmark : int, optional, default: 2000
            number of landmarks to use in fast PHATE

        t : int, optional, default: 'auto'
            power to which the diffusion operator is powered.
            This sets the level of diffusion. If 'auto', t is selected
            according to the knee point in the Von Neumann Entropy of
            the diffusion operator

        gamma : float, optional, default: 1
            Informational distance constant between -1 and 1.
            `gamma=1` gives the PHATE log potential, `gamma=0` gives
            a square root potential.

        n_pca : int, optional, default: 100
            Number of principal components to use for calculating
            neighborhoods. For extremely large datasets, using
            n_pca < 20 allows neighborhoods to be calculated in
            roughly log(n_samples) time.

        mds_solver : {'sgd', 'smacof'}, optional (default: 'sgd')
            which solver to use for metric MDS. SGD is substantially faster,
            but produces slightly less optimal results. Note that SMACOF was used
            for all figures in the PHATE paper.

        knn_dist : string, optional, default: 'euclidean'
            recommended values: 'euclidean', 'cosine', 'precomputed'
            Any metric from `scipy.spatial.distance` can be used
            distance metric for building kNN graph. Custom distance
            functions of form `f(x, y) = d` are also accepted. If 'precomputed',
            `data` should be an n_samples x n_samples distance or
            affinity matrix. Distance matrices are assumed to have zeros
            down the diagonal, while affinity matrices are assumed to have
            non-zero values down the diagonal. This is detected automatically
            using `data[0,0]`. You can override this detection with
            `knn_dist='precomputed_distance'` or `knn_dist='precomputed_affinity'`.

        knn_max : int, optional, default: None
            Maximum number of neighbors for which alpha decaying kernel
            is computed for each point. For very large datasets, setting `knn_max`
            to a small multiple of `knn` can speed up computation significantly.

        mds_dist : string, optional, default: 'euclidean'
            recommended values: 'euclidean' and 'cosine'
            Any metric from `scipy.spatial.distance` can be used
            distance metric for MDS

        mds : string, optional, default: 'metric'
            choose from ['classic', 'metric', 'nonmetric'].
            Selects which MDS algorithm is used for dimensionality reduction

        n_jobs : integer, optional, default: 1
            The number of jobs to use for the computation.
            If -1 all CPUs are used. If 1 is given, no parallel computing code
            is used at all, which is useful for debugging.
            For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
            n_jobs = -2, all CPUs but one are used

        random_state : integer or numpy.RandomState, optional, default: None
            The generator used to initialize SMACOF (metric, nonmetric) MDS
            If an integer is given, it fixes the seed
            Defaults to the global `numpy` random number generator

        verbose : `int` or `boolean`, optional (default: 1)
            If `True` or `> 0`, print status messages

        k : Deprecated for `knn`

        a : Deprecated for `decay`

        Examples
        --------
        >>> import phate
        >>> import matplotlib.pyplot as plt
        >>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=50, n_branch=5,
        ...                                               branch_length=50)
        >>> tree_data.shape
        (250, 50)
        >>> phate_operator = phate.PHATE(k=5, a=20, t=150)
        >>> tree_phate = phate_operator.fit_transform(tree_data)
        >>> tree_phate.shape
        (250, 2)
        >>> phate_operator.set_params(n_components=10)
        PHATE(a=20, alpha_decay=None, k=5, knn_dist='euclidean', mds='metric',
           mds_dist='euclidean', n_components=10, n_jobs=1, n_landmark=2000,
           n_pca=100, njobs=None, potential_method='log', random_state=None, t=150,
           verbose=1)
        >>> tree_phate = phate_operator.transform()
        >>> tree_phate.shape
        (250, 10)
        >>> # plt.scatter(tree_phate[:,0], tree_phate[:,1], c=tree_clusters)
        >>> # plt.show()

        Returns
        -------
        self
        """
        # Each parameter invalidates a different amount of cached state:
        # kernel changes force a full graph rebuild, potential changes only
        # the diffusion potential + embedding, embedding changes only MDS.
        reset_kernel = False
        reset_potential = False
        reset_embedding = False

        # mds parameters
        if "n_components" in params and params["n_components"] != self.n_components:
            self.n_components = params["n_components"]
            reset_embedding = True
            del params["n_components"]
        if "mds" in params and params["mds"] != self.mds:
            self.mds = params["mds"]
            reset_embedding = True
            del params["mds"]
        if "mds_solver" in params and params["mds_solver"] != self.mds_solver:
            self.mds_solver = params["mds_solver"]
            reset_embedding = True
            del params["mds_solver"]
        if "mds_dist" in params and params["mds_dist"] != self.mds_dist:
            self.mds_dist = params["mds_dist"]
            reset_embedding = True
            del params["mds_dist"]

        # diff potential parameters
        if "t" in params and params["t"] != self.t:
            self.t = params["t"]
            reset_potential = True
            del params["t"]
        if "potential_method" in params:
            # Deprecated alias: rewrite to the equivalent gamma and let the
            # gamma branch below handle the actual update.
            if params["potential_method"] == "log":
                params["gamma"] = 1
            elif params["potential_method"] == "sqrt":
                params["gamma"] = 0
            else:
                raise ValueError(
                    "potential_method {} not recognized. Please "
                    "use gamma between -1 and 1".format(params["potential_method"])
                )
            warnings.warn(
                "potential_method is deprecated. Setting gamma to {} to "
                "achieve {} transformation.".format(
                    params["gamma"], params["potential_method"]
                ),
                FutureWarning,
            )
            del params["potential_method"]
        if "gamma" in params and params["gamma"] != self.gamma:
            self.gamma = params["gamma"]
            reset_potential = True
            del params["gamma"]

        # kernel parameters
        if "k" in params and params["k"] != self.knn:
            # Deprecated alias for knn.
            self.knn = params["k"]
            reset_kernel = True
            del params["k"]
        if "a" in params and params["a"] != self.decay:
            # Deprecated alias for decay.
            self.decay = params["a"]
            reset_kernel = True
            del params["a"]
        if "knn" in params and params["knn"] != self.knn:
            self.knn = params["knn"]
            reset_kernel = True
            del params["knn"]
        if "knn_max" in params and params["knn_max"] != self.knn_max:
            self.knn_max = params["knn_max"]
            reset_kernel = True
            del params["knn_max"]
        if "decay" in params and params["decay"] != self.decay:
            self.decay = params["decay"]
            reset_kernel = True
            del params["decay"]
        if "n_pca" in params:
            # PCA to >= min(shape) components is a no-op; normalize to None.
            if self.X is not None and params["n_pca"] >= np.min(self.X.shape):
                params["n_pca"] = None
            if params["n_pca"] != self.n_pca:
                self.n_pca = params["n_pca"]
                reset_kernel = True
            del params["n_pca"]
        if "knn_dist" in params and params["knn_dist"] != self.knn_dist:
            self.knn_dist = params["knn_dist"]
            reset_kernel = True
            del params["knn_dist"]
        if "n_landmark" in params and params["n_landmark"] != self.n_landmark:
            if self.n_landmark is None or params["n_landmark"] is None:
                # need a different type of graph, reset entirely
                self._reset_graph()
            else:
                self._set_graph_params(n_landmark=params["n_landmark"])
            self.n_landmark = params["n_landmark"]
            del params["n_landmark"]

        # parameters that don't change the embedding
        if "n_jobs" in params:
            self.n_jobs = params["n_jobs"]
            self._set_graph_params(n_jobs=params["n_jobs"])
            del params["n_jobs"]
        if "random_state" in params:
            self.random_state = params["random_state"]
            self._set_graph_params(random_state=params["random_state"])
            del params["random_state"]
        if "verbose" in params:
            self.verbose = params["verbose"]
            _logger.set_level(self.verbose)
            self._set_graph_params(verbose=params["verbose"])
            del params["verbose"]

        if reset_kernel:
            # can't reset the graph kernel without making a new graph
            self._reset_graph()
        if reset_potential:
            self._reset_potential()
        if reset_embedding:
            self._reset_embedding()

        # Any remaining params are forwarded to the graph unchanged, then
        # the new configuration is validated as a whole.
        self._set_graph_params(**params)
        self._check_params()
        return self
def reset_mds(self, **kwargs):
"""
Deprecated. Reset parameters related to multidimensional scaling
Parameters
----------
n_components : int, optional, default: None
If given, sets number of dimensions in which the data
will be embedded
mds : string, optional, default: None
choose from ['classic', 'metric', 'nonmetric']
If given, sets which MDS algorithm is used for
dimensionality reduction
mds_dist : string, optional, default: None
recommended values: 'euclidean' and 'cosine'
Any metric from scipy.spatial.distance can be used
If given, sets the distance metric for MDS
"""
warnings.warn(
"PHATE.reset_mds is deprecated. " "Please use PHATE.set_params in future.",
FutureWarning,
)
self.set_params(**kwargs)
def reset_potential(self, **kwargs):
"""
Deprecated. Reset parameters related to the diffusion potential
Parameters
----------
t : int or 'auto', optional, default: None
Power to which the diffusion operator is powered
If given, sets the level of diffusion
potential_method : string, optional, default: None
choose from ['log', 'sqrt']
If given, sets which transformation of the diffusional
operator is used to compute the diffusion potential
"""
warnings.warn(
"PHATE.reset_potential is deprecated. "
"Please use PHATE.set_params in future.",
FutureWarning,
)
self.set_params(**kwargs)
def _parse_input(self, X):
# passing graphs to PHATE
if isinstance(X, graphtools.graphs.LandmarkGraph) or (
isinstance(X, graphtools.base.BaseGraph) and self.n_landmark is None
):
self.graph = X
X = X.data
n_pca = self.graph.n_pca
update_graph = False
if isinstance(self.graph, graphtools.graphs.TraditionalGraph):
precomputed = self.graph.precomputed
else:
precomputed = None
return X, n_pca, precomputed, update_graph
elif isinstance(X, graphtools.base.BaseGraph):
self.graph = None
X = X.kernel
precomputed = "affinity"
n_pca = None
update_graph = False
return X, n_pca, precomputed, update_graph
else:
try:
if isinstance(X, pygsp.graphs.Graph):
self.graph = None
X = X.W
precomputed = "adjacency"
update_graph = False
n_pca = None
return X, n_pca, precomputed, update_graph
except NameError:
# pygsp not installed
pass
# checks on regular data
update_graph = True
try:
if isinstance(X, anndata.AnnData):
X = X.X
except NameError:
# anndata not installed
pass
if not callable(self.knn_dist) and self.knn_dist.startswith("precomputed"):
if self.knn_dist == "precomputed":
# automatic detection
if isinstance(X, sparse.coo_matrix):
X = X.tocsr()
if X[0, 0] == 0:
precomputed = "distance"
else:
precomputed = "affinity"
elif self.knn_dist in ["precomputed_affinity", "precomputed_distance"]:
precomputed = self.knn_dist.split("_")[1]
else:
raise ValueError(
"knn_dist {} not recognized. Did you mean "
"'precomputed_distance', "
"'precomputed_affinity', or 'precomputed' "
"(automatically detects distance or affinity)?"
)
n_pca = None
else:
precomputed = None
if self.n_pca is None or self.n_pca >= np.min(X.shape):
n_pca = None
else:
n_pca = self.n_pca
return X, n_pca, precomputed, update_graph
def _update_graph(self, X, precomputed, n_pca, n_landmark):
if self.X is not None and not utils.matrix_is_equivalent(X, self.X):
"""
If the same data is used, we can reuse existing kernel and
diffusion matrices. Otherwise we have to recompute.
"""
self._reset_graph()
else:
try:
self.graph.set_params(
decay=self.decay,
knn=self.knn,
knn_max=self.knn_max,
distance=self.knn_dist,
precomputed=precomputed,
n_jobs=self.n_jobs,
verbose=self.verbose,
n_pca=n_pca,
n_landmark=n_landmark,
random_state=self.random_state,
)
_logger.info("Using precomputed graph and diffusion operator...")
except ValueError as e:
# something changed that should have invalidated the graph
_logger.debug("Reset graph due to {}".format(str(e)))
self._reset_graph()
    def fit(self, X):
        """Computes the diffusion operator

        Parameters
        ----------
        X : array, shape=[n_samples, n_features]
            input data with `n_samples` samples and `n_dimensions`
            dimensions. Accepted data types: `numpy.ndarray`,
            `scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If
            `knn_dist` is 'precomputed', `data` should be a n_samples x
            n_samples distance or affinity matrix

        Returns
        -------
        phate_operator : PHATE
            The estimator object
        """
        # Normalize the input: unwrap graphs/AnnData and decide whether the
        # matrix is precomputed and how many PCA dimensions to use.
        X, n_pca, precomputed, update_graph = self._parse_input(X)
        if precomputed is None:
            _logger.info(
                "Running PHATE on {} observations and {} variables.".format(
                    X.shape[0], X.shape[1]
                )
            )
        else:
            _logger.info(
                "Running PHATE on precomputed {} matrix with {} observations.".format(
                    precomputed, X.shape[0]
                )
            )
        # Landmarking is only worthwhile when there are more samples than
        # the requested number of landmarks.
        if self.n_landmark is None or X.shape[0] <= self.n_landmark:
            n_landmark = None
        else:
            n_landmark = self.n_landmark
        # Try to update the existing graph in place; this resets self.graph
        # to None if the parameters/data changed incompatibly.
        if self.graph is not None and update_graph:
            self._update_graph(X, precomputed, n_pca, n_landmark)
        self.X = X
        # (Re)build the graph if we don't have a reusable one.
        if self.graph is None:
            with _logger.task("graph and diffusion operator"):
                self.graph = graphtools.Graph(
                    X,
                    n_pca=n_pca,
                    n_landmark=n_landmark,
                    distance=self.knn_dist,
                    precomputed=precomputed,
                    knn=self.knn,
                    knn_max=self.knn_max,
                    decay=self.decay,
                    thresh=1e-4,
                    n_jobs=self.n_jobs,
                    verbose=self.verbose,
                    random_state=self.random_state,
                    **(self.kwargs)
                )
        # landmark op doesn't build unless forced: accessing the property
        # here triggers its (potentially expensive) construction eagerly.
        self.diff_op
        return self
    def transform(self, X=None, t_max=100, plot_optimal_t=False, ax=None):
        """Computes the position of the cells in the embedding space

        Parameters
        ----------
        X : array, optional, shape=[n_samples, n_features]
            input data with `n_samples` samples and `n_dimensions`
            dimensions. Not required, since PHATE does not currently embed
            cells not given in the input matrix to `PHATE.fit()`.
            Accepted data types: `numpy.ndarray`,
            `scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If
            `knn_dist` is 'precomputed', `data` should be a n_samples x
            n_samples distance or affinity matrix
        t_max : int, optional, default: 100
            maximum t to test if `t` is set to 'auto'
        plot_optimal_t : boolean, optional, default: False
            If true and `t` is set to 'auto', plot the Von Neumann
            entropy used to select t
        ax : matplotlib.axes.Axes, optional
            If given and `plot_optimal_t` is true, plot will be drawn
            on the given axis.

        Returns
        -------
        embedding : array, shape=[n_samples, n_dimensions]
            The cells embedded in a lower dimensional space using PHATE
        """
        if self.graph is None:
            raise NotFittedError(
                "This PHATE instance is not fitted yet. Call "
                "'fit' with appropriate arguments before "
                "using this method."
            )
        elif X is not None and not utils.matrix_is_equivalent(X, self.X):
            # fit to external data: discouraged, since the embedding was
            # learned on self.X, so warn but attempt interpolation anyway
            warnings.warn(
                "Pre-fit PHATE should not be used to transform a "
                "new data matrix. Please fit PHATE to the new"
                " data by running 'fit' with the new data.",
                RuntimeWarning,
            )
            if (
                isinstance(self.graph, graphtools.graphs.TraditionalGraph)
                and self.graph.precomputed is not None
            ):
                # a precomputed distance/affinity matrix cannot be extended
                # to rows that were not part of the original matrix
                raise ValueError(
                    "Cannot transform additional data using a "
                    "precomputed distance matrix."
                )
            else:
                # ensure self.embedding exists before interpolating into it
                if self.embedding is None:
                    self.transform()
                transitions = self.graph.extend_to_data(X)
                return self.graph.interpolate(self.embedding, transitions)
        else:
            # normal path: embed the fitted data (cached after first call)
            diff_potential = self._calculate_potential(
                t_max=t_max, plot_optimal_t=plot_optimal_t, ax=ax
            )
            if self.embedding is None:
                with _logger.task("{} MDS".format(self.mds)):
                    self.embedding = mds.embed_MDS(
                        diff_potential,
                        ndim=self.n_components,
                        how=self.mds,
                        solver=self.mds_solver,
                        distance_metric=self.mds_dist,
                        n_jobs=self.n_jobs,
                        seed=self.random_state,
                        verbose=max(self.verbose - 1, 0),
                    )
            if isinstance(self.graph, graphtools.graphs.LandmarkGraph):
                # MDS was run on landmarks only; interpolate back out to
                # every original observation
                _logger.debug("Extending to original data...")
                return self.graph.interpolate(self.embedding)
            else:
                return self.embedding
def fit_transform(self, X, **kwargs):
"""Computes the diffusion operator and the position of the cells in the
embedding space
Parameters
----------
X : array, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData` If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
kwargs : further arguments for `PHATE.transform()`
Keyword arguments as specified in :func:`~phate.PHATE.transform`
Returns
-------
embedding : array, shape=[n_samples, n_dimensions]
The cells embedded in a lower dimensional space using PHATE
"""
with _logger.task("PHATE"):
self.fit(X)
embedding = self.transform(**kwargs)
return embedding
def _calculate_potential(self, t=None, t_max=100, plot_optimal_t=False, ax=None):
"""Calculates the diffusion potential
Parameters
----------
t : int
power to which the diffusion operator is powered
sets the level of diffusion
t_max : int, default: 100
Maximum value of `t` to test
plot_optimal_t : boolean, default: False
If true, plots the Von Neumann Entropy and knee point
ax : matplotlib.Axes, default: None
If plot=True and ax is not None, plots the VNE on the given axis
Otherwise, creates a new axis and displays the plot
Returns
-------
diff_potential : array-like, shape=[n_samples, n_samples]
The diffusion potential fit on the input data
"""
if t is None:
t = self.t
if self._diff_potential is None:
if t == "auto":
t = self._find_optimal_t(t_max=t_max, plot=plot_optimal_t, ax=ax)
else:
t = self.t
with _logger.task("diffusion potential"):
# diffused diffusion operator
diff_op_t = np.linalg.matrix_power(self.diff_op, t)
if self.gamma == 1:
# handling small values
diff_op_t = diff_op_t + 1e-7
self._diff_potential = -1 * np.log(diff_op_t)
elif self.gamma == -1:
self._diff_potential = diff_op_t
else:
c = (1 - self.gamma) / 2
self._diff_potential = ((diff_op_t) ** c) / c
elif plot_optimal_t:
self._find_optimal_t(t_max=t_max, plot=plot_optimal_t, ax=ax)
return self._diff_potential
def _von_neumann_entropy(self, t_max=100):
"""Calculate Von Neumann Entropy
Determines the Von Neumann entropy of the diffusion affinities
at varying levels of `t`. The user should select a value of `t`
around the "knee" of the entropy curve.
We require that 'fit' stores the value of `PHATE.diff_op`
in order to calculate the Von Neumann entropy.
Parameters
----------
t_max : int, default: 100
Maximum value of `t` to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of `t`
"""
t = np.arange(t_max)
return t, vne.compute_von_neumann_entropy(self.diff_op, t_max=t_max)
    def _find_optimal_t(self, t_max=100, plot=False, ax=None):
        """Find the optimal value of t

        Selects the optimal value of t based on the knee point of the
        Von Neumann Entropy of the diffusion operator.

        Parameters
        ----------
        t_max : int, default: 100
            Maximum value of t to test
        plot : boolean, default: False
            If true, plots the Von Neumann Entropy and knee point
        ax : matplotlib.Axes, default: None
            If plot=True and ax is not None, plots the VNE on the given axis
            Otherwise, creates a new axis and displays the plot

        Returns
        -------
        t_opt : int
            The optimal value of t
        """
        with _logger.task("optimal t"):
            # entropy for each candidate t; the knee marks diminishing returns
            t, h = self._von_neumann_entropy(t_max=t_max)
            t_opt = vne.find_knee_point(y=h, x=t)
            _logger.info("Automatically selected t = {}".format(t_opt))
        if plot:
            # only call plt.show() if we created the figure ourselves
            if ax is None:
                fig, ax = plt.subplots()
                show = True
            else:
                show = False
            ax.plot(t, h)
            ax.scatter(t_opt, h[t == t_opt], marker="*", c="k", s=50)
            ax.set_xlabel("t")
            ax.set_ylabel("Von Neumann Entropy")
            ax.set_title("Optimal t = {}".format(t_opt))
            if show:
                plt.show()
        self.optimal_t = t_opt
        return t_opt
| gpl-2.0 |
InspurUSA/kudu | python/kudu/tests/test_scanner.py | 2 | 14089 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import division
from kudu.compat import unittest
from kudu.tests.util import TestScanBase
from kudu.tests.common import KuduTestBase, TimeoutError
import kudu
import datetime
import time
import pytest
class TestScanner(TestScanBase):
    """Integration tests for the kudu-python scanner API.

    Relies on the test cluster, tables and fixture tuples created by
    TestScanBase/KuduTestBase.
    """

    @classmethod
    def setUpClass(cls):
        # Conventional name for a classmethod's first argument is `cls`.
        super(TestScanner, cls).setUpClass()

    def setUp(self):
        # Per-test setup is handled entirely by setUpClass.
        pass

    def test_scan_rows_basic(self):
        # Let's scan with no predicates
        scanner = self.table.scanner().open()
        tuples = scanner.read_all_tuples()
        self.assertEqual(sorted(tuples), self.tuples)

    def test_scan_rows_simple_predicate(self):
        key = self.table['key']
        preds = [key > 19, key < 50]

        def _read_predicates(preds):
            scanner = self.table.scanner()
            scanner.add_predicates(preds)
            scanner.open()
            return scanner.read_all_tuples()

        tuples = _read_predicates(preds)
        self.assertEqual(sorted(tuples), self.tuples[20:50])
        # verify predicates reusable
        tuples = _read_predicates(preds)
        self.assertEqual(sorted(tuples), self.tuples[20:50])

    def test_scan_limit(self):
        # Set limits both below and above the max number of rows.
        limits = [self.nrows - 1, self.nrows, self.nrows + 1]
        for limit in limits:
            scanner = self.table.scanner()
            scanner.set_limit(limit)
            tuples = scanner.read_all_tuples()
            self.assertEqual(len(tuples), min(limit, self.nrows))

    def test_scan_rows_string_predicate_and_projection(self):
        scanner = self.table.scanner()
        scanner.set_projected_column_names(['key', 'string_val'])
        sv = self.table['string_val']
        scanner.add_predicates([sv >= 'hello_20',
                                sv <= 'hello_22'])
        scanner.set_fault_tolerant()
        scanner.open()
        tuples = scanner.read_all_tuples()
        # odd keys have NULL string_val, so only 20 and 22 match
        self.assertEqual(sorted(tuples), [(20, 'hello_20'), (22, 'hello_22')])

    def test_scan_rows_in_list_predicate(self):
        """
        Test scanner with an InList predicate and
        a string comparison predicate
        """
        key_list = [2, 98]
        scanner = self.table.scanner()
        scanner.set_fault_tolerant()\
            .add_predicates([
                self.table[0].in_list(key_list),
                self.table['string_val'] >= 'hello_9'
            ])
        scanner.open()
        tuples = scanner.read_all_tuples()
        self.assertEqual(tuples, [self.tuples[98]])

    def test_scan_rows_is_not_null_predicate(self):
        """
        Test scanner with an IsNotNull predicate on string_val column
        """
        pred = self.table['string_val'].is_not_null()
        scanner = self.table.scanner()
        scanner.add_predicate(pred)
        scanner.open()
        tuples = scanner.read_all_tuples()
        # string_val is only set on even keys
        rows = [i for i in range(100) if i % 2 == 0]
        self.assertEqual(sorted(tuples), [self.tuples[i] for i in rows])

    def test_scan_rows_is_null_predicate(self):
        """
        Test scanner with an IsNull predicate on string_val column
        """
        pred = self.table['string_val'].is_null()
        scanner = self.table.scanner()
        scanner.add_predicate(pred)
        scanner.open()
        tuples = scanner.read_all_tuples()
        # string_val is NULL on odd keys
        rows = [i for i in range(100) if i % 2 != 0]
        self.assertEqual(sorted(tuples), [self.tuples[i] for i in rows])

    def test_index_projection_with_schema(self):
        scanner = self.table.scanner()
        scanner.set_projected_column_indexes([0, 1])
        scanner.set_fault_tolerant()
        scanner.open()
        tuples = scanner.read_all_tuples()
        # Build schema to check against
        builder = kudu.schema_builder()
        builder.add_column('key', kudu.int32, nullable=False)
        builder.add_column('int_val', kudu.int32)
        builder.set_primary_keys(['key'])
        expected_schema = builder.build()
        # Build new schema from projection schema
        builder = kudu.schema_builder()
        for col in scanner.get_projection_schema():
            builder.copy_column(col)
        builder.set_primary_keys(['key'])
        new_schema = builder.build()
        self.assertEqual(tuples, [t[0:2] for t in self.tuples])
        self.assertTrue(expected_schema.equals(new_schema))

    def test_scan_with_bounds(self):
        scanner = self.table.scanner()
        scanner.set_fault_tolerant()\
            .add_lower_bound({'key': 50})\
            .add_exclusive_upper_bound({'key': 55})
        scanner.open()
        tuples = scanner.read_all_tuples()
        self.assertEqual(sorted(tuples), self.tuples[50:55])

    def test_scan_invalid_predicates(self):
        scanner = self.table.scanner()
        sv = self.table['string_val']
        with self.assertRaises(TypeError):
            scanner.add_predicates([sv >= None])
        with self.assertRaises(TypeError):
            scanner.add_predicates([sv >= 1])
        with self.assertRaises(TypeError):
            scanner.add_predicates([sv.in_list(['testing',
                                                datetime.datetime.utcnow()])])
        with self.assertRaises(TypeError):
            scanner.add_predicates([sv.in_list([
                'hello_20',
                120
            ])])

    def test_scan_batch_by_batch(self):
        scanner = self.table.scanner()
        scanner.set_fault_tolerant()
        lower_bound = scanner.new_bound()
        lower_bound['key'] = 10
        scanner.add_lower_bound(lower_bound)
        upper_bound = scanner.new_bound()
        upper_bound['key'] = 90
        scanner.add_exclusive_upper_bound(upper_bound)
        scanner.open()
        tuples = []
        while scanner.has_more_rows():
            batch = scanner.next_batch()
            tuples.extend(batch.as_tuples())
        self.assertEqual(sorted(tuples), self.tuples[10:90])

    def test_unixtime_micros(self):
        """
        Test setting and getting unixtime_micros fields
        """
        # Insert new rows
        self.insert_new_unixtime_micros_rows()
        # Validate results
        scanner = self.table.scanner()
        scanner.set_fault_tolerant().open()
        self.assertEqual(sorted(self.tuples), scanner.read_all_tuples())

    def test_read_mode(self):
        """
        Test scanning in latest, snapshot and read_your_writes read modes.
        """
        # Delete row
        self.delete_insert_row_for_read_test()
        # Check scanner results prior to delete
        scanner = self.table.scanner()
        scanner.set_read_mode('snapshot')\
            .set_snapshot(self.snapshot_timestamp)\
            .open()
        self.assertEqual(sorted(self.tuples[1:]), sorted(scanner.read_all_tuples()))
        # Check scanner results after delete with latest mode
        timeout = time.time() + 10
        check_tuples = []
        while check_tuples != sorted(self.tuples):
            if time.time() > timeout:
                # BUGFIX: the message used to read "allocatedtime." because
                # of a missing space in a concatenated literal.
                raise TimeoutError("Could not validate results in allocated "
                                   "time.")
            scanner = self.table.scanner()
            scanner.set_read_mode(kudu.READ_LATEST)\
                .open()
            check_tuples = sorted(scanner.read_all_tuples())
            # Avoid tight looping
            time.sleep(0.05)
        # Check scanner results after delete with read_your_writes mode
        scanner = self.table.scanner()
        scanner.set_read_mode('read_your_writes')\
            .open()
        self.assertEqual(sorted(self.tuples), sorted(scanner.read_all_tuples()))

    def test_resource_metrics_and_cache_blocks(self):
        """
        Test getting the resource metrics after scanning and
        setting the scanner to not cache blocks.
        """
        # Build scanner and read through all batches and retrieve metrics.
        scanner = self.table.scanner()
        scanner.set_fault_tolerant().set_cache_blocks(False).open()
        scanner.read_all_tuples()
        metrics = scanner.get_resource_metrics()
        # Confirm that the scanner returned cache hit and miss values.
        self.assertTrue('cfile_cache_hit_bytes' in metrics)
        self.assertTrue('cfile_cache_miss_bytes' in metrics)

    def verify_pred_type_scans(self, preds, row_indexes, count_only=False):
        # Using the incoming list of predicates, verify that the row returned
        # matches the inserted tuple at the row indexes specified in a
        # slice object
        scanner = self.type_table.scanner()
        scanner.set_fault_tolerant()
        scanner.add_predicates(preds)
        scanner.set_projected_column_names(self.projected_names_w_o_float)
        tuples = scanner.open().read_all_tuples()
        # verify rows
        if count_only:
            self.assertEqual(len(self.type_test_rows[row_indexes]), len(tuples))
        else:
            self.assertEqual(sorted(self.type_test_rows[row_indexes]), tuples)

    def test_unixtime_micros_pred(self):
        # Test unixtime_micros value predicate
        self._test_unixtime_micros_pred()

    def test_bool_pred(self):
        # Test a boolean value predicate
        self._test_bool_pred()

    def test_double_pred(self):
        # Test a double precision float predicate
        self._test_double_pred()

    def test_float_pred(self):
        # Test a single precision float predicate
        # Does a row check count only
        self._test_float_pred()

    def test_decimal_pred(self):
        if kudu.CLIENT_SUPPORTS_DECIMAL:
            # Test a decimal predicate
            self._test_decimal_pred()

    def test_binary_pred(self):
        # Test a binary predicate
        self._test_binary_pred()

    def test_scan_selection(self):
        """
        This test confirms that setting the scan selection policy on the
        scanner does not cause any errors. There is no way to confirm
        that the policy was actually set. This functionality is
        tested in the C++ test:
        ClientTest.TestReplicatedMultiTabletTableFailover.
        """
        for policy in ['leader', kudu.CLOSEST_REPLICA, 2]:
            scanner = self.table.scanner()
            scanner.set_selection(policy)
            scanner.open()
            self.assertEqual(sorted(scanner.read_all_tuples()),
                             sorted(self.tuples))

    @pytest.mark.skipif(not (kudu.CLIENT_SUPPORTS_PANDAS),
                        reason="Pandas required to run this test.")
    def test_scanner_to_pandas_types(self):
        """
        This test confirms that data types are converted as expected to Pandas.
        """
        import numpy as np
        scanner = self.type_table.scanner()
        df = scanner.to_pandas()
        types = df.dtypes
        # Use builtin `object`/`bool` instead of np.object/np.bool: the numpy
        # aliases were identical to the builtins, deprecated in numpy 1.20
        # and removed in 1.24.
        if kudu.CLIENT_SUPPORTS_DECIMAL:
            self.assertEqual(types[0], np.int64)
            self.assertEqual(types[1], 'datetime64[ns, UTC]')
            self.assertEqual(types[2], object)
            self.assertEqual(types[3], object)
            self.assertEqual(types[4], bool)
            self.assertEqual(types[5], np.float64)
            self.assertEqual(types[6], np.int8)
            self.assertEqual(types[7], object)
            self.assertEqual(types[8], np.float32)
        else:
            self.assertEqual(types[0], np.int64)
            self.assertEqual(types[1], 'datetime64[ns, UTC]')
            self.assertEqual(types[2], object)
            self.assertEqual(types[3], bool)
            self.assertEqual(types[4], np.float64)
            self.assertEqual(types[5], np.int8)
            self.assertEqual(types[6], object)
            self.assertEqual(types[7], np.float32)

    @pytest.mark.skipif(not (kudu.CLIENT_SUPPORTS_PANDAS),
                        reason="Pandas required to run this test.")
    def test_scanner_to_pandas_row_count(self):
        """
        This test confirms that the record counts match between Pandas and the scanner.
        """
        scanner = self.type_table.scanner()
        scanner_count = len(scanner.read_all_tuples())
        scanner = self.type_table.scanner()
        df = scanner.to_pandas()
        self.assertEqual(scanner_count, df.shape[0])

    @pytest.mark.skipif(not (kudu.CLIENT_SUPPORTS_PANDAS),
                        reason="Pandas required to run this test.")
    def test_scanner_to_pandas_index(self):
        """
        This test confirms that an index is correctly applied.
        """
        scanner = self.type_table.scanner()
        df = scanner.to_pandas(index='key')
        self.assertEqual(df.index.name, 'key')
        self.assertEqual(list(df.index), [1, 2])

    @pytest.mark.skipif((not(kudu.CLIENT_SUPPORTS_PANDAS) or
                        (not(kudu.CLIENT_SUPPORTS_DECIMAL))),
                        reason="Pandas and Decimal support required to run this test.")
    def test_scanner_to_pandas_coerce_float(self):
        """
        This test confirms that a decimal column is coerced to a double when specified.
        """
        # BUGFIX: this method was previously also named
        # test_scanner_to_pandas_index, which shadowed the index test above
        # so that test never ran.
        import numpy as np
        scanner = self.type_table.scanner()
        df = scanner.to_pandas(coerce_float=True)
        types = df.dtypes
        self.assertEqual(types[2], np.float64)
| apache-2.0 |
rexshihaoren/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs

# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)

# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)

# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
# Evaluate the decision function on the whole grid in one vectorized call.
# The previous per-point loop passed a 1D sample ([x1, x2]) to
# decision_function, which expects a 2D array of shape
# (n_samples, n_features); np.c_ builds that 2D array directly.
grid = np.c_[X1.ravel(), X2.ravel()]
Z = clf.decision_function(grid).reshape(X1.shape)

# dashed lines mark the +/-1 margin, the solid line the decision boundary
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
mjgrav2001/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
    """Transform accentuated unicode symbols into their simple counterpart

    Warning: the python-level loop and join operations make this
    implementation 20 times slower than the strip_accents_ascii basic
    normalization.

    See also
    --------
    strip_accents_ascii
        Remove accentuated char for any unicode symbol that has a direct
        ASCII equivalent.
    """
    # NFKD splits each accented character into a base character followed by
    # combining marks; dropping the combining marks removes the accents.
    decomposed = unicodedata.normalize('NFKD', s)
    kept = [char for char in decomposed if not unicodedata.combining(char)]
    return ''.join(kept)
def strip_accents_ascii(s):
    """Transform accentuated unicode symbols into ascii or nothing

    Warning: this solution is only suited for languages that have a direct
    transliteration to ASCII symbols.

    See also
    --------
    strip_accents_unicode
        Remove accentuated char for any unicode symbol.
    """
    # Decompose accented characters, then drop anything (combining marks,
    # non-Latin symbols) that cannot be encoded as plain ASCII.
    decomposed = unicodedata.normalize('NFKD', s)
    ascii_bytes = decomposed.encode('ASCII', 'ignore')
    return ascii_bytes.decode('ASCII')
def strip_tags(s):
    """Basic regexp based HTML / XML tag stripper function

    For serious HTML/XML preprocessing you should rather use an external
    library such as lxml or BeautifulSoup.
    """
    # Replace each <...> tag with a space so adjacent words don't merge.
    tag_pattern = re.compile(r"<([^>]+)>", flags=re.UNICODE)
    return tag_pattern.sub(" ", s)
def _check_stop_list(stop):
    """Resolve a stop-word specification into an actual collection.

    Returns the built-in English list for "english", None for None, and a
    frozenset for any other collection; other strings raise ValueError.
    """
    if stop == "english":
        return ENGLISH_STOP_WORDS
    if isinstance(stop, six.string_types):
        raise ValueError("not a built-in stop list: %s" % stop)
    if stop is None:
        return None
    # assume it's a collection
    return frozenset(stop)
class VectorizerMixin(object):
    """Provides common code for text vectorizers (tokenization logic)."""

    # Matches runs of two or more whitespace characters; used to normalize
    # whitespace before character n-gram extraction.
    _white_spaces = re.compile(r"\s\s+")

    def decode(self, doc):
        """Decode the input into a string of unicode symbols

        The decoding strategy depends on the vectorizer parameters.
        """
        # `self.input` selects whether `doc` is a path, a file-like object,
        # or the content itself.
        if self.input == 'filename':
            with open(doc, 'rb') as fh:
                doc = fh.read()
        elif self.input == 'file':
            doc = doc.read()
        if isinstance(doc, bytes):
            doc = doc.decode(self.encoding, self.decode_error)
        # np.nan commonly appears when documents come from a pandas column
        # with missing values; identity check works because nan is a
        # singleton here.
        if doc is np.nan:
            raise ValueError("np.nan is an invalid document, expected byte or "
                             "unicode string.")
        return doc

    def _word_ngrams(self, tokens, stop_words=None):
        """Turn tokens into a sequence of n-grams after stop words filtering"""
        # handle stop words
        if stop_words is not None:
            tokens = [w for w in tokens if w not in stop_words]
        # handle token n-grams
        min_n, max_n = self.ngram_range
        if max_n != 1:
            # unigram-only fast path skipped: build space-joined n-grams of
            # every length in [min_n, max_n], capped by the token count
            original_tokens = tokens
            tokens = []
            n_original_tokens = len(original_tokens)
            for n in xrange(min_n,
                            min(max_n + 1, n_original_tokens + 1)):
                for i in xrange(n_original_tokens - n + 1):
                    tokens.append(" ".join(original_tokens[i: i + n]))
        return tokens

    def _char_ngrams(self, text_document):
        """Tokenize text_document into a sequence of character n-grams"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)
        text_len = len(text_document)
        ngrams = []
        min_n, max_n = self.ngram_range
        # slide a window of each size n over the whole document
        for n in xrange(min_n, min(max_n + 1, text_len + 1)):
            for i in xrange(text_len - n + 1):
                ngrams.append(text_document[i: i + n])
        return ngrams

    def _char_wb_ngrams(self, text_document):
        """Whitespace sensitive char-n-gram tokenization.

        Tokenize text_document into a sequence of character n-grams
        excluding any whitespace (operating only inside word boundaries)"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)
        min_n, max_n = self.ngram_range
        ngrams = []
        for w in text_document.split():
            # pad each word with spaces so n-grams mark word boundaries
            w = ' ' + w + ' '
            w_len = len(w)
            for n in xrange(min_n, max_n + 1):
                offset = 0
                ngrams.append(w[offset:offset + n])
                while offset + n < w_len:
                    offset += 1
                    ngrams.append(w[offset:offset + n])
                if offset == 0:   # count a short word (w_len < n) only once
                    break
        return ngrams

    def build_preprocessor(self):
        """Return a function to preprocess the text before tokenization"""
        if self.preprocessor is not None:
            return self.preprocessor

        # unfortunately python functools package does not have an efficient
        # `compose` function that would have allowed us to chain a dynamic
        # number of functions. However the cost of a lambda call is a few
        # hundreds of nanoseconds which is negligible when compared to the
        # cost of tokenizing a string of 1000 chars for instance.
        noop = lambda x: x

        # accent stripping
        if not self.strip_accents:
            strip_accents = noop
        elif callable(self.strip_accents):
            strip_accents = self.strip_accents
        elif self.strip_accents == 'ascii':
            strip_accents = strip_accents_ascii
        elif self.strip_accents == 'unicode':
            strip_accents = strip_accents_unicode
        else:
            raise ValueError('Invalid value for "strip_accents": %s' %
                             self.strip_accents)

        if self.lowercase:
            return lambda x: strip_accents(x.lower())
        else:
            return strip_accents

    def build_tokenizer(self):
        """Return a function that splits a string into a sequence of tokens"""
        if self.tokenizer is not None:
            return self.tokenizer
        # compile once here so the returned closure reuses the pattern
        token_pattern = re.compile(self.token_pattern)
        return lambda doc: token_pattern.findall(doc)

    def get_stop_words(self):
        """Build or fetch the effective stop words list"""
        return _check_stop_list(self.stop_words)

    def build_analyzer(self):
        """Return a callable that handles preprocessing and tokenization"""
        if callable(self.analyzer):
            return self.analyzer

        preprocess = self.build_preprocessor()

        if self.analyzer == 'char':
            return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))

        elif self.analyzer == 'char_wb':
            return lambda doc: self._char_wb_ngrams(
                preprocess(self.decode(doc)))

        elif self.analyzer == 'word':
            stop_words = self.get_stop_words()
            tokenize = self.build_tokenizer()

            return lambda doc: self._word_ngrams(
                tokenize(preprocess(self.decode(doc))), stop_words)

        else:
            raise ValueError('%s is not a valid tokenization scheme/analyzer' %
                             self.analyzer)

    def _validate_vocabulary(self):
        # Normalize a user-supplied vocabulary (mapping or iterable of terms)
        # into self.vocabulary_ and record whether it was fixed by the user.
        vocabulary = self.vocabulary
        if vocabulary is not None:
            if not isinstance(vocabulary, Mapping):
                # iterable of terms: assign indices in iteration order,
                # rejecting duplicates
                vocab = {}
                for i, t in enumerate(vocabulary):
                    if vocab.setdefault(t, i) != i:
                        msg = "Duplicate term in vocabulary: %r" % t
                        raise ValueError(msg)
                vocabulary = vocab
            else:
                # mapping: indices must be exactly 0..len-1 with no repeats
                indices = set(six.itervalues(vocabulary))
                if len(indices) != len(vocabulary):
                    raise ValueError("Vocabulary contains repeated indices.")
                for i in xrange(len(vocabulary)):
                    if i not in indices:
                        msg = ("Vocabulary of size %d doesn't contain index "
                               "%d." % (len(vocabulary), i))
                        raise ValueError(msg)
            if not vocabulary:
                raise ValueError("empty vocabulary passed to fit")
            self.fixed_vocabulary_ = True
            self.vocabulary_ = dict(vocabulary)
        else:
            self.fixed_vocabulary_ = False

    def _check_vocabulary(self):
        """Check if vocabulary is empty or missing (not fit-ed)"""
        msg = "%(name)s - Vocabulary wasn't fitted."
        # NOTE(review): the trailing comma makes this line a discarded
        # 1-tuple expression; harmless, since check_is_fitted raises on
        # failure, but it looks unintentional.
        check_is_fitted(self, 'vocabulary_', msg=msg),

        if len(self.vocabulary_) == 0:
            raise ValueError("Vocabulary is empty")

    @property
    @deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
                "removed in 0.18. Please use `fixed_vocabulary_` instead.")
    def fixed_vocabulary(self):
        # Deprecated alias kept for backward compatibility.
        return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp select tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
salkinium/bachelor | experiment_control/visualizer.py | 1 | 6167 | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Niklas Hauser
# All rights reserved.
#
# The file is part of my bachelor thesis and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import os
import sys
from multiprocessing import Process, Value, Array
import numpy as np
from matplotlib import pyplot as plt
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'link_analysis'))
from string_message import StringMessage
from link import Link
class MessageVisualizer(Process, object):
    """Live plot of link-quality metrics (temperature, rx bit errors, LQI,
    RSSI) for several nodes.

    The visualizer runs in its own process: the parent hands raw tx/rx
    message strings over through shared-memory buffers
    (``add_tx_message`` / ``add_rx_message``) and the child process parses
    them, groups them into ``Link`` objects and updates the matplotlib
    figure.
    """

    def __init__(self, nodes=5, messages=100):
        super(MessageVisualizer, self).__init__()

        # Number of node curves to draw (at least one).
        # NOTE(review): at most len(self.colors) == 6 nodes are supported;
        # more would raise an IndexError when picking a color below.
        self.nodes = nodes if nodes > 0 else 1
        # Link currently being assembled from a tx message and its rx replies
        self.link = None

        self.fig = plt.figure(num=None, figsize=(16, 12), dpi=80,
                              facecolor='w', edgecolor='k')
        self.fig.subplots_adjust(top=0.8)

        # Four vertically stacked axes; only the bottom one shows the x axis
        self.temperature = self.fig.add_axes([0.05, 0.7, 0.9, 0.2],
                                             label='temperature')
        self.temperature.axes.get_xaxis().set_visible(False)
        self.temperature.set_ylim(20, 100)
        self.temperature.set_ylabel('Temperature')

        self.errors = self.fig.add_axes([0.05, 0.5, 0.9, 0.2], label='errors')
        self.errors.axes.get_xaxis().set_visible(False)
        self.errors.set_ylim(0, 100)
        self.errors.set_ylabel('rx bit errors')

        self.lqi = self.fig.add_axes([0.05, 0.3, 0.9, 0.2], label='lqi')
        self.lqi.axes.get_xaxis().set_visible(False)
        self.lqi.set_ylim(40, 120)
        self.lqi.set_ylabel('LQI')

        self.rssi = self.fig.add_axes([0.05, 0.1, 0.9, 0.2], label='rssi')
        self.rssi.set_ylim(-100, -60)
        self.rssi.set_ylabel('RSSI in dB')
        self.rssi.set_xlabel('messages')

        # Per-node line colors.
        # NOTE(review): 'b' appears twice (index 1 and 5), so nodes 1 and 5
        # share a color; kept as-is to preserve the current appearance.
        self.colors = ['r', 'b', 'g', 'c', 'm', 'b']
        self.lw = 1.5
        # Number of samples kept in the rolling plot window (minimum 10)
        self.messages = messages if messages >= 10 else 10

        # reserve the values
        self.x_values = np.arange(0.0, self.messages, 1.0)
        self.temperature_values = []
        self.rssi_values = []
        self.error_values = []
        self.lqi_values = []
        for node in range(self.nodes):
            self.temperature_values.append(np.zeros(self.messages))
            self.rssi_values.append(np.zeros(self.messages))
            self.lqi_values.append(np.zeros(self.messages))
            self.error_values.append(np.zeros(self.messages))

        # One Line2D handle per node and per metric so the data can be
        # swapped in place with set_ydata() later.
        self.errors_plot = []
        self.temperature_plot = []
        self.rssi_plot = []
        self.lqi_plot = []
        for node in range(self.nodes):
            self.errors_plot.append(self.errors.plot(
                self.x_values, self.error_values[node],
                color=self.colors[node], lw=self.lw)[0])
            self.rssi_plot.append(self.rssi.plot(
                self.x_values, self.rssi_values[node],
                color=self.colors[node], lw=self.lw)[0])
            self.lqi_plot.append(self.lqi.plot(
                self.x_values, self.lqi_values[node],
                color=self.colors[node], lw=self.lw)[0])
            self.temperature_plot.append(self.temperature.plot(
                self.x_values, self.temperature_values[node],
                color=self.colors[node], lw=self.lw)[0])

        plt.ion()
        plt.show()

        # multiprocessing support: fixed-size shared character buffers for
        # the raw message strings plus boolean flags signalling that new
        # data is waiting to be consumed by the child process.
        self._tx_message = Array('c', 2000)
        self._rx_message = Array('c', 2000)
        self._add_rx = Value('b', False)
        self._add_tx = Value('b', False)

        self.start()

    def add_tx_message(self, message):
        """Hand a raw transmitted message over to the visualizer process.

        Busy-waits (yielding the CPU) until the previous tx message was
        consumed by run().
        """
        while self._add_tx.value:
            time.sleep(0)
        self._tx_message.value = message
        self._add_tx.value = True

    def add_rx_message(self, message):
        """Hand a raw received message over to the visualizer process.

        Busy-waits (yielding the CPU) until the previous rx message was
        consumed by run().
        """
        while self._add_rx.value:
            time.sleep(0)
        self._rx_message.value = message
        self._add_rx.value = True

    def add_link(self, link):
        """Append the metric values of a completed link to the plots.

        Parameters
        ----------
        link : Link
            Link holding the tx message and all matching rx messages.
        """
        for node in range(self.nodes):
            if node == link.tx['id']:
                continue
            # rotate the node's history by one so index 0 is free for the
            # newest sample
            self.temperature_values[node] = \
                np.roll(self.temperature_values[node], 1)
            self.error_values[node] = np.roll(self.error_values[node], 1)
            self.rssi_values[node] = np.roll(self.rssi_values[node], 1)
            self.lqi_values[node] = np.roll(self.lqi_values[node], 1)

        # for every received message in the link
        for rx in link.rx:
            rx_id = rx['id']
            # can't plot too many things.
            # BUGFIX: valid node ids are 0..self.nodes-1, so an id equal to
            # self.nodes must be skipped too (the previous '>' comparison
            # let rx_id == self.nodes through, which raised an IndexError
            # when indexing the per-node value lists below).
            if rx_id >= self.nodes:
                continue

            # add the required values to the last entry
            self.temperature_values[rx_id][0] = rx['temperature']
            if rx['timeout'] == 0:
                self.rssi_values[rx_id][0] = rx['rssi']
                self.lqi_values[rx_id][0] = rx['lqi']
                if 'bit_errors' in rx:
                    self.error_values[rx_id][0] = rx['bit_errors']
            else:
                # timed out: no radio metrics available, mark the message
                # as completely lost (100% bit errors)
                self.rssi_values[rx_id][0] = 0
                self.lqi_values[rx_id][0] = 0
                self.error_values[rx_id][0] = 100

        for node in range(self.nodes):
            self.errors_plot[node].set_ydata(self.error_values[node])
            self.lqi_plot[node].set_ydata(self.lqi_values[node])
            self.rssi_plot[node].set_ydata(self.rssi_values[node])
            self.temperature_plot[node].set_ydata(
                self.temperature_values[node])

        # update the graph
        plt.draw()

    def run(self):
        """Main loop of the visualizer process.

        Polls the shared buffers; a new tx message finalizes (plots) the
        previous link and starts a new one, rx messages are appended to the
        current link.
        """
        while True:
            if self._add_tx.value:
                tx = StringMessage(str(self._tx_message.value))
                if tx:
                    # a new tx message closes the previous link
                    if self.link:
                        self.add_link(self.link)
                    self.link = Link(tx)
                else:
                    self.link = None
                self._add_tx.value = False

            if self._add_rx.value:
                if self.link:
                    rx = StringMessage(str(self._rx_message.value))
                    if rx:
                        self.link.add_rx(rx)
                self._add_rx.value = False

            time.sleep(0.1)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "MessageVisualizer"
| bsd-2-clause |
darcamo/pyphysim | apps/metis_scenarios/simulate_metis_scenario2.py | 1 | 18526 | #!/usr/bin/env python
"""
Simulator for the SINRs and capacity of a dense indoor scenario.
The scenario is a very simplified version of the Test Case 2 from the METIS
project. Only one floor of one building is simulated and only the indoor
access points are considered.
"""
import numpy as np
from matplotlib import gridspec
from matplotlib import pyplot as plt
from apps.metis_scenarios.simulate_metis_scenario import (
calc_num_walls, calc_room_positions_square, get_ap_positions,
plot_all_rooms)
from pyphysim.cell import shapes
from pyphysim.channels import pathloss
from pyphysim.channels.noise import calc_thermal_noise_power_dBm
from pyphysim.util.conversion import dB2Linear, dBm2Linear, linear2dB
def find_ap_assoc_best_channel(pl_all_plus_wl):
    """
    Find with which AP each user should associate using the best channel
    criterion.

    Each user picks the AP with the strongest channel, i.e. the largest
    linear path-loss gain (which corresponds to the lowest loss in dB).

    Parameters
    ----------
    pl_all_plus_wl : np.ndarray
        The path loss in linear scale including also any wall losses. This is
        a 2D numpy array (Dim: num users x num APs).

    Returns
    -------
    ap_assoc : np.ndarray
        1D int array with one element per user; element i is the index of
        the AP user i will associate with.
    """
    return np.argmax(pl_all_plus_wl, axis=-1)
def simulate_for_a_given_ap_assoc(pl_plus_wl_tx_aps, ap_assoc,
                                  transmitting_aps, Pt, noise_var):
    """
    Perform the simulation for a given AP association.

    Parameters
    ----------
    pl_plus_wl_tx_aps : np.ndarray
        Linear path loss (including wall losses) from every user to every
        transmitting AP (Dim: num users x num transmitting APs).
    ap_assoc : np.ndarray
        1D int array telling, for each user, the index (among all APs) of
        its serving AP.
    transmitting_aps : np.ndarray
        Indexes (among all APs) of the APs that actually transmit.
    Pt : float | np.ndarray
        Transmit power.
    noise_var : float
        The noise variance.

    Returns
    -------
    (np.ndarray, np.ndarray)
        A tuple with the SINRs (in dB) and the capacity (spectral
        efficiency) of each user.
    """
    num_users, = ap_assoc.shape

    # Output variables
    sinr_array = np.empty(ap_assoc.shape, dtype=float)
    capacity = np.empty(ap_assoc.shape, dtype=float)

    all_user_indexes = np.arange(num_users)

    for tx_idx, ap in enumerate(transmitting_aps):
        # 'ap' indexes the full AP list (including the silent APs), while
        # 'tx_idx' indexes only the transmitting APs / the columns of
        # pl_plus_wl_tx_aps.

        # Users served by the current AP
        served_users = all_user_indexes[ap_assoc == ap]

        # Boolean mask selecting every transmitting AP but the current one
        interfering_mask = np.ones(len(transmitting_aps), dtype=bool)
        interfering_mask[tx_idx] = False

        # Power received from the serving AP ...
        desired_power = Pt * pl_plus_wl_tx_aps[served_users, tx_idx]
        # ... and the summed power received from all interfering APs
        undesired_power = np.sum(
            Pt * pl_plus_wl_tx_aps[served_users][:, interfering_mask],
            axis=-1)

        sinr_array[served_users] = (desired_power /
                                    (undesired_power + noise_var))

        # Spectral efficiency from the SINR (we do not multiply by the
        # bandwidth). When several users share this AP we assume the
        # bandwidth is split evenly among them.
        capacity[served_users] = (np.log2(1 + sinr_array[served_users]) /
                                  len(served_users))

    return linear2dB(sinr_array), capacity
def perform_simulation(
        scenario_params,  # pylint: disable=R0914
        power_params,
        plot_results_bool=True):
    """
    Run the simulation.

    Parameters
    ----------
    scenario_params : dict
        Dictionary with simulation parameters.
        The keys are: 'side_length', 'single_wall_loss_dB',
        'num_rooms_per_side' and 'ap_decimation'.
    power_params : dict
        Dictionary with the power related parameters.
        The keys are 'Pt_dBm' and 'noise_power_dBm'.
    plot_results_bool : bool
        True if results should be plotted after the simulation finishes.

    Returns
    -------
    (np.ndarray, np.ndarray)
        Tuple with (sinr_array_pl_metis_ps7_dB, capacity_metis_ps7)
    """
    # xxxxxxxxxx Simulation Scenario Configuration xxxxxxxxxxxxxxxxxxxxxxxx
    # The size of the side of each square room
    side_length = scenario_params['side_length']

    # How much (in dB) is lost for each wall the signal has to pass
    single_wall_loss_dB = scenario_params['single_wall_loss_dB']

    # Square of 12 x 12 square rooms
    num_rooms_per_side = scenario_params['num_rooms_per_side']
    # Total number of rooms in the grid
    num_rooms = num_rooms_per_side**2

    # 1 means 1 ap every room. 2 means 1 ap every 2 rooms and so on. Valid
    # values are: 1, 2, 4 and 9.
    ap_decimation = scenario_params['ap_decimation']
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx Simulation Power Configuration xxxxxxxxxxxxxxxxxxxxxxxxxxx
    # Transmit power of each access point
    Pt_dBm = power_params['Pt_dBm']
    noise_power_dBm = power_params['noise_power_dBm']

    Pt = dBm2Linear(Pt_dBm)  # 20 dBm transmit power
    noise_var = dBm2Linear(noise_power_dBm)
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx Calculate the positions of all rooms xxxxxxxxxxxxxxxxxxxxx
    room_positions = calc_room_positions_square(side_length, num_rooms)
    room_positions.shape = (num_rooms_per_side, num_rooms_per_side)
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx Create the path loss object xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    pl_metis_ps7_obj = pathloss.PathLossMetisPS7()
    pl_metis_ps7_obj.handle_small_distances_bool = True
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx Add users in random positions in the 2D grid xxxxxxxxxxxxx
    num_users = 100  # We will create this many users in the 2D grid
    # Complex numbers encode 2D positions (real = x, imag = y), uniformly
    # drawn over the whole grid centered at the origin
    users_positions = (num_rooms_per_side * side_length *
                       (np.random.random_sample(num_users) +
                        1j * np.random.random_sample(num_users) - 0.5 - 0.5j))
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx AP Allocation xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # 1 AP in each room
    ap_positions = get_ap_positions(room_positions, ap_decimation)
    num_aps = ap_positions.size
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx Calculate distances: each user to each AP xxxxxxxxxxxxxxxx
    # Dimension: (num_users, num_APs)
    dists_m = np.abs(users_positions[:, np.newaxis] -
                     ap_positions[np.newaxis, :])
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxx Calculate AP association xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # INPUTS
    # Find in which room each user is (the room whose center is nearest)
    users_rooms = np.argmin(np.abs(
        room_positions.reshape([-1, 1]) - users_positions[np.newaxis, :]),
        axis=0)

    # Number of walls from each room to each other room
    num_walls_all_rooms = calc_num_walls(side_length, room_positions,
                                         ap_positions)
    # Number of walls from each room that has at least one user to each
    # room with an AP
    num_walls_rooms_with_users = num_walls_all_rooms[users_rooms]

    # Path loss from each user to each AP (no matter if it will be a
    # transmitting AP or not, since we still have to perform the AP
    # association)
    pl_all = pl_metis_ps7_obj.calc_path_loss(
        dists_m, num_walls=num_walls_rooms_with_users)

    # Calculate wall losses from each user to each AP (no matter if it will
    # be a transmitting AP or not, since we still have to perform the AP
    # association)
    wall_losses_dB_all = num_walls_rooms_with_users * single_wall_loss_dB

    # Calculate path loss plus wall losses (we multiply the linear values)
    # from each user to each AP (no matter if it will be a transmitting AP
    # or not, since we still have to perform the AP association)
    pl_all_plus_wl = pl_all * dB2Linear(-wall_losses_dB_all)

    # OUTPUTS
    # Determine with which AP each user is associated with.
    # Each user associates with the AP offering the BEST CHANNEL (lowest
    # path loss plus wall losses), which is not necessarily the closest AP.
    ap_assoc = find_ap_assoc_best_channel(pl_all_plus_wl)
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx Find which Access Points should stay on xxxxxxxxxxxxxxxxxx
    # Indexes of the active APs
    transmitting_aps, users_count = np.unique(ap_assoc, return_counts=True)
    # Asserts to tell pycharm that these are numpy arrays
    assert isinstance(transmitting_aps, np.ndarray)
    assert isinstance(users_count, np.ndarray)
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxx Calculate the SINRs for each path loss model xxxxxxxxxxxxx
    # Take the path loss plus wall losses only for the transmitting aps
    pl_all_plus_wall_losses_tx_aps = pl_all_plus_wl.take(transmitting_aps,
                                                         axis=1)
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx Calculate the SINRs and capacity xxxxxxxxxxxxxxxxxxxxxxxxx
    sinr_array_pl_metis_ps7_dB, capacity_metis_ps7 \
        = simulate_for_a_given_ap_assoc(
            pl_all_plus_wall_losses_tx_aps, ap_assoc,
            transmitting_aps, Pt, noise_var)
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxx Plot the results xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    if plot_results_bool is True:
        print(("\nMin/Mean/Max SINR value (METIS PS7):"
               "\n {0}\n {1}\n {2}").format(
                   sinr_array_pl_metis_ps7_dB.min(),
                   sinr_array_pl_metis_ps7_dB.mean(),
                   sinr_array_pl_metis_ps7_dB.max()))

        print(("\nMin/Mean/Max Capacity value (METIS PS7):"
               "\n {0}\n {1}\n {2}").format(capacity_metis_ps7.min(),
                                            capacity_metis_ps7.mean(),
                                            capacity_metis_ps7.max()))

        # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
        # xxxxxxxxxxxxxxx Plot the results xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
        # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
        # Create a mask for the active APs
        transmitting_aps_mask = np.zeros(num_aps, dtype=bool)
        transmitting_aps_mask[transmitting_aps] = True

        # Save how many users are associated with each AP
        users_per_ap = np.zeros(num_aps, dtype=int)
        users_per_ap[transmitting_aps_mask] = users_count

        # xxxxxxxxxx Plot all rooms and users xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
        all_rooms = [
            shapes.Rectangle(pos - side_length / 2. - side_length * 1j / 2.,
                             pos + side_length / 2. + side_length * 1j / 2.)
            for pos in room_positions.flatten()
        ]

        # Plot all Rooms and save the axis where they were plotted
        plt.figure(figsize=(10, 10))
        gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1])
        # ax1 is where we will plot everything
        ax1 = plt.subplot(gs[0])
        ax1.set_xlabel("Position X coordinate")
        ax1.set_ylabel("Position Y coordinate")
        ax1.set_title("Plot of all Rooms")
        ax1.set_ylim([-60, 60])
        ax1.set_xlim([-60, 60])
        ax1 = plot_all_rooms(all_rooms, ax1)
        # ax1.hold(True)

        # ax2 will be used for annotations
        ax2 = plt.subplot(gs[1])
        plt.setp(ax2.get_xticklabels(), visible=False)
        plt.setp(ax2.get_yticklabels(), visible=False)
        ax2.set_ylim([0, 10])
        ax2.set_xlim([0, 10])
        details = ax2.text(5,
                           5,
                           'Details',
                           verticalalignment='center',
                           horizontalalignment='center',
                           family='monospace')

        # Set an array with colors for the access points. Transmitting APs
        # will be blue, while inactive APs will be gray
        ap_colors = np.empty(ap_positions.shape, dtype='U4')
        ap_colors[transmitting_aps_mask] = 'b'
        ap_colors[np.logical_not(transmitting_aps_mask)] = 'gray'

        # Plot the access points. We set linewidth to 0.0 so that there is no
        # border. We set the size ('s' keyword) to 50 to make it larger. The
        # colors are set according to the ap_colors array.
        # Note that we set a 5 points tolerance for the pick event.
        aps_plt = ax1.scatter(ap_positions.real,
                              ap_positions.imag,
                              marker='^',
                              c=ap_colors,
                              linewidths=0.1,
                              s=50,
                              picker=3)

        # Plot the users
        # Note that we set a 5 points tolerance for the pick event.
        users_plt = ax1.scatter(users_positions.real,
                                users_positions.imag,
                                marker='*',
                                c='r',
                                linewidth=0.1,
                                s=50,
                                picker=3)

        # xxxxxxxxxx Define a function to call for the pick_event Circle used
        # to select an AP. We will set its visibility to False here. When an AP
        # is selected, we move this circle to its position and set its
        # visibility to True.
        selected_ap_circle = ax1.plot([0], [0],
                                      'o',
                                      ms=12,
                                      alpha=0.4,
                                      color='yellow',
                                      visible=False)[0]

        # Define the callback function for the pick event
        def on_pick(event):
            """Callback for the pick event in the matplotlib plot.

            Parameters
            ----------
            event : Matplotlib event
            """
            # We will reset users colors on each pick
            users_colors = np.empty(ap_assoc.size, dtype='U1')
            users_colors[:] = 'r'

            # Index of the point clicked
            ind = event.ind[0]

            if event.artist == aps_plt:
                # Disable the circle in the AP
                selected_ap_circle.set_visible(False)

                if ind not in ap_assoc:
                    # Text information for the disabled AP
                    text = "AP {0} (Disabled)".format(ind)
                else:
                    # Text information for the selected AP
                    text = "AP {0} with {1} user(s)\nTotal throughput: {2:7.4f}"
                    text = text.format(
                        ind, users_per_ap[ind],
                        np.sum(capacity_metis_ps7[ap_assoc == ind]))

                    # Change the colors of the users associated with the
                    # current AP to green
                    users_colors[ap_assoc == ind] = 'g'

            elif event.artist == users_plt:
                # Text information for the selected user
                text = "User {0}\n SINR: {1:7.4f}\nCapacity: {2:7.4f}".format(
                    ind, sinr_array_pl_metis_ps7_dB[ind],
                    capacity_metis_ps7[ind])

                # If other users are associated with the same AP of the
                # current user
                if users_per_ap[ap_assoc[ind]] > 1:
                    text = "{0}\nShares AP with {1} other user(s)".format(
                        text, users_per_ap[ap_assoc[ind]] - 1)

                users_AP = ap_assoc[ind]
                # Plot a yellow circle in the user's AP
                ap_pos = ap_positions[users_AP]
                # Change the color of other users in the same AP to green and
                # the current user to cyan
                users_colors[ap_assoc == users_AP] = 'g'
                users_colors[ind] = 'c'

                selected_ap_circle.set_visible(True)
                selected_ap_circle.set_data([ap_pos.real], [ap_pos.imag])

            # Set users colors
            users_plt.set_color(users_colors)

            # Set the details text
            # noinspection PyUnboundLocalVariable
            details.set_text(text)
            ax1.figure.canvas.draw()
        # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

        # Connect the on_pick function with the pick event
        ax1.figure.canvas.mpl_connect('pick_event', on_pick)

        plt.show()
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxx Return the results xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    return sinr_array_pl_metis_ps7_dB, capacity_metis_ps7
if __name__ == '__main__':
    # Scenario layout: a 12x12 grid of 10m x 10m rooms with a 5 dB loss per
    # wall and one AP every 2 rooms.
    scenario_params = {
        'side_length': 10.,  # 10 meters side length
        'single_wall_loss_dB': 5.,
        'num_rooms_per_side': 12,
        'ap_decimation': 2
    }

    power_params = {
        'Pt_dBm': 20.,  # 20 dBm transmit power
        # Noise power for 25°C for a bandwidth of 5 MHz -> -106.87 dBm
        'noise_power_dBm': calc_thermal_noise_power_dBm(25, 5e6)
    }

    # Run the full simulation (including the interactive plot) and unpack
    # the per-user results.
    out = perform_simulation(scenario_params,
                             power_params,
                             plot_results_bool=True)

    sinr_array_pl_metis_ps7_dB, capacity_metis_ps7 = out
| gpl-2.0 |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/core/reshape/merge.py | 1 | 61841 | """
SQL-style merge routines
"""
import copy
import warnings
import string
import numpy as np
from pandas.compat import range, lzip, zip, map, filter
import pandas.compat as compat
from pandas import (Categorical, DataFrame,
Index, MultiIndex, Timedelta)
from pandas.core.arrays.categorical import _recode_for_categories
from pandas.core.frame import _merge_doc
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_datetime64_dtype,
needs_i8_conversion,
is_int64_dtype,
is_array_like,
is_categorical_dtype,
is_integer_dtype,
is_float_dtype,
is_numeric_dtype,
is_integer,
is_int_or_datetime_dtype,
is_dtype_equal,
is_bool,
is_bool_dtype,
is_list_like,
is_datetimelike,
_ensure_int64,
_ensure_float64,
_ensure_object,
_get_dtype)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util._decorators import Appender, Substitution
from pandas.core.sorting import is_int64_overflow_possible
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
import pandas.core.common as com
from pandas._libs import hashtable as libhashtable, join as libjoin, lib
from pandas.errors import MergeError
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
          left_index=False, right_index=False, sort=False,
          suffixes=('_x', '_y'), copy=True, indicator=False,
          validate=None):
    # Thin public wrapper: all of the heavy lifting (validation, key
    # resolution and the actual join) lives in _MergeOperation. The
    # docstring is supplied by the @Appender decorator, so none is
    # defined here.
    operation = _MergeOperation(left, right, how=how, on=on,
                                left_on=left_on, right_on=right_on,
                                left_index=left_index,
                                right_index=right_index, sort=sort,
                                suffixes=suffixes, copy=copy,
                                indicator=indicator, validate=validate)
    return operation.get_result()
# Only build the formatted docstring when docstrings are kept at all:
# under "python -O" __debug__ is False and docstrings are stripped, so the
# string formatting work would be wasted.
if __debug__:
    merge.__doc__ = _merge_doc % '\nleft : DataFrame'
def _groupby_and_merge(by, on, left, right, _merge_pieces,
                       check_duplicates=True):
    """
    groupby & merge; we are always performing a left-by type operation

    Parameters
    ----------
    by : field to group
    on : duplicates field
    left : left frame
    right : right frame
    _merge_pieces : function for merging
        Called as ``_merge_pieces(lhs_piece, rhs_piece)`` for each group.
    check_duplicates : boolean, default True
        should we check & clean duplicates

    Returns
    -------
    (DataFrame, DataFrameGroupBy)
        The concatenated merge result (with the column order of the first
        piece) together with the groupby object of the left frame.
    """
    pieces = []
    if not isinstance(by, (list, tuple)):
        by = [by]

    lby = left.groupby(by, sort=False)

    # if we can groupby the rhs
    # then we can get vastly better perf
    try:

        # we will check & remove duplicates if indicated
        if check_duplicates:
            if on is None:
                on = []
            elif not isinstance(on, (list, tuple)):
                on = [on]

            if right.duplicated(by + on).any():
                right = right.drop_duplicates(by + on, keep='last')
        rby = right.groupby(by, sort=False)
    except KeyError:
        # grouping key(s) missing on the right side; fall back to merging
        # every left group against the whole right frame
        rby = None

    for key, lhs in lby:

        if rby is None:
            rhs = right
        else:
            try:
                rhs = right.take(rby.indices[key])
            except KeyError:
                # key doesn't exist in left
                # (i.e. this group has no matching rows on the right);
                # keep the left rows and add the right-only columns as NaN
                lcols = lhs.columns.tolist()
                cols = lcols + [r for r in right.columns
                                if r not in set(lcols)]
                merged = lhs.reindex(columns=cols)
                merged.index = range(len(merged))
                pieces.append(merged)
                continue

        merged = _merge_pieces(lhs, rhs)

        # make sure join keys are in the merged
        # TODO, should _merge_pieces do this?
        for k in by:
            try:
                if k in merged:
                    merged[k] = key
            except KeyError:
                pass

        pieces.append(merged)

    # preserve the original order
    # if we have a missing piece this can be reset
    from pandas.core.reshape.concat import concat

    result = concat(pieces, ignore_index=True)
    result = result.reindex(columns=pieces[0].columns, copy=False)
    return result, lby
def merge_ordered(left, right, on=None,
                  left_on=None, right_on=None,
                  left_by=None, right_by=None,
                  fill_method=None, suffixes=('_x', '_y'),
                  how='outer'):
    """Perform merge with optional filling/interpolation designed for ordered
    data like time series data. Optionally perform group-wise merge (see
    examples).

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    on : label or list
        Field names to join on. Must be found in both DataFrames.
    left_on : label or list, or array-like
        Field names to join on in left DataFrame. Can be a vector or list of
        vectors of the length of the DataFrame to use a particular vector as
        the join key instead of columns.
    right_on : label or list, or array-like
        Field names to join on in right DataFrame or vector/list of vectors
        per left_on docs.
    left_by : column name or list of column names
        Group left DataFrame by group columns and merge piece by piece with
        right DataFrame. Mutually exclusive with right_by.
    right_by : column name or list of column names
        Group right DataFrame by group columns and merge piece by piece with
        left DataFrame. Mutually exclusive with left_by.
    fill_method : {'ffill', None}, default None
        Interpolation method for data.
    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively.
    how : {'left', 'right', 'outer', 'inner'}, default 'outer'
        * left: use only keys from left frame (SQL: left outer join)
        * right: use only keys from right frame (SQL: right outer join)
        * outer: use union of keys from both frames (SQL: full outer join)
        * inner: use intersection of keys from both frames (SQL: inner join)

        .. versionadded:: 0.19.0

    Examples
    --------
    >>> A                      >>> B
          key  lvalue group        key  rvalue
    0   a       1     a    0   b       1
    1   c       2     a    1   c       2
    2   e       3     a    2   d       3
    3   a       1     b
    4   c       2     b
    5   e       3     b

    >>> merge_ordered(A, B, fill_method='ffill', left_by='group')
      group key  lvalue  rvalue
    0     a   a       1     NaN
    1     a   b       1     1.0
    2     a   c       2     2.0
    3     a   d       2     3.0
    4     a   e       3     3.0
    5     b   a       1     NaN
    6     b   b       1     1.0
    7     b   c       2     2.0
    8     b   d       2     3.0
    9     b   e       3     3.0

    Returns
    -------
    merged : DataFrame
        The output type will the be same as 'left', if it is a subclass
        of DataFrame.

    See also
    --------
    merge
    merge_asof
    """
    def _merger(x, y):
        # perform the ordered merge operation
        op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
                           suffixes=suffixes, fill_method=fill_method,
                           how=how)
        return op.get_result()

    # Guard clause: grouping on both sides at once is not supported
    if left_by is not None and right_by is not None:
        raise ValueError('Can only group either left or right frames')

    if left_by is not None:
        # Group the left frame and merge each piece against the right frame
        result, _ = _groupby_and_merge(left_by, on, left, right, _merger,
                                       check_duplicates=False)
    elif right_by is not None:
        # Group the right frame; swap the merger's arguments so the merge
        # itself is still performed left-to-right
        result, _ = _groupby_and_merge(right_by, on, right, left,
                                       lambda x, y: _merger(y, x),
                                       check_duplicates=False)
    else:
        result = _merger(left, right)
    return result
def merge_asof(left, right, on=None,
               left_on=None, right_on=None,
               left_index=False, right_index=False,
               by=None, left_by=None, right_by=None,
               suffixes=('_x', '_y'),
               tolerance=None,
               allow_exact_matches=True,
               direction='backward'):
    """Perform an asof merge. This is similar to a left-join except that we
    match on nearest key rather than equal keys.

    Both DataFrames must be sorted by the key.

    For each row in the left DataFrame:

      - A "backward" search selects the last row in the right DataFrame whose
        'on' key is less than or equal to the left's key.

      - A "forward" search selects the first row in the right DataFrame whose
        'on' key is greater than or equal to the left's key.

      - A "nearest" search selects the row in the right DataFrame whose 'on'
        key is closest in absolute distance to the left's key.

    The default is "backward" and is compatible in versions below 0.20.0.
    The direction parameter was added in version 0.20.0 and introduces
    "forward" and "nearest".

    Optionally match on equivalent keys with 'by' before searching with 'on'.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    on : label
        Field name to join on. Must be found in both DataFrames.
        The data MUST be ordered. Furthermore this must be a numeric column,
        such as datetimelike, integer, or float. On or left_on/right_on
        must be given.
    left_on : label
        Field name to join on in left DataFrame.
    right_on : label
        Field name to join on in right DataFrame.
    left_index : boolean
        Use the index of the left DataFrame as the join key.

        .. versionadded:: 0.19.2

    right_index : boolean
        Use the index of the right DataFrame as the join key.

        .. versionadded:: 0.19.2

    by : column name or list of column names
        Match on these columns before performing merge operation.
    left_by : column name
        Field names to match on in the left DataFrame.

        .. versionadded:: 0.19.2

    right_by : column name
        Field names to match on in the right DataFrame.

        .. versionadded:: 0.19.2

    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively.
    tolerance : integer or Timedelta, optional, default None
        Select asof tolerance within this range; must be compatible
        with the merge index.
    allow_exact_matches : boolean, default True

        - If True, allow matching with the same 'on' value
          (i.e. less-than-or-equal-to / greater-than-or-equal-to)
        - If False, don't match the same 'on' value
          (i.e., strictly less-than / strictly greater-than)

    direction : 'backward' (default), 'forward', or 'nearest'
        Whether to search for prior, subsequent, or closest matches.

        .. versionadded:: 0.20.0

    Returns
    -------
    merged : DataFrame

    Examples
    --------
    >>> left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})
    >>> left
        a left_val
    0   1        a
    1   5        b
    2  10        c

    >>> right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
    ...                       'right_val': [1, 2, 3, 6, 7]})
    >>> right
       a  right_val
    0  1          1
    1  2          2
    2  3          3
    3  6          6
    4  7          7

    >>> pd.merge_asof(left, right, on='a')
        a left_val  right_val
    0   1        a          1
    1   5        b          3
    2  10        c          7

    >>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
        a left_val  right_val
    0   1        a        NaN
    1   5        b        3.0
    2  10        c        7.0

    >>> pd.merge_asof(left, right, on='a', direction='forward')
        a left_val  right_val
    0   1        a        1.0
    1   5        b        6.0
    2  10        c        NaN

    >>> pd.merge_asof(left, right, on='a', direction='nearest')
        a left_val  right_val
    0   1        a          1
    1   5        b          6
    2  10        c          7

    We can use indexed DataFrames as well.

    >>> left = pd.DataFrame({'left_val': ['a', 'b', 'c']}, index=[1, 5, 10])
    >>> left
       left_val
    1         a
    5         b
    10        c

    >>> right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7]},
    ...                      index=[1, 2, 3, 6, 7])
    >>> right
       right_val
    1          1
    2          2
    3          3
    6          6
    7          7

    >>> pd.merge_asof(left, right, left_index=True, right_index=True)
       left_val  right_val
    1         a          1
    5         b          3
    10        c          7

    Here is a real-world time-series example

    >>> quotes
                         time ticker     bid     ask
    0 2016-05-25 13:30:00.023   GOOG  720.50  720.93
    1 2016-05-25 13:30:00.023   MSFT   51.95   51.96
    2 2016-05-25 13:30:00.030   MSFT   51.97   51.98
    3 2016-05-25 13:30:00.041   MSFT   51.99   52.00
    4 2016-05-25 13:30:00.048   GOOG  720.50  720.93
    5 2016-05-25 13:30:00.049   AAPL   97.99   98.01
    6 2016-05-25 13:30:00.072   GOOG  720.50  720.88
    7 2016-05-25 13:30:00.075   MSFT   52.01   52.03

    >>> trades
                         time ticker   price  quantity
    0 2016-05-25 13:30:00.023   MSFT   51.95        75
    1 2016-05-25 13:30:00.038   MSFT   51.95       155
    2 2016-05-25 13:30:00.048   GOOG  720.77       100
    3 2016-05-25 13:30:00.048   GOOG  720.92       100
    4 2016-05-25 13:30:00.048   AAPL   98.00       100

    By default we are taking the asof of the quotes

    >>> pd.merge_asof(trades, quotes,
    ...               on='time',
    ...               by='ticker')
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75   51.95   51.96
    1 2016-05-25 13:30:00.038   MSFT   51.95       155   51.97   51.98
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    We only asof within 2ms between the quote time and the trade time

    >>> pd.merge_asof(trades, quotes,
    ...               on='time',
    ...               by='ticker',
    ...               tolerance=pd.Timedelta('2ms'))
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75   51.95   51.96
    1 2016-05-25 13:30:00.038   MSFT   51.95       155     NaN     NaN
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    We only asof within 10ms between the quote time and the trade time
    and we exclude exact matches on time. However *prior* data will
    propagate forward

    >>> pd.merge_asof(trades, quotes,
    ...               on='time',
    ...               by='ticker',
    ...               tolerance=pd.Timedelta('10ms'),
    ...               allow_exact_matches=False)
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75     NaN     NaN
    1 2016-05-25 13:30:00.038   MSFT   51.95       155   51.97   51.98
    2 2016-05-25 13:30:00.048   GOOG  720.77       100     NaN     NaN
    3 2016-05-25 13:30:00.048   GOOG  720.92       100     NaN     NaN
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    See also
    --------
    merge
    merge_ordered
    """
    # Delegate all of the work to the _AsOfMerge operation object.
    op = _AsOfMerge(left, right,
                    on=on, left_on=left_on, right_on=right_on,
                    left_index=left_index, right_index=right_index,
                    by=by, left_by=left_by, right_by=right_by,
                    suffixes=suffixes,
                    how='asof', tolerance=tolerance,
                    allow_exact_matches=allow_exact_matches,
                    direction=direction)
    return op.get_result()
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
    """
    Perform a database (SQL) merge operation between two DataFrame objects
    using either columns as keys or their row indexes
    """
    # used by __finalize__ so the result can record which merge produced it
    _merge_type = 'merge'
    def __init__(self, left, right, how='inner', on=None,
                 left_on=None, right_on=None, axis=1,
                 left_index=False, right_index=False, sort=True,
                 suffixes=('_x', '_y'), copy=True, indicator=False,
                 validate=None):
        # Keep both working copies and the untouched originals; the
        # originals are needed later to restore index levels and to run
        # the `validate` uniqueness checks against unmodified keys.
        self.left = self.orig_left = left
        self.right = self.orig_right = right
        self.how = how
        self.axis = axis
        # normalize scalar/None key specifications to lists (or None)
        self.on = com._maybe_make_list(on)
        self.left_on = com._maybe_make_list(left_on)
        self.right_on = com._maybe_make_list(right_on)
        self.copy = copy
        self.suffixes = suffixes
        self.sort = sort
        self.left_index = left_index
        self.right_index = right_index
        self.indicator = indicator
        # indicator may be a bool (use default column name '_merge') or a
        # string giving the indicator column name explicitly
        if isinstance(self.indicator, compat.string_types):
            self.indicator_name = self.indicator
        elif isinstance(self.indicator, bool):
            self.indicator_name = '_merge' if self.indicator else None
        else:
            raise ValueError(
                'indicator option can only accept boolean or string arguments')
        if not isinstance(left, DataFrame):
            raise ValueError('can not merge DataFrame with instance of '
                             'type {left}'.format(left=type(left)))
        if not isinstance(right, DataFrame):
            raise ValueError('can not merge DataFrame with instance of '
                             'type {right}'.format(right=type(right)))
        if not is_bool(left_index):
            raise ValueError(
                'left_index parameter must be of type bool, not '
                '{left_index}'.format(left_index=type(left_index)))
        if not is_bool(right_index):
            raise ValueError(
                'right_index parameter must be of type bool, not '
                '{right_index}'.format(right_index=type(right_index)))
        # warn user when merging between different levels
        if left.columns.nlevels != right.columns.nlevels:
            msg = ('merging between different levels can give an unintended '
                   'result ({left} levels on the left, {right} on the right)'
                   ).format(left=left.columns.nlevels,
                            right=right.columns.nlevels)
            warnings.warn(msg, UserWarning)
        self._validate_specification()
        # note this function has side effects
        (self.left_join_keys,
         self.right_join_keys,
         self.join_names) = self._get_merge_keys()
        # validate the merge keys dtypes. We may need to coerce
        # to avoid incompat dtypes
        self._maybe_coerce_merge_keys()
        # If argument passed to validate,
        # check if columns specified as unique
        # are in fact unique.
        if validate is not None:
            self._validate(validate)
    def get_result(self):
        """Execute the merge and return the joined DataFrame.
        Computes join indexers, reindexes both sides' block managers onto
        the joined axis, concatenates them, then fixes up join keys,
        indicator column and restored index levels.
        """
        if self.indicator:
            self.left, self.right = self._indicator_pre_merge(
                self.left, self.right)
        join_index, left_indexer, right_indexer = self._get_join_info()
        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes
        # disambiguate overlapping column names with the suffixes
        llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
                                                     rdata.items, rsuf)
        # axis 1 of the block managers is the row axis; a None indexer
        # means that side's rows are taken as-is
        lindexers = {1: left_indexer} if left_indexer is not None else {}
        rindexers = {1: right_indexer} if right_indexer is not None else {}
        result_data = concatenate_block_managers(
            [(ldata, lindexers), (rdata, rindexers)],
            axes=[llabels.append(rlabels), join_index],
            concat_axis=0, copy=self.copy)
        typ = self.left._constructor
        result = typ(result_data).__finalize__(self, method=self._merge_type)
        if self.indicator:
            result = self._indicator_post_merge(result)
        self._maybe_add_join_keys(result, left_indexer, right_indexer)
        self._maybe_restore_index_levels(result)
        return result
    def _indicator_pre_merge(self, left, right):
        """Tag copies of both frames with helper columns (1 for left,
        2 for right) used to derive the merge-indicator afterwards."""
        columns = left.columns.union(right.columns)
        for i in ['_left_indicator', '_right_indicator']:
            if i in columns:
                raise ValueError("Cannot use `indicator=True` option when "
                                 "data contains a column named {name}"
                                 .format(name=i))
        if self.indicator_name in columns:
            raise ValueError(
                "Cannot use name of an existing column for indicator column")
        # copy so the caller's frames are not mutated
        left = left.copy()
        right = right.copy()
        left['_left_indicator'] = 1
        left['_left_indicator'] = left['_left_indicator'].astype('int8')
        right['_right_indicator'] = 2
        right['_right_indicator'] = right['_right_indicator'].astype('int8')
        return left, right
    def _indicator_post_merge(self, result):
        """Combine the helper columns into the categorical indicator column
        (1=left_only, 2=right_only, 3=both) and drop the helpers."""
        result['_left_indicator'] = result['_left_indicator'].fillna(0)
        result['_right_indicator'] = result['_right_indicator'].fillna(0)
        result[self.indicator_name] = Categorical((result['_left_indicator'] +
                                                   result['_right_indicator']),
                                                  categories=[1, 2, 3])
        result[self.indicator_name] = (
            result[self.indicator_name]
            .cat.rename_categories(['left_only', 'right_only', 'both']))
        result = result.drop(labels=['_left_indicator', '_right_indicator'],
                             axis=1)
        return result
    def _maybe_restore_index_levels(self, result):
        """
        Restore index levels specified as `on` parameters
        Here we check for cases where `self.left_on` and `self.right_on` pairs
        each reference an index level in their respective DataFrames. The
        joined columns corresponding to these pairs are then restored to the
        index of `result`.
        **Note:** This method has side effects. It modifies `result` in-place
        Parameters
        ----------
        result: DataFrame
            merge result
        Returns
        -------
        None
        """
        names_to_restore = []
        for name, left_key, right_key in zip(self.join_names,
                                             self.left_on,
                                             self.right_on):
            if (self.orig_left._is_level_reference(left_key) and
                    self.orig_right._is_level_reference(right_key) and
                    name not in result.index.names):
                names_to_restore.append(name)
        if names_to_restore:
            result.set_index(names_to_restore, inplace=True)
    def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
        """Materialize join-key values in `result` for rows where one side
        had no match, so key columns/levels don't end up with NaNs that
        could have been filled from the other side.
        Modifies `result` in place.
        """
        left_has_missing = None
        right_has_missing = None
        keys = zip(self.join_names, self.left_on, self.right_on)
        for i, (name, lname, rname) in enumerate(keys):
            if not _should_fill(lname, rname):
                continue
            # take_left / take_right hold the raw key values to pull from
            # when the result column needs (re)construction
            take_left, take_right = None, None
            if name in result:
                if left_indexer is not None and right_indexer is not None:
                    if name in self.left:
                        if left_has_missing is None:
                            # cache: -1 in the indexer marks unmatched rows
                            left_has_missing = (left_indexer == -1).any()
                        if left_has_missing:
                            take_right = self.right_join_keys[i]
                            if not is_dtype_equal(result[name].dtype,
                                                  self.left[name].dtype):
                                take_left = self.left[name]._values
                    elif name in self.right:
                        if right_has_missing is None:
                            right_has_missing = (right_indexer == -1).any()
                        if right_has_missing:
                            take_left = self.left_join_keys[i]
                            if not is_dtype_equal(result[name].dtype,
                                                  self.right[name].dtype):
                                take_right = self.right[name]._values
            elif left_indexer is not None \
                    and is_array_like(self.left_join_keys[i]):
                take_left = self.left_join_keys[i]
                take_right = self.right_join_keys[i]
            if take_left is not None or take_right is not None:
                if take_left is None:
                    lvals = result[name]._values
                else:
                    lfill = na_value_for_dtype(take_left.dtype)
                    lvals = algos.take_1d(take_left, left_indexer,
                                          fill_value=lfill)
                if take_right is None:
                    rvals = result[name]._values
                else:
                    rfill = na_value_for_dtype(take_right.dtype)
                    rvals = algos.take_1d(take_right, right_indexer,
                                          fill_value=rfill)
                # if we have an all missing left_indexer
                # make sure to just use the right values
                mask = left_indexer == -1
                if mask.all():
                    key_col = rvals
                else:
                    key_col = Index(lvals).where(~mask, rvals)
                if result._is_label_reference(name):
                    result[name] = key_col
                elif result._is_level_reference(name):
                    if isinstance(result.index, MultiIndex):
                        # rebuild the MultiIndex, swapping in the filled level
                        idx_list = [result.index.get_level_values(level_name)
                                    if level_name != name else key_col
                                    for level_name in result.index.names]
                        result.set_index(idx_list, inplace=True)
                    else:
                        result.index = Index(key_col, name=name)
                else:
                    # unnamed array key: insert at its positional slot
                    result.insert(i, name or 'key_{i}'.format(i=i), key_col)
    def _get_join_indexers(self):
        """ return the join indexers """
        return _get_join_indexers(self.left_join_keys,
                                  self.right_join_keys,
                                  sort=self.sort,
                                  how=self.how)
    def _get_join_info(self):
        """Compute (join_index, left_indexer, right_indexer) for the merge.
        A None indexer means that side's rows map through unchanged.
        Index-vs-index and index-vs-column joins take dedicated fast paths;
        otherwise the generic key-based indexers are used.
        """
        left_ax = self.left._data.axes[self.axis]
        right_ax = self.right._data.axes[self.axis]
        if self.left_index and self.right_index and self.how != 'asof':
            join_index, left_indexer, right_indexer = \
                left_ax.join(right_ax, how=self.how, return_indexers=True,
                             sort=self.sort)
        elif self.right_index and self.how == 'left':
            join_index, left_indexer, right_indexer = \
                _left_join_on_index(left_ax, right_ax, self.left_join_keys,
                                    sort=self.sort)
        elif self.left_index and self.how == 'right':
            # mirror of the case above with the operands swapped
            join_index, right_indexer, left_indexer = \
                _left_join_on_index(right_ax, left_ax, self.right_join_keys,
                                    sort=self.sort)
        else:
            (left_indexer,
             right_indexer) = self._get_join_indexers()
            if self.right_index:
                if len(self.left) > 0:
                    join_index = self.left.index.take(left_indexer)
                else:
                    join_index = self.right.index.take(right_indexer)
                    left_indexer = np.array([-1] * len(join_index))
            elif self.left_index:
                if len(self.right) > 0:
                    join_index = self.right.index.take(right_indexer)
                else:
                    join_index = self.left.index.take(left_indexer)
                    right_indexer = np.array([-1] * len(join_index))
            else:
                # neither side's index participates: default integer index
                join_index = Index(np.arange(len(left_indexer)))
        if len(join_index) == 0:
            join_index = join_index.astype(object)
        return join_index, left_indexer, right_indexer
    def _get_merge_keys(self):
        """
        Note: has side effects (copy/delete key columns)
        Parameters
        ----------
        left
        right
        on
        Returns
        -------
        left_keys, right_keys
        """
        left_keys = []
        right_keys = []
        join_names = []
        right_drop = []
        left_drop = []
        left, right = self.left, self.right
        stacklevel = 5  # Number of stack levels from df.merge
        # a "key" passed as an array-like of the right length is used
        # directly instead of being looked up as a label/level
        is_lkey = lambda x: is_array_like(x) and len(x) == len(left)
        is_rkey = lambda x: is_array_like(x) and len(x) == len(right)
        # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
        # user could, for example, request 'left_index' and 'left_by'. In a
        # regular pd.merge(), users cannot specify both 'left_index' and
        # 'left_on'. (Instead, users have a MultiIndex). That means the
        # self.left_on in this function is always empty in a pd.merge(), but
        # a pd.merge_asof(left_index=True, left_by=...) will result in a
        # self.left_on array with a None in the middle of it. This requires
        # a work-around as designated in the code below.
        # See _validate_specification() for where this happens.
        # ugh, spaghetti re #733
        if _any(self.left_on) and _any(self.right_on):
            for lk, rk in zip(self.left_on, self.right_on):
                if is_lkey(lk):
                    left_keys.append(lk)
                    if is_rkey(rk):
                        right_keys.append(rk)
                        join_names.append(None)  # what to do?
                    else:
                        if rk is not None:
                            right_keys.append(
                                right._get_label_or_level_values(
                                    rk, stacklevel=stacklevel))
                            join_names.append(rk)
                        else:
                            # work-around for merge_asof(right_index=True)
                            right_keys.append(right.index)
                            join_names.append(right.index.name)
                else:
                    if not is_rkey(rk):
                        if rk is not None:
                            right_keys.append(
                                right._get_label_or_level_values(
                                    rk, stacklevel=stacklevel))
                        else:
                            # work-around for merge_asof(right_index=True)
                            right_keys.append(right.index)
                        if lk is not None and lk == rk:
                            # avoid key upcast in corner case (length-0)
                            if len(left) > 0:
                                right_drop.append(rk)
                            else:
                                left_drop.append(lk)
                    else:
                        right_keys.append(rk)
                    if lk is not None:
                        left_keys.append(left._get_label_or_level_values(
                            lk, stacklevel=stacklevel))
                        join_names.append(lk)
                    else:
                        # work-around for merge_asof(left_index=True)
                        left_keys.append(left.index)
                        join_names.append(left.index.name)
        elif _any(self.left_on):
            # keys given only for the left: join against the right's index
            for k in self.left_on:
                if is_lkey(k):
                    left_keys.append(k)
                    join_names.append(None)
                else:
                    left_keys.append(left._get_label_or_level_values(
                        k, stacklevel=stacklevel))
                    join_names.append(k)
            if isinstance(self.right.index, MultiIndex):
                right_keys = [lev._values.take(lab)
                              for lev, lab in zip(self.right.index.levels,
                                                  self.right.index.labels)]
            else:
                right_keys = [self.right.index.values]
        elif _any(self.right_on):
            # mirror case: keys only for the right, join on left's index
            for k in self.right_on:
                if is_rkey(k):
                    right_keys.append(k)
                    join_names.append(None)
                else:
                    right_keys.append(right._get_label_or_level_values(
                        k, stacklevel=stacklevel))
                    join_names.append(k)
            if isinstance(self.left.index, MultiIndex):
                left_keys = [lev._values.take(lab)
                             for lev, lab in zip(self.left.index.levels,
                                                 self.left.index.labels)]
            else:
                left_keys = [self.left.index.values]
        if left_drop:
            self.left = self.left._drop_labels_or_levels(left_drop)
        if right_drop:
            self.right = self.right._drop_labels_or_levels(right_drop)
        return left_keys, right_keys, join_names
    def _maybe_coerce_merge_keys(self):
        # we have valid mergees but we may have to further
        # coerce these if they are originally incompatible types
        #
        # for example if these are categorical, but are not dtype_equal
        # or if we have object and integer dtypes
        for lk, rk, name in zip(self.left_join_keys,
                                self.right_join_keys,
                                self.join_names):
            if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
                continue
            lk_is_cat = is_categorical_dtype(lk)
            rk_is_cat = is_categorical_dtype(rk)
            # if either left or right is a categorical
            # then the must match exactly in categories & ordered
            if lk_is_cat and rk_is_cat:
                if lk.is_dtype_equal(rk):
                    continue
            elif lk_is_cat or rk_is_cat:
                # only one side categorical: fall through to the coercion
                # at the bottom
                pass
            elif is_dtype_equal(lk.dtype, rk.dtype):
                continue
            msg = ("You are trying to merge on {lk_dtype} and "
                   "{rk_dtype} columns. If you wish to proceed "
                   "you should use pd.concat".format(lk_dtype=lk.dtype,
                                                     rk_dtype=rk.dtype))
            # if we are numeric, then allow differing
            # kinds to proceed, eg. int64 and int8, int and float
            # further if we are object, but we infer to
            # the same, then proceed
            if is_numeric_dtype(lk) and is_numeric_dtype(rk):
                if lk.dtype.kind == rk.dtype.kind:
                    pass
                # check whether ints and floats
                elif is_integer_dtype(rk) and is_float_dtype(lk):
                    if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all():
                        warnings.warn('You are merging on int and float '
                                      'columns where the float values '
                                      'are not equal to their int '
                                      'representation', UserWarning)
                elif is_float_dtype(rk) and is_integer_dtype(lk):
                    if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all():
                        warnings.warn('You are merging on int and float '
                                      'columns where the float values '
                                      'are not equal to their int '
                                      'representation', UserWarning)
            # let's infer and see if we are ok
            elif lib.infer_dtype(lk) == lib.infer_dtype(rk):
                pass
            # Check if we are trying to merge on obviously
            # incompatible dtypes GH 9780, GH 15800
            # boolean values are considered as numeric, but are still allowed
            # to be merged on object boolean values
            elif ((is_numeric_dtype(lk) and not is_bool_dtype(lk))
                    and not is_numeric_dtype(rk)):
                raise ValueError(msg)
            elif (not is_numeric_dtype(lk)
                    and (is_numeric_dtype(rk) and not is_bool_dtype(rk))):
                raise ValueError(msg)
            elif is_datetimelike(lk) and not is_datetimelike(rk):
                raise ValueError(msg)
            elif not is_datetimelike(lk) and is_datetimelike(rk):
                raise ValueError(msg)
            elif is_datetime64tz_dtype(lk) and not is_datetime64tz_dtype(rk):
                raise ValueError(msg)
            elif not is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
                raise ValueError(msg)
            # Houston, we have a problem!
            # let's coerce to object if the dtypes aren't
            # categorical, otherwise coerce to the category
            # dtype. If we coerced categories to object,
            # then we would lose type information on some
            # columns, and end up trying to merge
            # incompatible dtypes. See GH 16900.
            else:
                if name in self.left.columns:
                    typ = lk.categories.dtype if lk_is_cat else object
                    self.left = self.left.assign(
                        **{name: self.left[name].astype(typ)})
                if name in self.right.columns:
                    typ = rk.categories.dtype if rk_is_cat else object
                    self.right = self.right.assign(
                        **{name: self.right[name].astype(typ)})
    def _validate_specification(self):
        """Normalize the on/left_on/right_on/index key specification.
        After this runs, self.left_on and self.right_on are equal-length
        sequences (possibly containing None placeholders for index joins).
        Raises MergeError for contradictory or missing key specs.
        """
        # Hm, any way to make this logic less complicated??
        if self.on is None and self.left_on is None and self.right_on is None:
            if self.left_index and self.right_index:
                self.left_on, self.right_on = (), ()
            elif self.left_index:
                if self.right_on is None:
                    raise MergeError('Must pass right_on or right_index=True')
            elif self.right_index:
                if self.left_on is None:
                    raise MergeError('Must pass left_on or left_index=True')
            else:
                # use the common columns
                common_cols = self.left.columns.intersection(
                    self.right.columns)
                if len(common_cols) == 0:
                    raise MergeError(
                        'No common columns to perform merge on. '
                        'Merge options: left_on={lon}, right_on={ron}, '
                        'left_index={lidx}, right_index={ridx}'
                        .format(lon=self.left_on, ron=self.right_on,
                                lidx=self.left_index, ridx=self.right_index))
                if not common_cols.is_unique:
                    raise MergeError("Data columns not unique: {common!r}"
                                     .format(common=common_cols))
                self.left_on = self.right_on = common_cols
        elif self.on is not None:
            if self.left_on is not None or self.right_on is not None:
                raise MergeError('Can only pass argument "on" OR "left_on" '
                                 'and "right_on", not a combination of both.')
            self.left_on = self.right_on = self.on
        elif self.left_on is not None:
            n = len(self.left_on)
            if self.right_index:
                if len(self.left_on) != self.right.index.nlevels:
                    raise ValueError('len(left_on) must equal the number '
                                     'of levels in the index of "right"')
                # None placeholders: join against right's index levels
                self.right_on = [None] * n
        elif self.right_on is not None:
            n = len(self.right_on)
            if self.left_index:
                if len(self.right_on) != self.left.index.nlevels:
                    raise ValueError('len(right_on) must equal the number '
                                     'of levels in the index of "left"')
                self.left_on = [None] * n
        if len(self.right_on) != len(self.left_on):
            raise ValueError("len(right_on) must equal len(left_on)")
    def _validate(self, validate):
        """Enforce the user-requested cardinality ('1:1', '1:m', 'm:1',
        'm:m') by checking key uniqueness on the original frames."""
        # Check uniqueness of each
        if self.left_index:
            left_unique = self.orig_left.index.is_unique
        else:
            left_unique = MultiIndex.from_arrays(self.left_join_keys
                                                 ).is_unique
        if self.right_index:
            right_unique = self.orig_right.index.is_unique
        else:
            right_unique = MultiIndex.from_arrays(self.right_join_keys
                                                  ).is_unique
        # Check data integrity
        if validate in ["one_to_one", "1:1"]:
            if not left_unique and not right_unique:
                raise MergeError("Merge keys are not unique in either left"
                                 " or right dataset; not a one-to-one merge")
            elif not left_unique:
                raise MergeError("Merge keys are not unique in left dataset;"
                                 " not a one-to-one merge")
            elif not right_unique:
                raise MergeError("Merge keys are not unique in right dataset;"
                                 " not a one-to-one merge")
        elif validate in ["one_to_many", "1:m"]:
            if not left_unique:
                raise MergeError("Merge keys are not unique in left dataset;"
                                 "not a one-to-many merge")
        elif validate in ["many_to_one", "m:1"]:
            if not right_unique:
                raise MergeError("Merge keys are not unique in right dataset;"
                                 " not a many-to-one merge")
        elif validate in ['many_to_many', 'm:m']:
            # no uniqueness requirement
            pass
        else:
            raise ValueError("Not a valid argument for validate")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
                       **kwargs):
    """
    Compute row indexers joining ``left_keys`` against ``right_keys``.
    Parameters
    ----------
    left_keys: ndarray, Index, Series
    right_keys: ndarray, Index, Series
    sort: boolean, default False
    how: string {'inner', 'outer', 'left', 'right'}, default 'inner'
    Returns
    -------
    tuple of (left_indexer, right_indexer)
        indexers into the left_keys, right_keys
    """
    assert len(left_keys) == len(right_keys), \
        'left_key and right_keys must be the same length'
    # factorize each key level into a shared integer label space
    factorized = [_factorize_keys(lk, rk, sort=sort)
                  for lk, rk in zip(left_keys, right_keys)]
    llab = [f[0] for f in factorized]
    rlab = [f[1] for f in factorized]
    shape = [f[2] for f in factorized]
    # collapse the per-level labels into flat i8 join keys
    lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
    # re-factorize into a dense i8 space; `count` is the number of unique
    # keys, i.e. set(lkey) | set(rkey) == range(count)
    lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
    # preserve left frame order if how == 'left' and sort == False
    kwargs = copy.copy(kwargs)
    if how == 'left':
        kwargs['sort'] = sort
    return _join_functions[how](lkey, rkey, count, **kwargs)
class _OrderedMerge(_MergeOperation):
    """An ordered merge (``how='outer'`` by default) that can forward-fill
    rows introduced by the join when ``fill_method='ffill'``."""
    _merge_type = 'ordered_merge'
    def __init__(self, left, right, on=None, left_on=None, right_on=None,
                 left_index=False, right_index=False, axis=1,
                 suffixes=('_x', '_y'), copy=True,
                 fill_method=None, how='outer'):
        # only 'ffill' triggers indexer forward-filling in get_result
        self.fill_method = fill_method
        _MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
                                 left_index=left_index,
                                 right_index=right_index,
                                 right_on=right_on, axis=axis,
                                 how=how, suffixes=suffixes,
                                 sort=True  # factorize sorts
                                 )
    def get_result(self):
        """Execute the ordered merge and return the joined DataFrame."""
        join_index, left_indexer, right_indexer = self._get_join_info()
        # this is a bit kludgy
        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes
        llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
                                                     rdata.items, rsuf)
        if self.fill_method == 'ffill':
            # forward-fill the *indexers* so unmatched rows reuse the most
            # recent preceding row instead of producing NaN
            left_join_indexer = libjoin.ffill_indexer(left_indexer)
            right_join_indexer = libjoin.ffill_indexer(right_indexer)
        else:
            left_join_indexer = left_indexer
            right_join_indexer = right_indexer
        lindexers = {
            1: left_join_indexer} if left_join_indexer is not None else {}
        rindexers = {
            1: right_join_indexer} if right_join_indexer is not None else {}
        result_data = concatenate_block_managers(
            [(ldata, lindexers), (rdata, rindexers)],
            axes=[llabels.append(rlabels), join_index],
            concat_axis=0, copy=self.copy)
        typ = self.left._constructor
        result = typ(result_data).__finalize__(self, method=self._merge_type)
        # note: keys are added from the *unfilled* indexers
        self._maybe_add_join_keys(result, left_indexer, right_indexer)
        return result
def _asof_function(direction, on_type):
name = 'asof_join_{dir}_{on}'.format(dir=direction, on=on_type)
return getattr(libjoin, name, None)
def _asof_by_function(direction, on_type, by_type):
name = 'asof_join_{dir}_{on}_by_{by}'.format(
dir=direction, on=on_type, by=by_type)
return getattr(libjoin, name, None)
# Coercions applied to 'by' key arrays so they match one of the dtypes
# the cython hash-table based asof-join routines accept.
_type_casters = {
    'int64_t': _ensure_int64,
    'double': _ensure_float64,
    'object': _ensure_object,
}
# numpy dtype name -> cython C type name used to pick the specialized
# asof-join routine; 'error' marks dtypes with no supported routine
# (see _get_cython_type, which raises MergeError for them).
_cython_types = {
    'uint8': 'uint8_t',
    'uint32': 'uint32_t',
    'uint16': 'uint16_t',
    'uint64': 'uint64_t',
    'int8': 'int8_t',
    'int32': 'int32_t',
    'int16': 'int16_t',
    'int64': 'int64_t',
    'float16': 'error',
    'float32': 'float',
    'float64': 'double',
}
def _get_cython_type(dtype):
    """ Given a dtype, return a C name like 'int64_t' or 'double' """
    type_name = _get_dtype(dtype).name
    try:
        ctype = _cython_types[type_name]
    except KeyError:
        # unknown dtypes fall back to the generic object routines
        return 'object'
    if ctype == 'error':
        raise MergeError('unsupported type: {type}'.format(type=type_name))
    return ctype
def _get_cython_type_upcast(dtype):
""" Upcast a dtype to 'int64_t', 'double', or 'object' """
if is_integer_dtype(dtype):
return 'int64_t'
elif is_float_dtype(dtype):
return 'double'
else:
return 'object'
class _AsOfMerge(_OrderedMerge):
    """Implementation of merge_asof(): match each left 'on' key to a
    nearby right key according to `direction`, optionally within `by`
    groups, within `tolerance`, with or without exact matches."""
    _merge_type = 'asof_merge'
    def __init__(self, left, right, on=None, left_on=None, right_on=None,
                 left_index=False, right_index=False,
                 by=None, left_by=None, right_by=None,
                 axis=1, suffixes=('_x', '_y'), copy=True,
                 fill_method=None,
                 how='asof', tolerance=None,
                 allow_exact_matches=True,
                 direction='backward'):
        self.by = by
        self.left_by = left_by
        self.right_by = right_by
        self.tolerance = tolerance
        self.allow_exact_matches = allow_exact_matches
        self.direction = direction
        _OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
                               right_on=right_on, left_index=left_index,
                               right_index=right_index, axis=axis,
                               how=how, suffixes=suffixes,
                               fill_method=fill_method)
    def _validate_specification(self):
        """Extend the base key validation with asof-specific rules and
        fold the 'by' columns into left_on/right_on (as leading keys)."""
        super(_AsOfMerge, self)._validate_specification()
        # we only allow on to be a single item for on
        if len(self.left_on) != 1 and not self.left_index:
            raise MergeError("can only asof on a key for left")
        if len(self.right_on) != 1 and not self.right_index:
            raise MergeError("can only asof on a key for right")
        if self.left_index and isinstance(self.left.index, MultiIndex):
            raise MergeError("left can only have one index")
        if self.right_index and isinstance(self.right.index, MultiIndex):
            raise MergeError("right can only have one index")
        # set 'by' columns
        if self.by is not None:
            if self.left_by is not None or self.right_by is not None:
                raise MergeError('Can only pass by OR left_by '
                                 'and right_by')
            self.left_by = self.right_by = self.by
        if self.left_by is None and self.right_by is not None:
            raise MergeError('missing left_by')
        if self.left_by is not None and self.right_by is None:
            raise MergeError('missing right_by')
        # add 'by' to our key-list so we can have it in the
        # output as a key
        if self.left_by is not None:
            if not is_list_like(self.left_by):
                self.left_by = [self.left_by]
            if not is_list_like(self.right_by):
                self.right_by = [self.right_by]
            if len(self.left_by) != len(self.right_by):
                raise MergeError('left_by and right_by must be same length')
            # 'by' keys go first; the asof 'on' key stays last (_asof_key)
            self.left_on = self.left_by + list(self.left_on)
            self.right_on = self.right_by + list(self.right_on)
        # check 'direction' is valid
        if self.direction not in ['backward', 'forward', 'nearest']:
            raise MergeError('direction invalid: {direction}'
                             .format(direction=self.direction))
    @property
    def _asof_key(self):
        """ This is our asof key, the 'on' """
        return self.left_on[-1]
    def _get_merge_keys(self):
        # note this function has side effects
        (left_join_keys,
         right_join_keys,
         join_names) = super(_AsOfMerge, self)._get_merge_keys()
        # validate index types are the same
        for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)):
            if not is_dtype_equal(lk.dtype, rk.dtype):
                raise MergeError("incompatible merge keys [{i}] {lkdtype} and "
                                 "{rkdtype}, must be the same type"
                                 .format(i=i, lkdtype=lk.dtype,
                                         rkdtype=rk.dtype))
        # validate tolerance; must be a Timedelta if we have a DTI
        if self.tolerance is not None:
            if self.left_index:
                lt = self.left.index
            else:
                lt = left_join_keys[-1]
            msg = ("incompatible tolerance {tolerance}, must be compat "
                   "with type {lkdtype}".format(
                       tolerance=type(self.tolerance),
                       lkdtype=lt.dtype))
            if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
                if not isinstance(self.tolerance, Timedelta):
                    raise MergeError(msg)
                if self.tolerance < Timedelta(0):
                    raise MergeError("tolerance must be positive")
            elif is_int64_dtype(lt):
                if not is_integer(self.tolerance):
                    raise MergeError(msg)
                if self.tolerance < 0:
                    raise MergeError("tolerance must be positive")
            else:
                raise MergeError("key must be integer or timestamp")
        # validate allow_exact_matches
        if not is_bool(self.allow_exact_matches):
            msg = "allow_exact_matches must be boolean, passed {passed}"
            raise MergeError(msg.format(passed=self.allow_exact_matches))
        return left_join_keys, right_join_keys, join_names
    def _get_join_indexers(self):
        """ return the join indexers """
        def flip(xs):
            """ unlike np.transpose, this returns an array of tuples """
            labels = list(string.ascii_lowercase[:len(xs)])
            dtypes = [x.dtype for x in xs]
            labeled_dtypes = list(zip(labels, dtypes))
            return np.array(lzip(*xs), labeled_dtypes)
        # values to compare
        left_values = (self.left.index.values if self.left_index else
                       self.left_join_keys[-1])
        right_values = (self.right.index.values if self.right_index else
                        self.right_join_keys[-1])
        tolerance = self.tolerance
        # we required sortedness in the join keys
        msg = "{side} keys must be sorted"
        if not Index(left_values).is_monotonic:
            raise ValueError(msg.format(side='left'))
        if not Index(right_values).is_monotonic:
            raise ValueError(msg.format(side='right'))
        # initial type conversion as needed
        if needs_i8_conversion(left_values):
            # datetimelike keys are compared on their i8 representation;
            # a Timedelta tolerance is reduced to its i8 value accordingly
            left_values = left_values.view('i8')
            right_values = right_values.view('i8')
            if tolerance is not None:
                tolerance = tolerance.value
        # a "by" parameter requires special handling
        if self.left_by is not None:
            # remove 'on' parameter from values if one existed
            if self.left_index and self.right_index:
                left_by_values = self.left_join_keys
                right_by_values = self.right_join_keys
            else:
                left_by_values = self.left_join_keys[0:-1]
                right_by_values = self.right_join_keys[0:-1]
            # get tuple representation of values if more than one
            if len(left_by_values) == 1:
                left_by_values = left_by_values[0]
                right_by_values = right_by_values[0]
            else:
                left_by_values = flip(left_by_values)
                right_by_values = flip(right_by_values)
            # upcast 'by' parameter because HashTable is limited
            by_type = _get_cython_type_upcast(left_by_values.dtype)
            by_type_caster = _type_casters[by_type]
            left_by_values = by_type_caster(left_by_values)
            right_by_values = by_type_caster(right_by_values)
            # choose appropriate function by type
            on_type = _get_cython_type(left_values.dtype)
            func = _asof_by_function(self.direction, on_type, by_type)
            return func(left_values,
                        right_values,
                        left_by_values,
                        right_by_values,
                        self.allow_exact_matches,
                        tolerance)
        else:
            # choose appropriate function by type
            on_type = _get_cython_type(left_values.dtype)
            func = _asof_function(self.direction, on_type)
            return func(left_values,
                        right_values,
                        self.allow_exact_matches,
                        tolerance)
def _get_multiindex_indexer(join_keys, index, sort):
    """Left-outer-join indexers for ``join_keys`` against the levels of a
    MultiIndex ``index`` (the right side)."""
    from functools import partial
    # bind `sort` argument
    fkeys = partial(_factorize_keys, sort=sort)
    # left & right join labels and num. of levels at each location
    rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys)))
    if sort:
        # translate the index's codes into the sorted label space
        rlab = list(map(np.take, rlab, index.labels))
    else:
        i8copy = lambda a: a.astype('i8', subok=False, copy=True)
        rlab = list(map(i8copy, index.labels))
    # fix right labels if there were any nulls
    for i in range(len(join_keys)):
        mask = index.labels[i] == -1
        if mask.any():
            # check if there already was any nulls at this location
            # if there was, it is factorized to `shape[i] - 1`
            a = join_keys[i][llab[i] == shape[i] - 1]
            if a.size == 0 or not a[0] != a[0]:
                # left side has no NaN in this level: give the right
                # side's NaNs a fresh label so they can never match
                shape[i] += 1
            rlab[i][mask] = shape[i] - 1
    # get flat i8 join keys
    lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
    # factorize keys to a dense i8 space
    lkey, rkey, count = fkeys(lkey, rkey)
    return libjoin.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
    """Left-outer-join indexers for a single ``join_key`` against ``index``."""
    lkey, rkey, count = _factorize_keys(join_key, index, sort=sort)
    # the cython routine requires int64 label arrays
    lkey = _ensure_int64(lkey)
    rkey = _ensure_int64(rkey)
    return libjoin.left_outer_join(lkey, rkey, count, sort=sort)
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
    """Left join of ``join_keys`` (values from the left frame) against
    ``right_ax``.
    Returns ``(join_index, left_indexer, right_indexer)``; ``left_indexer``
    is None when the left axis can be reused unchanged (no sorting and a
    strictly 1-to-1 match).
    """
    if len(join_keys) > 1:
        if not ((isinstance(right_ax, MultiIndex) and
                 len(join_keys) == right_ax.nlevels)):
            raise AssertionError("If more than one join key is given then "
                                 "'right_ax' must be a MultiIndex and the "
                                 "number of join keys must be the number of "
                                 "levels in right_ax")
        left_indexer, right_indexer = \
            _get_multiindex_indexer(join_keys, right_ax, sort=sort)
    else:
        jkey = join_keys[0]
        left_indexer, right_indexer = \
            _get_single_indexer(jkey, right_ax, sort=sort)
    if sort or len(left_ax) != len(left_indexer):
        # if asked to sort or there are 1-to-many matches
        join_index = left_ax.take(left_indexer)
        return join_index, left_indexer, right_indexer
    # left frame preserves order & length of its index
    return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
    """Right outer join of ``x`` against ``y``, implemented as a left
    outer join with the operands swapped."""
    indexers = libjoin.left_outer_join(y, x, max_groups)
    # left_outer_join(y, x) yields (y_indexer, x_indexer); flip them back
    return indexers[1], indexers[0]
# Dispatch table mapping `how` to the cython join implementation.
_join_functions = {
    'inner': libjoin.inner_join,
    'left': libjoin.left_outer_join,
    'right': _right_outer_join,
    'outer': libjoin.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
    """Factorize ``lk`` and ``rk`` into one shared integer label space.
    Returns ``(llab, rlab, count)``: int64 label arrays for the left and
    right keys and the number of distinct groups.  If NA values occur on
    either side they are remapped from -1 to one extra trailing group, so
    NAs on both sides compare equal to each other.
    """
    if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
        # compare tz-aware keys on their underlying values
        lk = lk.values
        rk = rk.values
    # if we exactly match in categories, allow us to factorize on codes
    if (is_categorical_dtype(lk) and
            is_categorical_dtype(rk) and
            lk.is_dtype_equal(rk)):
        klass = libhashtable.Int64Factorizer
        if lk.categories.equals(rk.categories):
            rk = rk.codes
        else:
            # Same categories in different orders -> recode
            rk = _recode_for_categories(rk.codes, rk.categories, lk.categories)
        lk = _ensure_int64(lk.codes)
        rk = _ensure_int64(rk)
    elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk):
        klass = libhashtable.Int64Factorizer
        lk = _ensure_int64(com._values_from_object(lk))
        rk = _ensure_int64(com._values_from_object(rk))
    else:
        # generic fallback: factorize as python objects
        klass = libhashtable.Factorizer
        lk = _ensure_object(lk)
        rk = _ensure_object(rk)
    # a single factorizer instance guarantees both sides share labels
    rizer = klass(max(len(lk), len(rk)))
    llab = rizer.factorize(lk)
    rlab = rizer.factorize(rk)
    count = rizer.get_count()
    if sort:
        uniques = rizer.uniques.to_array()
        llab, rlab = _sort_labels(uniques, llab, rlab)
    # NA group
    lmask = llab == -1
    lany = lmask.any()
    rmask = rlab == -1
    rany = rmask.any()
    if lany or rany:
        if lany:
            np.putmask(llab, lmask, count)
        if rany:
            np.putmask(rlab, rmask, count)
        count += 1
    return llab, rlab, count
def _sort_labels(uniques, left, right):
    """Remap the ``left``/``right`` label arrays so labels enumerate
    ``uniques`` in sorted order; returns the relabeled pair."""
    if not isinstance(uniques, np.ndarray):
        # tuplesafe
        uniques = Index(uniques).values
    n_left = len(left)
    # relabel both sides in one pass, then split again
    stacked = np.concatenate([left, right])
    _, relabeled = sorting.safe_sort(uniques, stacked, na_sentinel=-1)
    relabeled = _ensure_int64(relabeled)
    return relabeled[:n_left], relabeled[n_left:]
def _get_join_keys(llab, rlab, shape, sort):
    """Collapse per-level label arrays into single flat int64 key arrays.
    Combines as many leading levels as fit without int64 overflow; if
    levels remain, the partial keys are densified via factorization and
    the function recurses on the remainder.
    """
    # how many levels can be done without overflow
    pred = lambda i: not is_int64_overflow_possible(shape[:i])
    nlev = next(filter(pred, range(len(shape), 0, -1)))
    # get keys for the first `nlev` levels
    stride = np.prod(shape[1:nlev], dtype='i8')
    lkey = stride * llab[0].astype('i8', subok=False, copy=False)
    rkey = stride * rlab[0].astype('i8', subok=False, copy=False)
    for i in range(1, nlev):
        with np.errstate(divide='ignore'):
            stride //= shape[i]
        lkey += llab[i] * stride
        rkey += rlab[i] * stride
    if nlev == len(shape):  # all done!
        return lkey, rkey
    # densify current keys to avoid overflow
    lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
    llab = [lkey] + llab[nlev:]
    rlab = [rkey] + rlab[nlev:]
    shape = [count] + shape[nlev:]
    return _get_join_keys(llab, rlab, shape, sort)
def _should_fill(lname, rname):
    """Whether a merged key column needs NaN back-filling: always, except
    when both sides reference the same named (string) key."""
    both_named = (isinstance(lname, compat.string_types) and
                  isinstance(rname, compat.string_types))
    if not both_named:
        return True
    return lname == rname
def _any(x):
    """True when ``x`` is a non-None sequence containing at least one
    non-None element."""
    if x is None:
        return False
    return com._any_not_none(*x)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.