repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
jhamman/xarray | asv_bench/benchmarks/rolling.py | 3 | 2217 | import numpy as np
import pandas as pd
import xarray as xr
from . import parameterized, randn, requires_dask
# Benchmark problem sizes.
nx = 3000
long_nx = 30000000  # length of the long 1-D array used against pandas
ny = 2000
nt = 1000
window = 20  # default rolling-window width
# Random input data; frac_nan sprinkles NaNs to exercise the NaN-skipping paths.
randn_xy = randn((nx, ny), frac_nan=0.1)
randn_xt = randn((nx, nt))
randn_t = randn((nt,))
randn_long = randn((long_nx,), frac_nan=0.1)
class Rolling:
    """Benchmarks for rolling-window operations on in-memory xarray objects."""

    def setup(self, *args, **kwargs):
        # Dataset with 2-D, mixed-dimension and 1-D variables plus a
        # non-index coordinate, so rolling is exercised across dim layouts.
        self.ds = xr.Dataset(
            {
                "var1": (("x", "y"), randn_xy),
                "var2": (("x", "t"), randn_xt),
                "var3": (("t",), randn_t),
            },
            coords={
                "x": np.arange(nx),
                "y": np.linspace(0, 1, ny),
                "t": pd.date_range("1970-01-01", periods=nt, freq="D"),
                "x_coords": ("x", np.linspace(1.1, 2.1, nx)),
            },
        )
        # Long 1-D array for comparing xarray's rolling against pandas'.
        self.da_long = xr.DataArray(
            randn_long, dims="x", coords={"x": np.arange(long_nx) * 0.1}
        )

    @parameterized(["func", "center"], (["mean", "count"], [True, False]))
    def time_rolling(self, func, center):
        # .load() forces computation so the benchmark measures real work.
        getattr(self.ds.rolling(x=window, center=center), func)().load()

    @parameterized(["func", "pandas"], (["mean", "count"], [True, False]))
    def time_rolling_long(self, func, pandas):
        if pandas:
            se = self.da_long.to_series()
            getattr(se.rolling(window=window), func)()
        else:
            getattr(self.da_long.rolling(x=window), func)().load()

    @parameterized(["window_", "min_periods"], ([20, 40], [5, None]))
    def time_rolling_np(self, window_, min_periods):
        # Direct attribute access instead of getattr(np, "nanmean").
        self.ds.rolling(x=window_, center=False, min_periods=min_periods).reduce(
            np.nanmean
        ).load()

    @parameterized(["center", "stride"], ([True, False], [1, 200]))
    def time_rolling_construct(self, center, stride):
        self.ds.rolling(x=window, center=center).construct(
            "window_dim", stride=stride
        ).mean(dim="window_dim").load()
class RollingDask(Rolling):
    """Same benchmarks as Rolling, but on dask-chunked objects."""

    def setup(self, *args, **kwargs):
        requires_dask()
        # Forward positional args too: the parent setup accepts *args and
        # the original call silently dropped them.
        super().setup(*args, **kwargs)
        self.ds = self.ds.chunk({"x": 100, "y": 50, "t": 50})
        self.da_long = self.da_long.chunk({"x": 10000})
| apache-2.0 |
sonium0/pymatgen | pymatgen/electronic_structure/plotter.py | 2 | 42465 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
"""
This module implements plotter for DOS and band structure.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 1, 2012"
import logging
import math
import itertools
from collections import OrderedDict
import numpy as np
from monty.json import jsanitize
from pymatgen.electronic_structure.core import Spin
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
logger = logging.getLogger('BSPlotter')
class DosPlotter(object):
    """
    Class for plotting DOSs. Note that the interface is extremely flexible
    given that there are many different ways in which people want to view
    DOS. The typical usage is::
        # Initializes plotter with some optional args. Defaults are usually
        # fine,
        plotter = DosPlotter()
        # Adds a DOS with a label.
        plotter.add_dos("Total DOS", dos)
        # Alternatively, you can add a dict of DOSs. This is the typical
        # form returned by CompleteDos.get_spd/element/others_dos().
        plotter.add_dos_dict({"dos1": dos1, "dos2": dos2})
        plotter.add_dos_dict(complete_dos.get_spd_dos())
    Args:
        zero_at_efermi: Whether to shift all Dos to have zero energy at the
            fermi energy. Defaults to True.
        stack: Whether to plot the DOS as a stacked area graph
        key_sort_func: function used to sort the dos_dict keys.
        sigma: A float specifying a standard deviation for Gaussian smearing
            the DOS for nicer looking plots. Defaults to None for no
            smearing.
    """
    def __init__(self, zero_at_efermi=True, stack=False, sigma=None):
        self.zero_at_efermi = zero_at_efermi
        self.stack = stack
        self.sigma = sigma
        # Insertion-ordered so stacked plots layer in the order DOSs were
        # added (and get_plot can reverse it for legend order).
        self._doses = OrderedDict()
    def add_dos(self, label, dos):
        """
        Adds a dos for plotting.
        Args:
            label:
                label for the DOS. Must be unique.
            dos:
                Dos object
        """
        # Shift energies so E_F sits at zero when requested; smear densities
        # only if a sigma was supplied.
        energies = dos.energies - dos.efermi if self.zero_at_efermi \
            else dos.energies
        densities = dos.get_smeared_densities(self.sigma) if self.sigma \
            else dos.densities
        efermi = dos.efermi
        self._doses[label] = {'energies': energies, 'densities': densities,
                              'efermi': efermi}
    def add_dos_dict(self, dos_dict, key_sort_func=None):
        """
        Add a dictionary of doses, with an optional sorting function for the
        keys.
        Args:
            dos_dict: dict of {label: Dos}
            key_sort_func: function used to sort the dos_dict keys.
        """
        if key_sort_func:
            keys = sorted(dos_dict.keys(), key=key_sort_func)
        else:
            keys = dos_dict.keys()
        for label in keys:
            self.add_dos(label, dos_dict[label])
    def get_dos_dict(self):
        """
        Returns the added doses as a json-serializable dict. Note that if you
        have specified smearing for the DOS plot, the densities returned will
        be the smeared densities, not the original densities.
        Returns:
            Dict of dos data. Generally of the form, {label: {'energies':..,
            'densities': {'up':...}, 'efermi':efermi}}
        """
        return jsanitize(self._doses)
    def get_plot(self, xlim=None, ylim=None):
        """
        Get a matplotlib plot showing the DOS.
        Args:
            xlim: Specifies the x-axis limits. Set to None for automatic
                determination.
            ylim: Specifies the y-axis limits.
        """
        # NOTE(review): prettyplotlib/brewer2mpl are third-party plotting
        # helpers; they must be installed for this method to work.
        import prettyplotlib as ppl
        from prettyplotlib import brewer2mpl
        from pymatgen.util.plotting_utils import get_publication_quality_plot
        # Clamp the colour count to the Set1 palette's supported range (3-9).
        ncolors = max(3, len(self._doses))
        ncolors = min(9, ncolors)
        colors = brewer2mpl.get_map('Set1', 'qualitative', ncolors).mpl_colors
        y = None
        alldensities = []
        allenergies = []
        plt = get_publication_quality_plot(12, 8)
        # Note that this complicated processing of energies is to allow for
        # stacked plots in matplotlib.
        for key, dos in self._doses.items():
            energies = dos['energies']
            densities = dos['densities']
            if not y:
                # Running totals per spin, used as the stacking baseline.
                y = {Spin.up: np.zeros(energies.shape),
                     Spin.down: np.zeros(energies.shape)}
            newdens = {}
            for spin in [Spin.up, Spin.down]:
                if spin in densities:
                    if self.stack:
                        y[spin] += densities[spin]
                        newdens[spin] = y[spin].copy()
                    else:
                        newdens[spin] = densities[spin]
            allenergies.append(energies)
            alldensities.append(newdens)
        # Reverse so the first-added DOS is drawn last (on top).
        keys = list(self._doses.keys())
        keys.reverse()
        alldensities.reverse()
        allenergies.reverse()
        allpts = []
        for i, key in enumerate(keys):
            x = []
            y = []
            for spin in [Spin.up, Spin.down]:
                if spin in alldensities[i]:
                    # NOTE(review): relies on int(Spin.down) == -1 so that
                    # spin-down densities are plotted below the axis.
                    densities = list(int(spin) * alldensities[i][spin])
                    energies = list(allenergies[i])
                    if spin == Spin.down:
                        # Reverse so the up+down trace forms one continuous
                        # closed path for plt.fill.
                        energies.reverse()
                        densities.reverse()
                    x.extend(energies)
                    y.extend(densities)
            allpts.extend(list(zip(x, y)))
            if self.stack:
                plt.fill(x, y, color=colors[i % ncolors],
                         label=str(key))
            else:
                ppl.plot(x, y, color=colors[i % ncolors],
                         label=str(key), linewidth=3)
            if not self.zero_at_efermi:
                # Mark each DOS's Fermi level with a dashed vertical line.
                ylim = plt.ylim()
                ppl.plot([self._doses[key]['efermi'],
                          self._doses[key]['efermi']], ylim,
                         color=colors[i % ncolors],
                         linestyle='--', linewidth=2)
        if xlim:
            plt.xlim(xlim)
        if ylim:
            plt.ylim(ylim)
        else:
            # Auto-scale y to the density values inside the visible x range.
            xlim = plt.xlim()
            relevanty = [p[1] for p in allpts
                         if xlim[0] < p[0] < xlim[1]]
            plt.ylim((min(relevanty), max(relevanty)))
        if self.zero_at_efermi:
            ylim = plt.ylim()
            plt.plot([0, 0], ylim, 'k--', linewidth=2)
        plt.xlabel('Energies (eV)')
        plt.ylabel('Density of states')
        plt.legend()
        leg = plt.gca().get_legend()
        ltext = leg.get_texts()  # all the text.Text instance in the legend
        plt.setp(ltext, fontsize=30)
        plt.tight_layout()
        return plt
    def save_plot(self, filename, img_format="eps", xlim=None, ylim=None):
        """
        Save matplotlib plot to a file.
        Args:
            filename: Filename to write to.
            img_format: Image format to use. Defaults to EPS.
            xlim: Specifies the x-axis limits. Set to None for automatic
                determination.
            ylim: Specifies the y-axis limits.
        """
        plt = self.get_plot(xlim, ylim)
        plt.savefig(filename, format=img_format)
    def show(self, xlim=None, ylim=None):
        """
        Show the plot using matplotlib.
        Args:
            xlim: Specifies the x-axis limits. Set to None for automatic
                determination.
            ylim: Specifies the y-axis limits.
        """
        plt = self.get_plot(xlim, ylim)
        plt.show()
class BSPlotter(object):
    """
    Class to plot or get data to facilitate the plot of band structure objects.
    Args:
        bs: A BandStructureSymmLine object.
    """
    def __init__(self, bs):
        if not isinstance(bs, BandStructureSymmLine):
            raise ValueError(
                "BSPlotter only works with BandStructureSymmLine objects. "
                "A BandStructure object (on a uniform grid for instance and "
                "not along symmetry lines won't work)")
        self._bs = bs
        # TODO: come with an intelligent way to cut the highest unconverged
        # bands
        self._nb_bands = self._bs._nb_bands
    def _maketicks(self, plt):
        """
        utility private method to add ticks to a band structure
        """
        ticks = self.get_ticks()
        # Sanitize only plot the uniq values
        uniq_d = []
        uniq_l = []
        temp_ticks = list(zip(ticks['distance'], ticks['label']))
        for i in range(len(temp_ticks)):
            if i == 0:
                uniq_d.append(temp_ticks[i][0])
                uniq_l.append(temp_ticks[i][1])
                # Fixed: label/distance arguments were swapped in the
                # debug message.
                logger.debug("Adding label {l} at {d}".format(
                    l=temp_ticks[i][1], d=temp_ticks[i][0]))
            else:
                if temp_ticks[i][1] == temp_ticks[i - 1][1]:
                    logger.debug("Skipping label {i}".format(
                        i=temp_ticks[i][1]))
                else:
                    logger.debug("Adding label {l} at {d}".format(
                        l=temp_ticks[i][1], d=temp_ticks[i][0]))
                    uniq_d.append(temp_ticks[i][0])
                    uniq_l.append(temp_ticks[i][1])
        logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
        plt.gca().set_xticks(uniq_d)
        plt.gca().set_xticklabels(uniq_l)
        # Draw a vertical line at every labelled (high-symmetry) k-point,
        # skipping consecutive duplicate labels.
        for i in range(len(ticks['label'])):
            if ticks['label'][i] is not None:
                # don't print the same label twice
                if i != 0:
                    if ticks['label'][i] == ticks['label'][i - 1]:
                        logger.debug("already print label... "
                                     "skipping label {i}".format(
                                         i=ticks['label'][i]))
                    else:
                        logger.debug("Adding a line at {d}"
                                     " for label {l}".format(
                                         d=ticks['distance'][i],
                                         l=ticks['label'][i]))
                        plt.axvline(ticks['distance'][i], color='k')
                else:
                    logger.debug("Adding a line at {d} for label {l}".format(
                        d=ticks['distance'][i], l=ticks['label'][i]))
                    plt.axvline(ticks['distance'][i], color='k')
        return plt
    def bs_plot_data(self, zero_to_efermi=True):
        """
        Get the data nicely formatted for a plot
        Args:
            zero_to_efermi: Automatically subtract off the Fermi energy from the
                eigenvalues and plot.
        Returns:
            A dict of the following format:
            ticks: A dict with the 'distances' at which there is a kpoint (the
            x axis) and the labels (None if no label)
            energy: A dict storing bands for spin up and spin down data
            [{Spin:[band_index][k_point_index]}] as a list (one element
            for each branch) of energy for each kpoint. The data is
            stored by branch to facilitate the plotting
            vbm: A list of tuples (distance,energy) marking the vbms. The
            energies are shifted with respect to the fermi level is the
            option has been selected.
            cbm: A list of tuples (distance,energy) marking the cbms. The
            energies are shifted with respect to the fermi level is the
            option has been selected.
            lattice: The reciprocal lattice.
            zero_energy: This is the energy used as zero for the plot.
            band_gap:A string indicating the band gap and its nature (empty if
            it's a metal).
            is_metal: True if the band structure is metallic (i.e., there is at
            least one band crossing the fermi level).
        """
        distance = []
        energy = []
        # Zero of energy: E_F for metals, the VBM otherwise (unless the
        # shift is disabled entirely).
        if self._bs.is_metal():
            zero_energy = self._bs.efermi
        else:
            zero_energy = self._bs.get_vbm()['energy']
        if not zero_to_efermi:
            zero_energy = 0.0
        # Hoisted out of the branch loop: ticks do not depend on the branch.
        ticks = self.get_ticks()
        for b in self._bs._branches:
            if self._bs.is_spin_polarized:
                energy.append({str(Spin.up): [], str(Spin.down): []})
            else:
                energy.append({str(Spin.up): []})
            distance.append([self._bs._distance[j]
                             for j in range(b['start_index'],
                                            b['end_index'] + 1)])
            for i in range(self._nb_bands):
                energy[-1][str(Spin.up)].append(
                    [self._bs._bands[Spin.up][i][j] - zero_energy
                     for j in range(b['start_index'], b['end_index'] + 1)])
            if self._bs.is_spin_polarized:
                for i in range(self._nb_bands):
                    energy[-1][str(Spin.down)].append(
                        [self._bs._bands[Spin.down][i][j] - zero_energy
                         for j in range(b['start_index'],
                                        b['end_index'] + 1)])
        vbm = self._bs.get_vbm()
        cbm = self._bs.get_cbm()
        vbm_plot = []
        cbm_plot = []
        for index in cbm['kpoint_index']:
            cbm_plot.append((self._bs._distance[index],
                             cbm['energy'] - zero_energy if zero_to_efermi
                             else cbm['energy']))
        for index in vbm['kpoint_index']:
            vbm_plot.append((self._bs._distance[index],
                             vbm['energy'] - zero_energy if zero_to_efermi
                             else vbm['energy']))
        bg = self._bs.get_band_gap()
        direct = "Indirect"
        if bg['direct']:
            direct = "Direct"
        return {'ticks': ticks, 'distances': distance, 'energy': energy,
                'vbm': vbm_plot, 'cbm': cbm_plot,
                'lattice': self._bs._lattice_rec.as_dict(),
                'zero_energy': zero_energy, 'is_metal': self._bs.is_metal(),
                'band_gap': "{} {} bandgap = {}".format(direct,
                                                        bg['transition'],
                                                        bg['energy'])
                if not self._bs.is_metal() else ""}
    def get_plot(self, zero_to_efermi=True, ylim=None, smooth=False,
                 vbm_cbm_marker=False):
        """
        get a matplotlib object for the bandstructure plot.
        Blue lines are up spin, red lines are down
        spin.
        Args:
            zero_to_efermi: Automatically subtract off the Fermi energy from
                the eigenvalues and plot (E-Ef).
            ylim: Specify the y-axis (energy) limits; by default None let
                the code choose. It is vbm-4 and cbm+4 if insulator
                efermi-10 and efermi+10 if metal
            smooth: interpolates the bands by a spline cubic
            vbm_cbm_marker: whether to mark the VBM and CBM with dots.
        """
        from pymatgen.util.plotting_utils import get_publication_quality_plot
        plt = get_publication_quality_plot(12, 8)
        from matplotlib import rc
        import scipy.interpolate as scint
        rc('text', usetex=True)
        # main internal config options
        e_min = -4
        e_max = 4
        if self._bs.is_metal():
            e_min = -10
            e_max = 10
        #band_linewidth = 3
        band_linewidth = 1
        data = self.bs_plot_data(zero_to_efermi)
        if not smooth:
            for d in range(len(data['distances'])):
                for i in range(self._nb_bands):
                    plt.plot(data['distances'][d],
                             [data['energy'][d][str(Spin.up)][i][j]
                              for j in range(len(data['distances'][d]))], 'b-',
                             linewidth=band_linewidth)
                    if self._bs.is_spin_polarized:
                        plt.plot(data['distances'][d],
                                 [data['energy'][d][str(Spin.down)][i][j]
                                  for j in range(len(data['distances'][d]))],
                                 'r--', linewidth=band_linewidth)
        else:
            # Cubic-spline interpolation, resampled on 1000 points per branch.
            for d in range(len(data['distances'])):
                for i in range(self._nb_bands):
                    tck = scint.splrep(
                        data['distances'][d],
                        [data['energy'][d][str(Spin.up)][i][j]
                         for j in range(len(data['distances'][d]))])
                    step = (data['distances'][d][-1]
                            - data['distances'][d][0]) / 1000
                    plt.plot([x * step + data['distances'][d][0]
                              for x in range(1000)],
                             [scint.splev(x * step + data['distances'][d][0],
                                          tck, der=0)
                              for x in range(1000)], 'b-',
                             linewidth=band_linewidth)
                    if self._bs.is_spin_polarized:
                        tck = scint.splrep(
                            data['distances'][d],
                            [data['energy'][d][str(Spin.down)][i][j]
                             for j in range(len(data['distances'][d]))])
                        step = (data['distances'][d][-1]
                                - data['distances'][d][0]) / 1000
                        plt.plot([x * step + data['distances'][d][0]
                                  for x in range(1000)],
                                 [scint.splev(
                                     x * step + data['distances'][d][0],
                                     tck, der=0)
                                  for x in range(1000)], 'r--',
                                 linewidth=band_linewidth)
        self._maketicks(plt)
        # Main X and Y Labels
        plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
        ylabel = r'$\mathrm{E\ -\ E_f\ (eV)}$' if zero_to_efermi \
            else r'$\mathrm{Energy\ (eV)}$'
        plt.ylabel(ylabel, fontsize=30)
        # Draw Fermi energy, only if not the zero
        if not zero_to_efermi:
            ef = self._bs.efermi
            plt.axhline(ef, linewidth=2, color='k')
        # X range (K)
        # last distance point
        x_max = data['distances'][-1][-1]
        plt.xlim(0, x_max)
        if ylim is None:
            if self._bs.is_metal():
                # Plot A Metal
                if zero_to_efermi:
                    plt.ylim(e_min, e_max)
                else:
                    # Fixed: used the private self._bs._efermi attribute.
                    plt.ylim(self._bs.efermi + e_min, self._bs.efermi + e_max)
            else:
                if vbm_cbm_marker:
                    for cbm in data['cbm']:
                        plt.scatter(cbm[0], cbm[1], color='r', marker='o',
                                    s=100)
                    for vbm in data['vbm']:
                        plt.scatter(vbm[0], vbm[1], color='g', marker='o',
                                    s=100)
                # Always set the insulator y-window, marker or not.
                plt.ylim(data['vbm'][0][1] + e_min,
                         data['cbm'][0][1] + e_max)
        else:
            plt.ylim(ylim)
        plt.tight_layout()
        return plt
    def show(self, zero_to_efermi=True, ylim=None, smooth=False):
        """
        Show the plot using matplotlib.
        Args:
            zero_to_efermi: Automatically subtract off the Fermi energy from
                the eigenvalues and plot (E-Ef).
            ylim: Specify the y-axis (energy) limits; by default None let
                the code choose. It is vbm-4 and cbm+4 if insulator
                efermi-10 and efermi+10 if metal
            smooth: interpolates the bands by a spline cubic
        """
        plt = self.get_plot(zero_to_efermi, ylim, smooth)
        plt.show()
    def save_plot(self, filename, img_format="eps", ylim=None,
                  zero_to_efermi=True, smooth=False):
        """
        Save matplotlib plot to a file.
        Args:
            filename: Filename to write to.
            img_format: Image format to use. Defaults to EPS.
            ylim: Specifies the y-axis limits.
        """
        plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi,
                            smooth=smooth)
        plt.savefig(filename, format=img_format)
        plt.close()
    def get_ticks(self):
        """
        Get all ticks and labels for a band structure plot.
        Returns:
            A dict with 'distance': a list of distance at which ticks should
            be set and 'label': a list of label for each of those ticks.
        """
        tick_distance = []
        tick_labels = []
        previous_label = self._bs._kpoints[0].label
        previous_branch = self._bs._branches[0]['name']
        for i, c in enumerate(self._bs._kpoints):
            if c.label is not None:
                tick_distance.append(self._bs._distance[i])
                this_branch = None
                for b in self._bs._branches:
                    if b['start_index'] <= i <= b['end_index']:
                        this_branch = b['name']
                        break
                if c.label != previous_label \
                        and previous_branch != this_branch:
                    # Discontinuity between branches: merge the two labels
                    # into one "A|B" tick.
                    label1 = c.label
                    if label1.startswith("\\") or label1.find("_") != -1:
                        label1 = "$" + label1 + "$"
                    label0 = previous_label
                    if label0.startswith("\\") or label0.find("_") != -1:
                        label0 = "$" + label0 + "$"
                    tick_labels.pop()
                    tick_distance.pop()
                    tick_labels.append(label0 + "$\mid$" + label1)
                else:
                    if c.label.startswith("\\") or c.label.find("_") != -1:
                        tick_labels.append("$" + c.label + "$")
                    else:
                        tick_labels.append(c.label)
                previous_label = c.label
                previous_branch = this_branch
        return {'distance': tick_distance, 'label': tick_labels}
    def plot_compare(self, other_plotter):
        """
        plot two band structure for comparison. One is in red the other in blue
        (no difference in spins). The two band structures need to be defined
        on the same symmetry lines! and the distance between symmetry lines is
        the one of the band structure used to build the BSPlotter
        Args:
            another band structure object defined along the same symmetry lines
        Returns:
            a matplotlib object with both band structures
        """
        # TODO: add exception if the band structures are not compatible
        plt = self.get_plot()
        data_orig = self.bs_plot_data()
        data = other_plotter.bs_plot_data()
        band_linewidth = 3
        # Fixed: 'energy' is a list with one entry per branch; the original
        # indexed it directly with the spin key, which raises a TypeError.
        for i in range(other_plotter._nb_bands):
            for d in range(len(data_orig['distances'])):
                plt.plot(data_orig['distances'][d],
                         [e for e in data['energy'][d][str(Spin.up)][i]],
                         'r-', linewidth=band_linewidth)
                if other_plotter._bs.is_spin_polarized:
                    plt.plot(data_orig['distances'][d],
                             [e for e in data['energy'][d][str(Spin.down)][i]],
                             'r-', linewidth=band_linewidth)
        return plt
    def plot_brillouin(self):
        """
        plot the Brillouin zone
        """
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        mpl.rcParams['legend.fontsize'] = 10
        fig = plt.figure()
        ax = Axes3D(fig)
        vec1 = self._bs.lattice.matrix[0]
        vec2 = self._bs.lattice.matrix[1]
        vec3 = self._bs.lattice.matrix[2]
        # 3x3x3 grid of reciprocal-lattice points around the origin; the
        # Voronoi cell of the central point is the first Brillouin zone.
        # (The original also tracked min/max coordinates here, but never
        # used them -- and the z-components read index [0]; removed.)
        list_k_points = []
        for i in [-1, 0, 1]:
            for j in [-1, 0, 1]:
                for k in [-1, 0, 1]:
                    list_k_points.append(i * vec1 + j * vec2 + k * vec3)
        # Index 13 is the (0, 0, 0) point of the 27-point grid.
        vertex = _qvertex_target(list_k_points, 13)
        lines = get_lines_voronoi(vertex)
        for i in range(len(lines)):
            vertex1 = lines[i]['start']
            vertex2 = lines[i]['end']
            ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]],
                    [vertex1[2], vertex2[2]], color='k')
        # Overlay the k-path followed by the band structure in red.
        for b in self._bs._branches:
            vertex1 = self._bs.kpoints[b['start_index']].cart_coords
            vertex2 = self._bs.kpoints[b['end_index']].cart_coords
            ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]],
                    [vertex1[2], vertex2[2]], color='r', linewidth=3)
        for k in self._bs.kpoints:
            if k.label:
                label = k.label
                if k.label.startswith("\\") or k.label.find("_") != -1:
                    label = "$" + k.label + "$"
                off = 0.01
                ax.text(k.cart_coords[0] + off, k.cart_coords[1] + off,
                        k.cart_coords[2] + off, label, color='b', size='25')
                ax.scatter([k.cart_coords[0]], [k.cart_coords[1]],
                           [k.cart_coords[2]], color='b')
        # make ticklabels and ticklines invisible
        for a in ax.w_xaxis.get_ticklines() + ax.w_xaxis.get_ticklabels():
            a.set_visible(False)
        for a in ax.w_yaxis.get_ticklines() + ax.w_yaxis.get_ticklabels():
            a.set_visible(False)
        for a in ax.w_zaxis.get_ticklines() + ax.w_zaxis.get_ticklabels():
            a.set_visible(False)
        ax.grid(False)
        plt.show()
        ax.axis("off")
class BSPlotterProjected(BSPlotter):
    """
    Class to plot or get data to facilitate the plot of band structure objects
    projected along orbitals, elements or sites.
    Args:
        bs: A BandStructureSymmLine object with projections.
    """
    def __init__(self, bs):
        if len(bs._projections) == 0:
            raise ValueError("try to plot projections"
                             " on a band structure without any")
        super(BSPlotterProjected, self).__init__(bs)
    def _get_projections_by_branches(self, dictio):
        """Split {Spin: [band][kpoint]} projections into per-branch lists.

        Returns a list with one entry per branch, each of the form
        {str(Spin): [band][kpoint_in_branch]{element: {orbital: weight}}}.
        """
        proj = self._bs.get_projections_on_elts_and_orbitals(dictio)
        proj_br = []
        # Leftover print() debugging replaced by logger.debug.
        logger.debug("%s bands, %s kpoints in projections",
                     len(proj[Spin.up]), len(proj[Spin.up][0]))
        for b in self._bs._branches:
            logger.debug("Processing branch %s", b)
            if self._bs.is_spin_polarized:
                proj_br.append(
                    {str(Spin.up): [[] for l in range(self._nb_bands)],
                     str(Spin.down): [[] for l in range(self._nb_bands)]})
            else:
                proj_br.append(
                    {str(Spin.up): [[] for l in range(self._nb_bands)]})
            for i in range(self._nb_bands):
                for j in range(b['start_index'], b['end_index'] + 1):
                    proj_br[-1][str(Spin.up)][i].append(
                        {e: {o: proj[Spin.up][i][j][e][o]
                             for o in proj[Spin.up][i][j][e]}
                         for e in proj[Spin.up][i][j]})
        if self._bs.is_spin_polarized:
            # Fixed: the original appended every branch's spin-down data to
            # proj_br[-1] (the last branch) instead of the matching entry.
            for bi, b in enumerate(self._bs._branches):
                for i in range(self._nb_bands):
                    for j in range(b['start_index'], b['end_index'] + 1):
                        proj_br[bi][str(Spin.down)][i].append(
                            {e: {o: proj[Spin.down][i][j][e][o]
                                 for o in proj[Spin.down][i][j][e]}
                             for e in proj[Spin.down][i][j]})
        return proj_br
    def get_projected_plots_dots(self, dictio, zero_to_efermi=True, ylim=None,
                                 vbm_cbm_marker=False):
        """
        Method returning a plot composed of subplots along different elements
        and orbitals.
        Args:
            dictio: The element and orbitals you want a projection on. The
                format is {Element:[Orbitals]} for instance
                {'Cu':['d','s'],'O':['p']} will give projections for Cu on
                d and s orbitals and on oxygen p.
        Returns:
            a pylab object with different subfigures for each projection
            The blue and red colors are for spin up and spin down.
            The bigger the red or blue dot in the band structure the higher
            character for the corresponding element and orbital.
        """
        from pymatgen.util.plotting_utils import get_publication_quality_plot
        band_linewidth = 1.0
        fig_number = sum([len(v) for v in dictio.values()])
        proj = self._get_projections_by_branches(dictio)
        data = self.bs_plot_data(zero_to_efermi)
        plt = get_publication_quality_plot(12, 8)
        e_min = -4
        e_max = 4
        if self._bs.is_metal():
            e_min = -10
            e_max = 10
        count = 1
        for el in dictio:
            for o in dictio[el]:
                # int(): math.ceil returns a float on Python 2 and
                # plt.subplot requires an integer code.
                plt.subplot(int(100 * math.ceil(fig_number / 2) + 20 + count))
                self._maketicks(plt)
                for b in range(len(data['distances'])):
                    for i in range(self._nb_bands):
                        plt.plot(data['distances'][b],
                                 [data['energy'][b][str(Spin.up)][i][j]
                                  for j in range(len(data['distances'][b]))],
                                 'b-',
                                 linewidth=band_linewidth)
                        if self._bs.is_spin_polarized:
                            plt.plot(data['distances'][b],
                                     [data['energy'][b][str(Spin.down)][i][j]
                                      for j in
                                      range(len(data['distances'][b]))],
                                     'r--', linewidth=band_linewidth)
                            # Red dots: spin-down projected character.
                            for j in range(
                                    len(data['energy'][b][str(Spin.up)][i])):
                                plt.plot(
                                    data['distances'][b][j],
                                    data['energy'][b][str(Spin.down)][i][j],
                                    'ro',
                                    markersize=proj[b][str(Spin.down)][i][j][
                                        str(el)][o] * 15.0)
                        # Blue dots: spin-up projected character.
                        for j in range(len(data['energy'][b][str(Spin.up)][i])):
                            plt.plot(
                                data['distances'][b][j],
                                data['energy'][b][str(Spin.up)][i][j],
                                'bo',
                                markersize=proj[b][str(Spin.up)][i][j][
                                    str(el)][o] * 15.0)
                if ylim is None:
                    if self._bs.is_metal():
                        if zero_to_efermi:
                            plt.ylim(e_min, e_max)
                        else:
                            # Fixed: used the private _efermi attribute.
                            plt.ylim(self._bs.efermi + e_min,
                                     self._bs.efermi + e_max)
                    else:
                        if vbm_cbm_marker:
                            for cbm in data['cbm']:
                                plt.scatter(cbm[0], cbm[1], color='r',
                                            marker='o',
                                            s=100)
                            for vbm in data['vbm']:
                                plt.scatter(vbm[0], vbm[1], color='g',
                                            marker='o',
                                            s=100)
                        # Always set the insulator y-window, marker or not.
                        plt.ylim(data['vbm'][0][1] + e_min,
                                 data['cbm'][0][1] + e_max)
                else:
                    plt.ylim(ylim)
                plt.title(str(el) + " " + str(o))
                count += 1
        return plt
    def get_elt_projected_plots(self, zero_to_efermi=True, ylim=None,
                                vbm_cbm_marker=False):
        """
        Method returning a plot composed of subplots along different elements
        Returns:
            a pylab object with different subfigures for each projection
            The blue and red colors are for spin up and spin down
            The bigger the red or blue dot in the band structure the higher
            character for the corresponding element and orbital
        """
        band_linewidth = 1.0
        proj = self._get_projections_by_branches({e.symbol: ['s', 'p', 'd']
                                                  for e in
                                                  self._bs._structure.composition.elements})
        data = self.bs_plot_data(zero_to_efermi)
        from pymatgen.util.plotting_utils import get_publication_quality_plot
        plt = get_publication_quality_plot(12, 8)
        e_min = -4
        e_max = 4
        if self._bs.is_metal():
            e_min = -10
            e_max = 10
        count = 1
        for el in self._bs._structure.composition.elements:
            plt.subplot(220 + count)
            self._maketicks(plt)
            for b in range(len(data['distances'])):
                for i in range(self._nb_bands):
                    plt.plot(data['distances'][b],
                             [data['energy'][b][str(Spin.up)][i][j]
                              for j in range(len(data['distances'][b]))], 'b-',
                             linewidth=band_linewidth)
                    if self._bs.is_spin_polarized:
                        plt.plot(data['distances'][b],
                                 [data['energy'][b][str(Spin.down)][i][j]
                                  for j in range(len(data['distances'][b]))],
                                 'r--', linewidth=band_linewidth)
                        # Dot size is the total s+p+d character of el.
                        for j in range(len(data['energy'][b][str(Spin.up)][i])):
                            plt.plot(data['distances'][b][j],
                                     data['energy'][b][str(Spin.down)][i][j],
                                     'ro',
                                     markersize=sum(
                                         [proj[b][str(Spin.down)][i][j][
                                             str(el)][o] for o in
                                          proj[b][str(Spin.down)][i][j][
                                              str(el)]]) * 15.0)
                    for j in range(len(data['energy'][b][str(Spin.up)][i])):
                        plt.plot(data['distances'][b][j],
                                 data['energy'][b][str(Spin.up)][i][j], 'bo',
                                 markersize=sum(
                                     [proj[b][str(Spin.up)][i][j][str(el)][o]
                                      for o in proj[b]
                                      [str(Spin.up)][i][j][str(el)]]) * 15.0)
            if ylim is None:
                if self._bs.is_metal():
                    if zero_to_efermi:
                        plt.ylim(e_min, e_max)
                    else:
                        # Fixed: used the private _efermi attribute.
                        plt.ylim(self._bs.efermi + e_min,
                                 self._bs.efermi + e_max)
                else:
                    if vbm_cbm_marker:
                        for cbm in data['cbm']:
                            plt.scatter(cbm[0], cbm[1], color='r', marker='o',
                                        s=100)
                        for vbm in data['vbm']:
                            plt.scatter(vbm[0], vbm[1], color='g', marker='o',
                                        s=100)
                    # Always set the insulator y-window, marker or not.
                    plt.ylim(data['vbm'][0][1] + e_min,
                             data['cbm'][0][1] + e_max)
            else:
                plt.ylim(ylim)
            plt.title(str(el))
            count += 1
        return plt
    def get_elt_projected_plots_color(self, zero_to_efermi=True,
                                      elt_ordered=None):
        """
        returns a pylab plot object with one plot where the band structure
        line color depends on the character of the band (along different
        elements). Each element is associated with red, green or blue
        and the corresponding rgb color depending on the character of the band
        is used. The method can only deal with binary and ternary compounds
        spin up and spin down are differentiated by a '-' and a '--' line
        Args:
            elt_ordered: A list of Element ordered. The first one is red,
                second green, last blue
        Returns:
            a pylab object
        """
        band_linewidth = 3.0
        if len(self._bs._structure.composition.elements) > 3:
            # Fixed: the bare ValueError carried no message.
            raise ValueError(
                "get_elt_projected_plots_color can only deal with binary "
                "and ternary compounds")
        if elt_ordered is None:
            elt_ordered = self._bs._structure.composition.elements
        proj = self._get_projections_by_branches(
            {e.symbol: ['s', 'p', 'd']
             for e in self._bs._structure.composition.elements})
        data = self.bs_plot_data(zero_to_efermi)
        from pymatgen.util.plotting_utils import get_publication_quality_plot
        plt = get_publication_quality_plot(12, 8)
        spins = [Spin.up]
        if self._bs.is_spin_polarized:
            spins = [Spin.up, Spin.down]
        self._maketicks(plt)
        for s in spins:
            for b in range(len(data['distances'])):
                for i in range(self._nb_bands):
                    for j in range(len(data['energy'][b][str(s)][i]) - 1):
                        # Total projected weight of each element on this
                        # band segment, normalized into an RGB triple.
                        sum_e = 0.0
                        for el in elt_ordered:
                            sum_e = sum_e + \
                                sum([proj[b][str(s)][i][j][str(el)][o]
                                     for o
                                     in proj[b][str(s)][i][j][str(el)]])
                        if sum_e == 0.0:
                            color = [0.0] * len(elt_ordered)
                        else:
                            color = [sum([proj[b][str(s)][i][j][str(el)][o]
                                          for o
                                          in proj[b][str(s)][i][j][str(el)]])
                                     / sum_e
                                     for el in elt_ordered]
                        if len(color) == 2:
                            # Binary compound: use red and blue (move the
                            # second element's weight to the blue channel).
                            color.append(0.0)
                            color[2] = color[1]
                            color[1] = 0.0
                        sign = '-'
                        if s == Spin.down:
                            sign = '--'
                        plt.plot([data['distances'][b][j],
                                  data['distances'][b][j + 1]],
                                 [data['energy'][b][str(s)][i][j],
                                  data['energy'][b][str(s)][i][j + 1]], sign,
                                 color=color, linewidth=band_linewidth)
        plt.ylim(data['vbm'][0][1] - 4.0, data['cbm'][0][1] + 2.0)
        return plt
def _qvertex_target(data, index):
    """
    Input data should be in the form of a list of a list of floats.
    index is the index of the targeted point
    Returns the vertices of the voronoi construction around this target point.
    """
    from pyhull import qvoronoi
    raw_output = qvoronoi("p QV" + str(index), data)
    # The first two lines of qvoronoi's "p" output are headers; every
    # remaining line is one vertex given as whitespace-separated floats.
    return [[float(token) for token in line.split()] for line in raw_output[2:]]
def get_lines_voronoi(data):
    """Return the edges of the convex hull of *data* as start/end point dicts.

    Runs qconvex on the points and keeps, for every vertex pair shared by two
    non-coplanar facets, a line of the form {'start': point, 'end': point}.
    """
    from pyhull import qconvex
    output = qconvex("o", data)
    # Second line of qconvex's "o" output starts with the number of points.
    nb_points = int(output[1].split(" ")[0])
    list_lines = []
    list_points = []
    for i in range(2, 2 + nb_points):
        list_points.append([float(c) for c in output[i].strip().split()])
    facets = []
    # Facet rows are "<count> <idx0> <idx1> ..." -> keep the vertex indices.
    for i in range(2 + nb_points, len(output)):
        if output[i] != '':
            tmp = output[i].strip().split(" ")
            facets.append([int(tmp[j]) for j in range(1, len(tmp))])
    # A vertex pair is a true polyhedron edge when it is shared by two
    # facets whose normals are not (nearly) parallel.
    for i in range(len(facets)):
        for line in itertools.combinations(facets[i], 2):
            for j in range(len(facets)):
                if i != j and line[0] in facets[j] and line[1] in facets[j]:
                    # check if the two facets i and j are not coplanar
                    vector1 = np.array(list_points[facets[j][0]]) \
                        - np.array(list_points[facets[j][1]])
                    vector2 = np.array(list_points[facets[j][0]]) \
                        - np.array(list_points[facets[j][2]])
                    n1 = np.cross(vector1, vector2)
                    vector1 = np.array(list_points[facets[i][0]]) \
                        - np.array(list_points[facets[i][1]])
                    vector2 = np.array(list_points[facets[i][0]]) \
                        - np.array(list_points[facets[i][2]])
                    n2 = np.cross(vector1, vector2)
                    # |cos| of the angle between the two facet normals;
                    # values near 1 mean the facets are (nearly) coplanar.
                    dot = math.fabs(np.dot(n1, n2) / (np.linalg.norm(n1)
                                                      * np.linalg.norm(n2)))
                    if 1.05 > dot > 0.95:
                        continue
                    list_lines.append({'start': list_points[line[0]],
                                       'end': list_points[line[1]]})
                    break
    return list_lines
| mit |
sara-02/fabric8-analytics-stack-analysis | util/softnet_util.py | 1 | 2671 | """Utility functions for handling package lists, compute similarity score etc."""
import numpy as np
import pandas as pd
import analytics_platform.kronos.softnet.src.softnet_constants as softnet_constants
def generate_parent_tuple_list(node_list, edge_dict_list):
    """Generate parent tuple list.

    Args:
        node_list: list of node names; positions in this list define the
            index space used for parents.
        edge_dict_list: list of edge dicts with a FROM (parent) and a
            TO (child) entry.

    Returns:
        A tuple with one entry per node of node_list: the tuple of the
        indices (into node_list) of that node's parents, in edge order,
        or an empty tuple if the node has no parents.

    Raises:
        ValueError: if an edge references a parent not present in node_list.
    """
    # Precompute first-occurrence indices once: node_list.index() inside the
    # edge loop made this quadratic in the input size.
    node_index = {}
    for idx, node in enumerate(node_list):
        node_index.setdefault(node, idx)
    child_to_parent_dict = dict()
    for edge_dict in edge_dict_list:
        child = edge_dict[softnet_constants.EDGE_DICT_TO]
        parent = edge_dict[softnet_constants.EDGE_DICT_FROM]
        try:
            parent_idx = node_index[parent]
        except KeyError:
            # Preserve the ValueError that list.index() used to raise.
            raise ValueError("%r is not in node_list" % (parent,))
        child_to_parent_dict.setdefault(child, []).append(parent_idx)
    return tuple(tuple(child_to_parent_dict.get(node, ()))
                 for node in node_list)
def get_similar_package_dict_list(package, package_list, package_to_topic_dict):
    """Rank candidate packages by topic similarity to *package*.

    For each candidate in *package_list*, computes the similarity
    between its topic list and that of *package*, strips the
    ``GNOSIS_PTM_TOPIC_PREFIX`` from its topics for display, and
    returns a list of dicts sorted by descending similarity score.
    """
    reference_topics = package_to_topic_dict[package]
    prefix_len = len(softnet_constants.GNOSIS_PTM_TOPIC_PREFIX)
    score_dicts = []
    for candidate in package_list:
        candidate_topics = package_to_topic_dict[candidate]
        score_dicts.append({
            softnet_constants.KD_PACKAGE_NAME: candidate,
            softnet_constants.KD_SIMILARITY_SCORE:
                calculate_similarity_score(reference_topics,
                                           candidate_topics),
            softnet_constants.KD_TOPIC_LIST:
                [topic[prefix_len:] for topic in candidate_topics],
        })
    # list.sort is stable, matching sorted()'s tie-breaking behaviour.
    score_dicts.sort(
        key=lambda entry: entry[softnet_constants.KD_SIMILARITY_SCORE],
        reverse=True)
    return score_dicts
def calculate_similarity_score(topic_list_1, topic_list_2):
    """Calculate the similarity score between two topic lists.

    The score is the number of shared topics divided by the average
    list length, a symmetric value in ``[0.0, 1.0]``; duplicates are
    ignored (set semantics).

    Parameters
    ----------
    topic_list_1, topic_list_2 : list
        Topic names to compare.

    Returns
    -------
    float
        Overlap ratio; ``0.0`` when both lists are empty (the original
        implementation raised ``ZeroDivisionError`` in that case).
    """
    average_length = (len(topic_list_1) + len(topic_list_2)) / 2.0
    if average_length == 0:
        # Two empty topic lists share nothing; avoid dividing by zero.
        return 0.0
    intersection_set = set(topic_list_1) & set(topic_list_2)
    return len(intersection_set) / average_length
def create_empty_pandas_df(rowsize, columns_list):
    """Create a *rowsize* x len(columns_list) DataFrame of int8 zeros."""
    frame = pd.DataFrame(
        np.zeros((rowsize, len(columns_list)), dtype=np.int8),
        columns=columns_list)
    return frame
| gpl-3.0 |
pv/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
# Deterministic toy data: 40 noisy samples of sin(x) on [0, 5], plus a
# dense evaluation grid T for smooth prediction curves.
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
# (every 5th target gets uniform noise drawn from [-0.5, 0.5])
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
# Compare uniform weighting against inverse-distance weighting, one
# subplot per weighting scheme.
for i, weights in enumerate(['uniform', 'distance']):
    knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
    y_ = knn.fit(X, y).predict(T)
    plt.subplot(2, 1, i + 1)
    plt.scatter(X, y, c='k', label='data')
    plt.plot(T, y_, c='g', label='prediction')
    plt.axis('tight')
    plt.legend()
    plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
                                                                weights))
plt.show()
| bsd-3-clause |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pandas/util/decorators.py | 9 | 9665 | from pandas.compat import StringIO, callable
from pandas.lib import cache_readonly
import sys
import warnings
from functools import wraps
def deprecate(name, alternative, alt_name=None):
    """Return a wrapper around *alternative* that warns *name* is deprecated.

    Parameters
    ----------
    name : str
        Name of the deprecated function, used in the warning message.
    alternative : callable
        Function to call in place of the deprecated one.
    alt_name : str, optional
        Display name for *alternative*; defaults to its ``__name__``.

    Returns
    -------
    callable
        Wrapper that emits a ``FutureWarning`` and then delegates all
        arguments to *alternative*.
    """
    alt_name = alt_name or alternative.__name__

    # functools.wraps preserves the alternative's __name__/__doc__ on the
    # wrapper; the original implementation lost that metadata.
    @wraps(alternative)
    def wrapper(*args, **kwargs):
        warnings.warn("%s is deprecated. Use %s instead" % (name, alt_name),
                      FutureWarning, stacklevel=2)
        return alternative(*args, **kwargs)
    return wrapper
def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
    """Decorator to deprecate a keyword argument of a function
    Parameters
    ----------
    old_arg_name : str
        Name of argument in function to deprecate
    new_arg_name : str
        Name of preferred argument in function
    mapping : dict or callable
        If mapping is present, use it to translate old arguments to
        new arguments. A callable must do its own value checking;
        values not found in a dict will be forwarded unchanged.
    Examples
    --------
    The following deprecates 'cols', using 'columns' instead
    >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
    ... def f(columns=''):
    ...     print(columns)
    ...
    >>> f(columns='should work ok')
    should work ok
    >>> f(cols='should raise warning')
    FutureWarning: cols is deprecated, use columns instead
      warnings.warn(msg, FutureWarning)
    should raise warning
    >>> f(cols='should error', columns="can\'t pass do both")
    TypeError: Can only specify 'cols' or 'columns', not both
    >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
    ... def f(new=False):
    ...     print('yes!' if new else 'no!')
    ...
    >>> f(old='yes')
    FutureWarning: old='yes' is deprecated, use new=True instead
      warnings.warn(msg, FutureWarning)
    yes!
    """
    # Validate the mapping up front so misconfiguration fails at
    # decoration time rather than on the first deprecated call.
    if mapping is not None and not hasattr(mapping, 'get') and \
            not callable(mapping):
        raise TypeError("mapping from old to new argument values "
                        "must be dict or callable!")
    def _deprecate_kwarg(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # NOTE(review): passing old_arg_name=None explicitly is
            # indistinguishable from omitting it — the value is dropped
            # without a warning.
            old_arg_value = kwargs.pop(old_arg_name, None)
            if old_arg_value is not None:
                if mapping is not None:
                    if hasattr(mapping, 'get'):
                        # dict-like mapping: values missing from the dict
                        # are forwarded unchanged.
                        new_arg_value = mapping.get(old_arg_value,
                                                    old_arg_value)
                    else:
                        new_arg_value = mapping(old_arg_value)
                    msg = "the %s=%r keyword is deprecated, " \
                          "use %s=%r instead" % \
                        (old_arg_name, old_arg_value,
                         new_arg_name, new_arg_value)
                else:
                    new_arg_value = old_arg_value
                    msg = "the '%s' keyword is deprecated, " \
                          "use '%s' instead" % (old_arg_name, new_arg_name)
                warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
                # Refuse calls that pass both the old and the new keyword.
                if kwargs.get(new_arg_name, None) is not None:
                    msg = "Can only specify '%s' or '%s', not both" % \
                        (old_arg_name, new_arg_name)
                    raise TypeError(msg)
                else:
                    kwargs[new_arg_name] = new_arg_value
            return func(*args, **kwargs)
        return wrapper
    return _deprecate_kwarg
# Substitution and Appender are derived from matplotlib.docstring (1.1.0)
# module http://matplotlib.sourceforge.net/users/license.html
class Substitution(object):
    """Decorator performing %-style substitution on a function's docstring.

    Construct with either positional or keyword substitution values
    (never both) and apply to a function whose docstring contains the
    matching ``%s`` / ``%(name)s`` placeholders, e.g.::

        sub_author = Substitution(author='Jason')

        @sub_author
        def some_function(x):
            "%(author)s wrote this function"

    Robust to ``func.__doc__`` being ``None`` (for example when the
    interpreter runs with ``-OO``).
    """
    def __init__(self, *args, **kwargs):
        if args and kwargs:
            raise AssertionError("Only positional or keyword args are allowed")
        self.params = args or kwargs
    def __call__(self, func):
        # Substitute only when there is a docstring; None/'' stay as-is.
        if func.__doc__:
            func.__doc__ = func.__doc__ % self.params
        return func
    def update(self, *args, **kwargs):
        """Update the (dict) substitution params with the supplied args."""
        self.params.update(*args, **kwargs)
    @classmethod
    def from_params(cls, params):
        """Build a Substitution holding a *reference* to ``params``.

        Unlike ``cls(*args)`` / ``cls(**kwargs)``, no copy of the values
        is taken, so later mutations of ``params`` are visible when the
        decorator is finally applied.
        """
        inst = cls()
        inst.params = params
        return inst
class Appender(object):
    """Decorator appending *addendum* to the decorated function's docstring.

    *join* separates the original docstring from the addendum; a
    positive *indents* re-indents the addendum via :func:`indent`
    before it is stored, e.g.::

        add_copyright = Appender("Copyright (c) 2009", join='\n')

        @add_copyright
        def my_dog(has='fleas'):
            "This docstring will have a copyright below"

    Robust to ``func.__doc__`` being ``None`` (for example when the
    interpreter runs with ``-OO``).
    """
    def __init__(self, addendum, join='', indents=0):
        self.addendum = (indent(addendum, indents=indents)
                         if indents > 0 else addendum)
        self.join = join
    def __call__(self, func):
        # Normalise a missing docstring/addendum to '' so join always works.
        func.__doc__ = func.__doc__ or ''
        self.addendum = self.addendum or ''
        func.__doc__ = self.join.join([func.__doc__, self.addendum])
        return func
def indent(text, indents=1):
    """Indent every line of *text* after the first by ``4 * indents`` spaces.

    Non-string or falsy input yields ''.
    """
    if not text or not isinstance(text, str):
        return ''
    sep = '\n' + '    ' * indents
    return sep.join(text.split('\n'))
def suppress_stdout(f):
    """Decorator that silences anything *f* prints to stdout.

    ``sys.stdout`` is swapped for an in-memory buffer for the duration
    of the call and always restored afterwards, even if *f* raises.

    Returns
    -------
    callable
        Wrapper forwarding ``*args, **kwargs`` to *f* and returning its
        result (the original implementation discarded the return value).
    """
    @wraps(f)
    def wrapped(*args, **kwargs):
        try:
            sys.stdout = StringIO()
            return f(*args, **kwargs)
        finally:
            sys.stdout = sys.__stdout__
    return wrapped
class KnownFailureTest(Exception):
    '''Raise this exception to mark a test as a known failing test.'''
    # Marker exception consumed by the `knownfailureif` decorator;
    # carries no state of its own.
    pass
def knownfailureif(fail_condition, msg=None):
    """
    Make function raise KnownFailureTest exception if given condition is true.
    If the condition is a callable, it is used at runtime to dynamically
    make the decision. This is useful for tests that may require costly
    imports, to delay the cost until the test suite is actually executed.
    Parameters
    ----------
    fail_condition : bool or callable
        Flag to determine whether to mark the decorated test as a known
        failure (if True) or not (if False).
    msg : str, optional
        Message to give on raising a KnownFailureTest exception.
        Default is None.
    Returns
    -------
    decorator : function
        Decorator, which, when applied to a function, causes
        KnownFailureTest to be raised when `fail_condition` is True,
        and the function to be called normally otherwise.
    Notes
    -----
    The decorator itself is decorated with the ``nose.tools.make_decorator``
    function in order to transmit function name, and various other metadata.
    """
    if msg is None:
        msg = 'Test skipped due to known failure'
    # Allow for both boolean or callable known failure conditions.
    if callable(fail_condition):
        fail_val = fail_condition
    else:
        fail_val = lambda: fail_condition
    def knownfail_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
        # import time overhead at actual test-time.
        import nose
        def knownfailer(*args, **kwargs):
            # Evaluate the condition at call time, not at decoration time.
            if fail_val():
                raise KnownFailureTest(msg)
            else:
                return f(*args, **kwargs)
        return nose.tools.make_decorator(f)(knownfailer)
    return knownfail_decorator
def make_signature(func):
    """Return the argument list of *func*, rendering any default values.

    Returns
    -------
    tuple of (list of str, list of str)
        ``(args, arg_names)`` where ``args`` renders each argument as
        ``name`` or ``name=repr(default)`` (plus ``*varargs`` /
        ``**kwargs`` entries when present) and ``arg_names`` is the
        plain list of positional argument names.

    Examples
    --------
    >>> def f(a, b, c=2):
    ...     return a * b * c
    >>> make_signature(f)
    (['a', 'b', 'c=2'], ['a', 'b', 'c'])
    """
    # inspect.getargspec was removed in Python 3.11; prefer the modern
    # getfullargspec and fall back only on ancient interpreters.
    try:
        from inspect import getfullargspec as _argspec
    except ImportError:  # pragma: no cover - Python 2 only
        from inspect import getargspec as _argspec
    spec = _argspec(func)
    # Use a private sentinel instead of '' so that an argument whose
    # actual default IS the empty string still renders as name=''.
    _no_default = object()
    if spec.defaults is None:
        defaults = (_no_default,) * len(spec.args)
    else:
        n_wo_defaults = len(spec.args) - len(spec.defaults)
        defaults = (_no_default,) * n_wo_defaults + tuple(spec.defaults)
    args = [var if default is _no_default else '%s=%r' % (var, default)
            for var, default in zip(spec.args, defaults)]
    if spec.varargs:
        args.append('*' + spec.varargs)
    # getfullargspec names the **kwargs slot 'varkw'; getargspec used
    # 'keywords'.
    varkw = getattr(spec, 'varkw', None) or getattr(spec, 'keywords', None)
    if varkw:
        args.append('**' + varkw)
    return args, spec.args
| gpl-2.0 |
neilhan/tensorflow | tensorflow/examples/skflow/mnist_rnn.py | 14 | 2812 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example builds rnn network for mnist data.
Borrowed structure from here: https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3%20-%20Neural%20Networks/recurrent_network.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics, preprocessing
import tensorflow as tf
from tensorflow.contrib import learn
# Parameters
learning_rate = 0.1
training_steps = 3000
batch_size = 128

# Network Parameters
n_input = 28  # MNIST data input (img shape: 28*28)
n_steps = 28  # timesteps
n_hidden = 128  # hidden layer num of features
n_classes = 10  # MNIST total classes (0-9 digits)

### Download and load MNIST data.
mnist = learn.datasets.load_dataset('mnist')
X_train = mnist.train.images
y_train = mnist.train.labels
X_test = mnist.test.images
y_test = mnist.test.labels

# It's useful to scale to ensure Stochastic Gradient Descent will do the
# right thing.
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
# Fit the scaler on the training data only and merely *transform* the
# test set: the original called fit_transform here, which leaks test-set
# statistics into preprocessing and scales train/test inconsistently.
X_test = scaler.transform(X_test)
def rnn_model(X, y):
    """Build an LSTM over MNIST rows (28 time steps of 28 pixels) with a
    logistic-regression head.

    X : flat image batch, reshaped to (batch_size, n_steps, n_input).
    y : target labels, passed through to the head.
    Returns whatever learn.models.logistic_regression produces for the
    final LSTM state (predictions/loss pair in this contrib API).
    """
    X = tf.reshape(X, [-1, n_steps, n_input])  # (batch_size, n_steps, n_input)
    # # permute n_steps and batch_size
    X = tf.transpose(X, [1, 0, 2])
    # # Reshape to prepare input to hidden activation
    X = tf.reshape(X, [-1, n_input])  # (n_steps*batch_size, n_input)
    # # Split data because rnn cell needs a list of inputs for the RNN inner loop
    X = tf.split(0, n_steps, X)  # n_steps * (batch_size, n_input)
    # Define an LSTM cell (the original comment said GRU, but
    # BasicLSTMCell is what is actually constructed here).
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden)
    # Get lstm cell output; only the final encoding (state) is used.
    _, encoding = tf.nn.rnn(lstm_cell, X, dtype=tf.float32)
    return learn.models.logistic_regression(encoding, y)
# Wrap the model function in an estimator (deprecated contrib.learn API
# this example was written against), train it, and report test accuracy.
classifier = learn.TensorFlowEstimator(model_fn=rnn_model, n_classes=n_classes,
                                       batch_size=batch_size,
                                       steps=training_steps,
                                       learning_rate=learning_rate)
classifier.fit(X_train, y_train, logdir="/tmp/mnist_rnn")
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
xuleiboy1234/autoTitle | tensorflow/tensorflow/contrib/learn/__init__.py | 42 | 2596 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning.
See the @{$python/contrib.learn} guide.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNEstimator
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedEstimator
@@DNNLinearCombinedClassifier
@@DynamicRnnEstimator
@@LinearClassifier
@@LinearEstimator
@@LinearRegressor
@@LogisticRegressor
@@StateSavingRnnEstimator
@@SVM
@@SKCompat
@@Head
@@multi_class_head
@@multi_label_head
@@binary_svm_head
@@regression_head
@@poisson_regression_head
@@multi_head
@@no_op_train_fn
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@read_keyed_batch_examples
@@read_keyed_batch_examples_shared_queue
@@read_keyed_batch_features
@@read_keyed_batch_features_shared_queue
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.contrib.learn.python.learn import learn_runner_lib as learn_runner
from tensorflow.python.util.all_util import remove_undocumented
# Names kept public even though they are not listed in the module
# docstring above; everything else undocumented is stripped.
_allowed_symbols = ['datasets', 'head', 'io', 'learn_runner', 'models',
                    'monitors', 'NotFittedError', 'ops', 'preprocessing',
                    'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
| mit |
fabioticconi/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 5 | 8869 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
# Clustering metrics exercised by every generic test below; all share
# the (labels_true, labels_pred) signature.
score_funcs = [
    adjusted_rand_score,
    homogeneity_score,
    completeness_score,
    v_measure_score,
    adjusted_mutual_info_score,
    normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
    """Every metric must reject mismatched sizes and non-1D label arrays
    with an informative ValueError."""
    for score_func in score_funcs:
        expected = ('labels_true and labels_pred must have same size,'
                    ' got 2 and 3')
        assert_raise_message(ValueError, expected, score_func,
                             [0, 1], [1, 1, 1])
        expected = "labels_true must be 1D: shape is (2"
        assert_raise_message(ValueError, expected, score_func,
                             [[0, 1], [1, 0]], [1, 1, 1])
        expected = "labels_pred must be 1D: shape is (2"
        assert_raise_message(ValueError, expected, score_func,
                             [0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
    """Identical clusterings (up to label renaming/dtype) must score 1.0."""
    for score_func in score_funcs:
        assert_equal(score_func([], []), 1.0)
        assert_equal(score_func([0], [1]), 1.0)
        assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
        assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
        assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
        assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
        assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
    """Splitting a true class keeps homogeneity at 1 but lowers completeness."""
    # homogeneous but not complete clustering
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 0, 0, 1, 2, 2])
    assert_almost_equal(h, 1.00, 2)
    assert_almost_equal(c, 0.69, 2)
    assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
    """Merging true classes keeps completeness at 1 but lowers homogeneity."""
    # complete but not homogeneous clustering
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 1, 1, 2, 2],
        [0, 0, 1, 1, 1, 1])
    assert_almost_equal(h, 0.58, 2)
    assert_almost_equal(c, 1.00, 2)
    assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
    """A clustering that is neither homogeneous nor complete scores between
    the two extremes; pins known reference values."""
    # neither complete nor homogeneous but not so bad either
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
    """Scores must be invariant to gaps in the label values.

    NOTE(review): the function name has a typo ("consicutive" for
    "consecutive"); it is kept so external references to this test id
    keep working.
    """
    # regression tests for labels with gaps
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 2, 2, 2],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 4, 0, 4, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ari_1, 0.24, 2)
    assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
                             seed=42):
    """Score pairs of random uniform labelings against each other.

    Returns an array of shape (len(k_range), n_runs) whose (i, j) entry
    is score_func applied to two independent random labelings drawn
    uniformly from k_range[i] clusters over n_samples points.
    """
    rng = np.random.RandomState(seed)
    scores = np.zeros((len(k_range), n_runs))
    for row, k in enumerate(k_range):
        for col in range(n_runs):
            labels_a = rng.randint(low=0, high=k, size=n_samples)
            labels_b = rng.randint(low=0, high=k, size=n_samples)
            scores[row, col] = score_func(labels_a, labels_b)
    return scores
def test_adjustment_for_chance():
    """Adjusted Rand index of random labelings must stay near zero."""
    # Check that adjusted scores are almost zero on random labels
    n_clusters_range = [2, 10, 50, 90]
    n_samples = 100
    n_runs = 10
    scores = uniform_labelings_scores(
        adjusted_rand_score, n_samples, n_clusters_range, n_runs)
    max_abs_scores = np.abs(scores).max(axis=1)
    assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
    # Compute the Adjusted Mutual Information and test against known values
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    # Mutual information
    mi = mutual_info_score(labels_a, labels_b)
    assert_almost_equal(mi, 0.41022, 5)
    # Expected mutual information (under the permutation model)
    C = contingency_matrix(labels_a, labels_b)
    n_samples = np.sum(C)
    emi = expected_mutual_information(C, n_samples)
    assert_almost_equal(emi, 0.15042, 5)
    # Adjusted mutual information: (mi - emi) / (max entropy - emi)
    ami = adjusted_mutual_info_score(labels_a, labels_b)
    assert_almost_equal(ami, 0.27502, 5)
    ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
    assert_equal(ami, 1.0)
    # Test with a very large array
    a110 = np.array([list(labels_a) * 110]).flatten()
    b110 = np.array([list(labels_b) * 110]).flatten()
    ami = adjusted_mutual_info_score(a110, b110)
    # This is not accurate to more than 2 places
    assert_almost_equal(ami, 0.37, 2)
def test_entropy():
    """Pin entropy on a small labeling; empty input is defined as 1."""
    ent = entropy([0, 0, 42.])
    assert_almost_equal(ent, 0.6365141, 5)
    assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
    """contingency_matrix must match a 2-D histogram; eps shifts all cells."""
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    C = contingency_matrix(labels_a, labels_b)
    C2 = np.histogram2d(labels_a, labels_b,
                        bins=(np.arange(1, 5),
                              np.arange(1, 5)))[0]
    assert_array_almost_equal(C, C2)
    C = contingency_matrix(labels_a, labels_b, eps=.1)
    assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
    """Degenerate labelings (single cluster vs. all-singletons) must score
    exactly 0.0 at several input sizes, without numerical noise."""
    # Check numerical stability when information is exactly zero.
    # `np.int` was a deprecated alias of the builtin `int` and was removed
    # in NumPy 1.24; use `int` directly.
    for i in np.logspace(1, 4, 4).astype(int):
        labels_a, labels_b = np.ones(i, dtype=int),\
            np.arange(i, dtype=int)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b,
                                                  max_n_classes=1e4), 0.0)
        assert_equal(v_measure_score(labels_a, labels_b,
                                     max_n_classes=1e4), 0.0)
        assert_equal(adjusted_mutual_info_score(labels_a, labels_b,
                                                max_n_classes=1e4), 0.0)
        # NOTE(review): this repeats the first NMI assertion; kept as-is
        # to preserve the original test's exact behavior.
        assert_equal(normalized_mutual_info_score(labels_a, labels_b,
                                                  max_n_classes=1e4), 0.0)
def test_v_measure_and_mutual_information(seed=36):
    """V-measure equals 2*MI / (H(a) + H(b)) on random labelings."""
    # Check relation between v_measure, entropy and mutual information.
    # `np.int` was a deprecated alias of the builtin `int` and was removed
    # in NumPy 1.24; use `int` directly.
    for i in np.logspace(1, 4, 4).astype(int):
        random_state = np.random.RandomState(seed)
        labels_a, labels_b = random_state.randint(0, 10, i),\
            random_state.randint(0, 10, i)
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            2.0 * mutual_info_score(labels_a, labels_b) /
                            (entropy(labels_a) + entropy(labels_b)), 0)
def test_max_n_classes():
    """Every metric must refuse inputs whose number of classes/clusters
    exceeds the max_n_classes safety limit."""
    rng = np.random.RandomState(seed=0)
    # Random floats give 53 distinct "classes"; forcing a couple of
    # duplicates keeps the arrays valid label vectors.
    labels_true = rng.rand(53)
    labels_pred = rng.rand(53)
    labels_zero = np.zeros(53)
    labels_true[:2] = 0
    labels_zero[:3] = 1
    labels_pred[:2] = 0
    for score_func in score_funcs:
        expected = ("Too many classes for a clustering metric. If you "
                    "want to increase the limit, pass parameter "
                    "max_n_classes to the scoring function")
        assert_raise_message(ValueError, expected, score_func,
                             labels_true, labels_pred,
                             max_n_classes=50)
        expected = ("Too many clusters for a clustering metric. If you "
                    "want to increase the limit, pass parameter "
                    "max_n_classes to the scoring function")
        assert_raise_message(ValueError, expected, score_func,
                             labels_zero, labels_pred,
                             max_n_classes=50)
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/finance.py | 10 | 51311 | """
A collection of functions for collecting, analyzing and plotting
financial data. User contributions welcome!
This module is deprecated in 1.4 and will be moved to `mpl_toolkits`
or it's own project in the future.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import contextlib
import os
import sys
import warnings
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
if six.PY3:
import hashlib
md5 = lambda x: hashlib.md5(x.encode())
else:
from hashlib import md5
import datetime
import numpy as np
from matplotlib import verbose, get_cachedir
from matplotlib.dates import date2num
from matplotlib.cbook import iterable, mkdirs
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from matplotlib.lines import Line2D, TICKLEFT, TICKRIGHT
from matplotlib.patches import Rectangle
from matplotlib.transforms import Affine2D
from matplotlib.cbook import mplDeprecation
# Where downloaded quote data is cached on disk (None disables caching).
cachedir = get_cachedir()
# cachedir will be None if there is no writable directory.
if cachedir is not None:
    cachedir = os.path.join(cachedir, 'finance.cache')
else:
    # Should only happen in a restricted environment (such as Google App
    # Engine). Deal with this gracefully by not caching finance data.
    cachedir = None
# Record dtypes for parsed quote data.  The str() wrappers keep field
# names as native str on Python 2 despite the unicode_literals import at
# the top of this module (numpy requires native-str field names there).
# _ohlc = open-high-low-close column order, _ochl = open-close-high-low.
stock_dt_ohlc = np.dtype([
    (str('date'), object),
    (str('year'), np.int16),
    (str('month'), np.int8),
    (str('day'), np.int8),
    (str('d'), np.float),  # mpl datenum
    (str('open'), np.float),
    (str('high'), np.float),
    (str('low'), np.float),
    (str('close'), np.float),
    (str('volume'), np.float),
    (str('aclose'), np.float)])
stock_dt_ochl = np.dtype(
    [(str('date'), object),
     (str('year'), np.int16),
     (str('month'), np.int8),
     (str('day'), np.int8),
     (str('d'), np.float),  # mpl datenum
     (str('open'), np.float),
     (str('close'), np.float),
     (str('high'), np.float),
     (str('low'), np.float),
     (str('volume'), np.float),
     (str('aclose'), np.float)])
# Deprecation message template shared by the legacy *-ordered wrappers.
_warn_str = ("This function has been deprecated in 1.4 in favor "
             "of `{fun}_ochl`, "
             "which maintains the original argument order, "
             "or `{fun}_ohlc`, "
             "which uses the open-high-low-close order. "
             "This function will be removed in 1.5")
def parse_yahoo_historical_ochl(fh, adjusted=True, asobject=False):
    """Parse Yahoo Finance historical data from file handle *fh*.

    Result columns use the open-close-high-low order.

    Parameters
    ----------
    adjusted : bool
        If True (default), scale open/close/high/low by the factor
        adjusted_close/close.  Volume is left untouched (Yahoo already
        backward split-adjusts it); to compute dollars traded, multiply
        volume by the adjusted close regardless of this flag.
    asobject : bool or None
        False (legacy default) -> list of
        (d, open, close, high, low, volume) tuples;
        None (preferred) -> 2-D ndarray with the same columns;
        any other truthy value -> numpy recarray with fields
        date, year, month, day, d, open, close, high, low, volume,
        aclose, where ``d`` is the float datenum from date2num and
        ``date`` a ``datetime.date``.
    """
    return _parse_yahoo_historical(fh, adjusted=adjusted,
                                   asobject=asobject, ochl=True)
def parse_yahoo_historical_ohlc(fh, adjusted=True, asobject=False):
    """Parse Yahoo Finance historical data from file handle *fh*.

    Result columns use the open-high-low-close order.

    Parameters
    ----------
    adjusted : bool
        If True (default), scale open/high/low/close by the factor
        adjusted_close/close.  Volume is left untouched (Yahoo already
        backward split-adjusts it); to compute dollars traded, multiply
        volume by the adjusted close regardless of this flag.
    asobject : bool or None
        False (legacy default) -> list of
        (d, open, high, low, close, volume) tuples;
        None (preferred) -> 2-D ndarray with the same columns;
        any other truthy value -> numpy recarray with fields
        date, year, month, day, d, open, high, low, close, volume,
        aclose, where ``d`` is the float datenum from date2num and
        ``date`` a ``datetime.date``.
    """
    return _parse_yahoo_historical(fh, adjusted=adjusted,
                                   asobject=asobject, ochl=False)
def parse_yahoo_historical(fh, adjusted=True, asobject=False):
    """Deprecated alias for :func:`parse_yahoo_historical_ochl`.

    Emits a matplotlib deprecation warning, then parses *fh* using the
    historical open-close-high-low column order.  See
    :func:`parse_yahoo_historical_ochl` for the meaning of *adjusted*
    and *asobject*.  Scheduled for removal in 1.5; new code should call
    the explicitly-ordered ``_ochl`` / ``_ohlc`` variants instead.
    """
    warnings.warn(_warn_str.format(fun='parse_yahoo_historical'),
                  mplDeprecation)
    return _parse_yahoo_historical(fh, adjusted=adjusted,
                                   asobject=asobject, ochl=True)
def _parse_yahoo_historical(fh, adjusted=True, asobject=False,
                            ochl=True):
    """Parse the historical data in file handle fh from yahoo finance.

    The input is Yahoo's CSV: a header line, then
    ``date,open,high,low,close,volume,adj_close`` rows in reverse
    chronological order (they are returned oldest-first).

    Parameters
    ----------
    adjusted : bool
        If True (default), scale the price columns by
        adjusted_close/close.  Volume is not adjusted (Yahoo already
        backward split-adjusts it).
    asobject : bool or None
        False -> list of 6-tuples (d, then the four prices in *ochl*
        order, then volume); None -> 2-D ndarray of the same columns;
        any other truthy value -> numpy recarray with fields date,
        year, month, day, d, prices, volume, aclose, where ``d`` is the
        float datenum of the python ``datetime.date`` in ``date``.
    ochl : bool
        Selects between open-close-high-low (True, historical default)
        and open-high-low-close ordering of the price columns.
    """
    if ochl:
        stock_dt = stock_dt_ochl
    else:
        stock_dt = stock_dt_ohlc
    results = []
    # datefmt = '%Y-%m-%d'
    fh.readline()  # discard heading
    for line in fh:
        vals = line.split(',')
        if len(vals) != 7:
            continue  # malformed row; silently skipped (add warning?)
        datestr = vals[0]
        #dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
        # Using strptime doubles the runtime. With the present
        # format, we don't need it.
        dt = datetime.date(*[int(val) for val in datestr.split('-')])
        dnum = date2num(dt)
        # NOTE: `open` shadows the builtin within this loop body.
        open, high, low, close = [float(val) for val in vals[1:5]]
        volume = float(vals[5])
        aclose = float(vals[6])
        if ochl:
            results.append((dt, dt.year, dt.month, dt.day,
                            dnum, open, close, high, low, volume, aclose))
        else:
            results.append((dt, dt.year, dt.month, dt.day,
                            dnum, open, high, low, close, volume, aclose))
    # Yahoo serves newest-first; callers expect chronological order.
    results.reverse()
    d = np.array(results, dtype=stock_dt)
    if adjusted:
        scale = d['aclose'] / d['close']
        # A zero close yields inf; mark those rows NaN instead.
        scale[np.isinf(scale)] = np.nan
        d['open'] *= scale
        d['high'] *= scale
        d['low'] *= scale
        d['close'] *= scale
    if not asobject:
        # 2-D sequence; formerly list of tuples, now ndarray.
        # `np.float` was a deprecated alias of the builtin `float` and was
        # removed in NumPy 1.24; use `float` directly.
        ret = np.zeros((len(d), 6), dtype=float)
        ret[:, 0] = d['d']
        if ochl:
            ret[:, 1] = d['open']
            ret[:, 2] = d['close']
            ret[:, 3] = d['high']
            ret[:, 4] = d['low']
        else:
            ret[:, 1] = d['open']
            ret[:, 2] = d['high']
            ret[:, 3] = d['low']
            ret[:, 4] = d['close']
        ret[:, 5] = d['volume']
        if asobject is None:
            return ret
        return [tuple(row) for row in ret]
    return d.view(np.recarray)  # Close enough to former Bunch return
def fetch_historical_yahoo(ticker, date1, date2, cachename=None,
                           dividends=False):
    """
    Fetch historical data for ticker between date1 and date2. date1 and
    date2 are date or datetime instances, or (year, month, day) sequences.

    Parameters
    ----------
    ticker : str
        ticker

    date1 : sequence of form (year, month, day), `datetime`, or `date`
        start date
    date2 : sequence of form (year, month, day), `datetime`, or `date`
        end date

    cachename : str
        cachename is the name of the local file cache. If None, will
        default to the md5 hash or the url (which incorporates the ticker
        and date range)

    dividends : bool
        set dividends=True to return dividends instead of price data. With
        this option set, parse functions will not work

    Returns
    -------
    file_handle : file handle
        a file handle is returned


    Examples
    --------
    >>> fh = fetch_historical_yahoo('^GSPC', (2000, 1, 1), (2001, 12, 31))

    """
    ticker = ticker.upper()

    def _url_triplet(when):
        # Yahoo's URL scheme wants (zero-based month, day, year).
        if iterable(when):
            return (when[1] - 1, when[2], when[0])
        return (when.month - 1, when.day, when.year)

    d1 = _url_triplet(date1)
    d2 = _url_triplet(date2)

    if dividends:
        g = 'v'
        verbose.report('Retrieving dividends instead of prices')
    else:
        g = 'd'

    urlFmt = ('http://ichart.yahoo.com/table.csv?a=%d&b=%d&'
              'c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=%s&ignore=.csv')
    url = urlFmt % (d1 + d2 + (ticker, g))

    # Cache the finance data if cachename is supplied, or there is a writable
    # cache directory.
    if cachename is None and cachedir is not None:
        cachename = os.path.join(cachedir, md5(url).hexdigest())

    if cachename is None:
        # No caching possible: stream straight from the network.
        return urlopen(url)

    if os.path.exists(cachename):
        fh = open(cachename)
        verbose.report('Using cachefile %s for '
                       '%s' % (cachename, ticker))
        return fh

    # Cache miss: download, persist, then reopen the cached copy.
    mkdirs(os.path.abspath(os.path.dirname(cachename)))
    with contextlib.closing(urlopen(url)) as urlfh:
        with open(cachename, 'wb') as fh:
            fh.write(urlfh.read())
    verbose.report('Saved %s data to cache file '
                   '%s' % (ticker, cachename))
    return open(cachename, 'r')
def quotes_historical_yahoo(ticker, date1, date2, asobject=False,
                            adjusted=True, cachename=None):
    """ Get historical data for ticker between date1 and date2.

    This function has been deprecated in 1.4 in favor of
    `quotes_yahoo_historical_ochl`, which maintains the original argument
    order, or `quotes_yahoo_historical_ohlc`, which uses the
    open-high-low-close order. This function will be removed in 1.5


    See :func:`parse_yahoo_historical` for explanation of output formats
    and the *asobject* and *adjusted* kwargs.

    Parameters
    ----------
    ticker : str
        stock ticker
    date1 : sequence of form (year, month, day), `datetime`, or `date`
        start date
    date2 : sequence of form (year, month, day), `datetime`, or `date`
        end date
    cachename : str or `None`
        is the name of the local file cache. If None, will
        default to the md5 hash or the url (which incorporates the ticker
        and date range)

    Examples
    --------
    >>> sp = f.quotes_historical_yahoo('^GSPC', d1, d2,
                             asobject=True, adjusted=True)
    >>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
    >>> [n,bins,patches] = hist(returns, 100)
    >>> mu = mean(returns)
    >>> sigma = std(returns)
    >>> x = normpdf(bins, mu, sigma)
    >>> plot(bins, x, color='red', lw=2)

    """
    warnings.warn(_warn_str.format(fun='quotes_historical_yahoo'),
                  mplDeprecation)

    # Delegate to the private worker in the legacy open-close-high-low order.
    kwargs = dict(asobject=asobject, adjusted=adjusted,
                  cachename=cachename, ochl=True)
    return _quotes_historical_yahoo(ticker, date1, date2, **kwargs)
def quotes_historical_yahoo_ochl(ticker, date1, date2, asobject=False,
                                 adjusted=True, cachename=None):
    """ Get historical data for ticker between date1 and date2.

    See :func:`parse_yahoo_historical` for explanation of output formats
    and the *asobject* and *adjusted* kwargs.

    Parameters
    ----------
    ticker : str
        stock ticker
    date1 : sequence of form (year, month, day), `datetime`, or `date`
        start date
    date2 : sequence of form (year, month, day), `datetime`, or `date`
        end date
    cachename : str or `None`
        is the name of the local file cache. If None, will
        default to the md5 hash or the url (which incorporates the ticker
        and date range)

    Examples
    --------
    >>> sp = f.quotes_historical_yahoo_ochl('^GSPC', d1, d2,
                             asobject=True, adjusted=True)
    >>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
    >>> [n,bins,patches] = hist(returns, 100)
    >>> mu = mean(returns)
    >>> sigma = std(returns)
    >>> x = normpdf(bins, mu, sigma)
    >>> plot(bins, x, color='red', lw=2)

    """
    # Open-close-high-low variant: forward with ochl=True.
    return _quotes_historical_yahoo(ticker, date1, date2, ochl=True,
                                    asobject=asobject, adjusted=adjusted,
                                    cachename=cachename)
def quotes_historical_yahoo_ohlc(ticker, date1, date2, asobject=False,
                                 adjusted=True, cachename=None):
    """ Get historical data for ticker between date1 and date2.

    See :func:`parse_yahoo_historical` for explanation of output formats
    and the *asobject* and *adjusted* kwargs.

    Parameters
    ----------
    ticker : str
        stock ticker
    date1 : sequence of form (year, month, day), `datetime`, or `date`
        start date
    date2 : sequence of form (year, month, day), `datetime`, or `date`
        end date
    cachename : str or `None`
        is the name of the local file cache. If None, will
        default to the md5 hash or the url (which incorporates the ticker
        and date range)

    Examples
    --------
    >>> sp = f.quotes_historical_yahoo_ohlc('^GSPC', d1, d2,
                             asobject=True, adjusted=True)
    >>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
    >>> [n,bins,patches] = hist(returns, 100)
    >>> mu = mean(returns)
    >>> sigma = std(returns)
    >>> x = normpdf(bins, mu, sigma)
    >>> plot(bins, x, color='red', lw=2)

    """
    # Open-high-low-close variant: forward with ochl=False.
    return _quotes_historical_yahoo(ticker, date1, date2, ochl=False,
                                    asobject=asobject, adjusted=adjusted,
                                    cachename=cachename)
def _quotes_historical_yahoo(ticker, date1, date2, asobject=False,
                             adjusted=True, cachename=None,
                             ochl=True):
    """ Get historical data for ticker between date1 and date2.

    See :func:`parse_yahoo_historical` for explanation of output formats
    and the *asobject* and *adjusted* kwargs.

    Parameters
    ----------
    ticker : str
        stock ticker
    date1 : sequence of form (year, month, day), `datetime`, or `date`
        start date
    date2 : sequence of form (year, month, day), `datetime`, or `date`
        end date
    cachename : str or `None`
        is the name of the local file cache. If None, will
        default to the md5 hash or the url (which incorporates the ticker
        and date range)
    ochl: bool
        temporary argument to select between ochl and ohlc ordering

    Returns
    -------
    Parsed quotes (see :func:`parse_yahoo_historical`), or None on an
    empty result or an IOError while fetching/parsing.

    Examples
    --------
    >>> sp = f.quotes_historical_yahoo('^GSPC', d1, d2,
                             asobject=True, adjusted=True)
    >>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
    >>> [n,bins,patches] = hist(returns, 100)
    >>> mu = mean(returns)
    >>> sigma = std(returns)
    >>> x = normpdf(bins, mu, sigma)
    >>> plot(bins, x, color='red', lw=2)

    """
    # Maybe enable a warning later as part of a slow transition
    # to using None instead of False.
    #if asobject is False:
    #    warnings.warn("Recommend changing to asobject=None")

    fh = fetch_historical_yahoo(ticker, date1, date2, cachename)

    try:
        ret = _parse_yahoo_historical(fh, asobject=asobject,
                                      adjusted=adjusted, ochl=ochl)
        if len(ret) == 0:
            return None
    except IOError as exc:
        # Bug fix: exc.strerror is a string, so the former
        # exc.strerror[1] (a Python 2 tuple-exception leftover)
        # put a single character in the warning message.
        warnings.warn('fh failure\n%s' % exc.strerror)
        return None

    return ret
def plot_day_summary(ax, quotes, ticksize=3,
                     colorup='k', colordown='r',
                     ):
    """Plots day summary

        Represent the time, open, close, high, low as a vertical line
        ranging from low to high. The left tick is the open and the right
        tick is the close.

    This function has been deprecated in 1.4 in favor of
    `plot_day_summary_ochl`, which maintains the original argument
    order, or `plot_day_summary_ohlc`, which uses the
    open-high-low-close order. This function will be removed in 1.5


    Parameters
    ----------
    ax : `Axes`
        an `Axes` instance to plot to
    quotes : sequence of (time, open, close, high, low, ...) sequences
        data to plot.  time must be in float date format - see date2num
    ticksize : int
        open/close tick marker in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close <  open

    Returns
    -------
    lines : list
        list of tuples of the lines added (one tuple per quote)
    """
    warnings.warn(_warn_str.format(fun='plot_day_summary'),
                  mplDeprecation)
    # Deprecated alias: identical to the ochl-ordered private worker.
    return _plot_day_summary(ax, quotes, ochl=True, ticksize=ticksize,
                             colorup=colorup, colordown=colordown)
def plot_day_summary_oclh(ax, quotes, ticksize=3,
                          colorup='k', colordown='r',
                          ):
    """Plots day summary

        Represent the time, open, close, high, low as a vertical line
        ranging from low to high. The left tick is the open and the right
        tick is the close.

    NOTE: the public name spells 'oclh'; it is kept as-is for backward
    compatibility even though the quote ordering is open-close-high-low.

    Parameters
    ----------
    ax : `Axes`
        an `Axes` instance to plot to
    quotes : sequence of (time, open, close, high, low, ...) sequences
        data to plot.  time must be in float date format - see date2num
    ticksize : int
        open/close tick marker in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close <  open

    Returns
    -------
    lines : list
        list of tuples of the lines added (one tuple per quote)
    """
    return _plot_day_summary(ax, quotes, ochl=True, ticksize=ticksize,
                             colorup=colorup, colordown=colordown)
def plot_day_summary_ohlc(ax, quotes, ticksize=3,
                          colorup='k', colordown='r',
                          ):
    """Plots day summary

        Represent the time, open, high, low, close as a vertical line
        ranging from low to high. The left tick is the open and the right
        tick is the close.

    Parameters
    ----------
    ax : `Axes`
        an `Axes` instance to plot to
    quotes : sequence of (time, open, high, low, close, ...) sequences
        data to plot.  time must be in float date format - see date2num
    ticksize : int
        open/close tick marker in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close <  open

    Returns
    -------
    lines : list
        list of tuples of the lines added (one tuple per quote)
    """
    return _plot_day_summary(ax, quotes, ochl=False, ticksize=ticksize,
                             colorup=colorup, colordown=colordown)
def _plot_day_summary(ax, quotes, ticksize=3,
                     colorup='k', colordown='r',
                     ochl=True
                     ):
    """Plots day summary


        Represent the time, open, high, low, close as a vertical line
        ranging from low to high. The left tick is the open and the right
        tick is the close.



    Parameters
    ----------
    ax : `Axes`
        an `Axes` instance to plot to
    quotes : sequence of quote sequences
        data to plot.  time must be in float date format - see date2num
        (time, open, high, low, close, ...) vs
        (time, open, close, high, low, ...)
        set by `ochl`
    ticksize : int
        open/close tick marker in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close <  open
    ochl: bool
        argument to select between ochl and ohlc ordering of quotes

    Returns
    -------
    lines : list
        list of tuples of the lines added (one tuple per quote)
    """
    # unfortunately this has a different return type than plot_day_summary2_*
    lines = []
    for q in quotes:
        # Unpack the first five fields; order depends on `ochl`.
        if ochl:
            t, open, close, high, low = q[:5]
        else:
            t, open, high, low, close = q[:5]

        # Up-days use colorup, down-days colordown.
        if close >= open:
            color = colorup
        else:
            color = colordown

        # Vertical line spanning the day's low-high range.
        vline = Line2D(xdata=(t, t), ydata=(low, high),
                       color=color,
                       antialiased=False,   # no need to antialias vert lines
                       )

        # Left tick marking the open price.
        oline = Line2D(xdata=(t, t), ydata=(open, open),
                       color=color,
                       antialiased=False,
                       marker=TICKLEFT,
                       markersize=ticksize,
                       )

        # Right tick marking the close price.
        cline = Line2D(xdata=(t, t), ydata=(close, close),
                       color=color,
                       antialiased=False,
                       markersize=ticksize,
                       marker=TICKRIGHT)

        lines.extend((vline, oline, cline))
        ax.add_line(vline)
        ax.add_line(oline)
        ax.add_line(cline)

    ax.autoscale_view()

    return lines
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
                alpha=1.0):
    """
    Plot the time, open, close, high, low as a vertical line ranging
    from low to high.  Use a rectangular bar to represent the
    open-close span.  If close >= open, use colorup to color the bar,
    otherwise use colordown

    This function has been deprecated in 1.4 in favor of
    `candlestick_ochl`, which maintains the original argument
    order, or `candlestick_ohlc`, which uses the
    open-high-low-close order. This function will be removed in 1.5


    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    quotes : sequence of (time, open, close, high, low, ...) sequences
        As long as the first 5 elements are these values,
        the record can be as long as you want (e.g., it may store volume).

        time must be in float days format - see date2num

    width : float
        fraction of a day for the rectangle width
    colorup : color
        the color of the rectangle where close >= open
    colordown : color
         the color of the rectangle where close <  open
    alpha : float
        the rectangle alpha level

    Returns
    -------
    ret : tuple
        returns (lines, patches) where lines is a list of lines
        added and patches is a list of the rectangle patches added

    """
    warnings.warn(_warn_str.format(fun='candlestick'),
                  mplDeprecation)
    # Deprecated alias for the ochl-ordered private implementation.
    kwargs = dict(width=width, colorup=colorup, colordown=colordown,
                  alpha=alpha, ochl=True)
    return _candlestick(ax, quotes, **kwargs)
def candlestick_ochl(ax, quotes, width=0.2, colorup='k', colordown='r',
                     alpha=1.0):
    """
    Plot the time, open, close, high, low as a vertical line ranging
    from low to high.  Use a rectangular bar to represent the
    open-close span.  If close >= open, use colorup to color the bar,
    otherwise use colordown

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    quotes : sequence of (time, open, close, high, low, ...) sequences
        As long as the first 5 elements are these values,
        the record can be as long as you want (e.g., it may store volume).

        time must be in float days format - see date2num

    width : float
        fraction of a day for the rectangle width
    colorup : color
        the color of the rectangle where close >= open
    colordown : color
         the color of the rectangle where close <  open
    alpha : float
        the rectangle alpha level

    Returns
    -------
    ret : tuple
        returns (lines, patches) where lines is a list of lines
        added and patches is a list of the rectangle patches added

    """
    # Open-close-high-low ordering: forward with ochl=True.
    return _candlestick(ax, quotes, ochl=True, width=width,
                        colorup=colorup, colordown=colordown, alpha=alpha)
def candlestick_ohlc(ax, quotes, width=0.2, colorup='k', colordown='r',
                     alpha=1.0):
    """
    Plot the time, open, high, low, close as a vertical line ranging
    from low to high.  Use a rectangular bar to represent the
    open-close span.  If close >= open, use colorup to color the bar,
    otherwise use colordown

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    quotes : sequence of (time, open, high, low, close, ...) sequences
        As long as the first 5 elements are these values,
        the record can be as long as you want (e.g., it may store volume).

        time must be in float days format - see date2num

    width : float
        fraction of a day for the rectangle width
    colorup : color
        the color of the rectangle where close >= open
    colordown : color
         the color of the rectangle where close <  open
    alpha : float
        the rectangle alpha level

    Returns
    -------
    ret : tuple
        returns (lines, patches) where lines is a list of lines
        added and patches is a list of the rectangle patches added

    """
    # Open-high-low-close ordering: forward with ochl=False.
    return _candlestick(ax, quotes, ochl=False, width=width,
                        colorup=colorup, colordown=colordown, alpha=alpha)
def _candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
                 alpha=1.0, ochl=True):
    """
    Plot the time, open, high, low, close as a vertical line ranging
    from low to high.  Use a rectangular bar to represent the
    open-close span.  If close >= open, use colorup to color the bar,
    otherwise use colordown

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    quotes : sequence of quote sequences
        data to plot.  time must be in float date format - see date2num
        (time, open, high, low, close, ...) vs
        (time, open, close, high, low, ...)
        set by `ochl`
    width : float
        fraction of a day for the rectangle width
    colorup : color
        the color of the rectangle where close >= open
    colordown : color
         the color of the rectangle where close <  open
    alpha : float
        the rectangle alpha level
    ochl: bool
        argument to select between ochl and ohlc ordering of quotes

    Returns
    -------
    ret : tuple
        returns (lines, patches) where lines is a list of lines
        added and patches is a list of the rectangle patches added

    """

    # Body rectangles are centered on t, so offset by half the width.
    OFFSET = width / 2.0

    lines = []
    patches = []
    for q in quotes:
        # Unpack the first five fields; order depends on `ochl`.
        if ochl:
            t, open, close, high, low = q[:5]
        else:
            t, open, high, low, close = q[:5]

        # The body always spans min(open, close)..max(open, close);
        # color encodes the direction of the move.
        if close >= open:
            color = colorup
            lower = open
            height = close - open
        else:
            color = colordown
            lower = close
            height = open - close

        # Thin wick covering the day's full low-high range.
        vline = Line2D(
            xdata=(t, t), ydata=(low, high),
            color=color,
            linewidth=0.5,
            antialiased=True,
        )

        # Candle body for the open-close span.
        rect = Rectangle(
            xy=(t - OFFSET, lower),
            width = width,
            height = height,
            facecolor = color,
            edgecolor = color,
        )
        rect.set_alpha(alpha)

        lines.append(vline)
        patches.append(rect)
        ax.add_line(vline)
        ax.add_patch(rect)
    ax.autoscale_view()

    return lines, patches
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
                      colorup='k', colordown='r',
                      ):
    """Represent the time, open, close, high, low,  as a vertical line
    ranging from low to high.  The left tick is the open and the right
    tick is the close.

    This function has been deprecated in 1.4 in favor of
    `plot_day_summary2_ochl`, which maintains the original argument
    order, or `plot_day_summary2_ohlc`, which uses the
    open-high-low-close order. This function will be removed in 1.5


    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    closes : sequence
        sequence of closing values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    ticksize : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
         the color of the lines where close <  open

    Returns
    -------
    ret : list
        a list of lines added to the axes
    """
    warnings.warn(_warn_str.format(fun='plot_day_summary2'), mplDeprecation)
    # Reorder the per-column sequences into ohlc order and delegate.
    return plot_day_summary2_ohlc(ax, opens, highs, lows, closes,
                                  ticksize, colorup, colordown)
def plot_day_summary2_ochl(ax, opens, closes, highs, lows, ticksize=4,
                           colorup='k', colordown='r',
                           ):
    """Represent the time, open, close, high, low,  as a vertical line
    ranging from low to high.  The left tick is the open and the right
    tick is the close.

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    closes : sequence
        sequence of closing values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    ticksize : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
         the color of the lines where close <  open

    Returns
    -------
    ret : list
        a list of lines added to the axes
    """
    # Reorder the per-column sequences into ohlc order and delegate.
    return plot_day_summary2_ohlc(ax, opens, highs, lows, closes,
                                  ticksize, colorup, colordown)
def plot_day_summary2_ohlc(ax, opens, highs, lows, closes, ticksize=4,
                           colorup='k', colordown='r',
                           ):
    """Represent the time, open, high, low, close as a vertical line
    ranging from low to high.  The left tick is the open and the right
    tick is the close.

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    closes : sequence
        sequence of closing values
    ticksize : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
         the color of the lines where close <  open

    Returns
    -------
    ret : list
        a list of lines added to the axes
    """
    # note this code assumes if any value open, high, low, close is
    # missing they all are missing

    # One low-high segment per non-missing day, at x = day index.
    rangeSegments = [((i, low), (i, high)) for i, low, high in
                     zip(xrange(len(lows)), lows, highs) if low != -1]

    # the ticks will be from ticksize to 0 in points at the origin and
    # we'll translate these to the i, close location
    openSegments = [((-ticksize, 0), (0, 0))]

    # the ticks will be from 0 to ticksize in points at the origin and
    # we'll translate these to the i, close location
    closeSegments = [((0, 0), (ticksize, 0))]

    # Per-day offsets for the single prototype open/close tick segments.
    offsetsOpen = [(i, open) for i, open in
                   zip(xrange(len(opens)), opens) if open != -1]

    offsetsClose = [(i, close) for i, close in
                    zip(xrange(len(closes)), closes) if close != -1]

    # Tick lengths are given in points; scale to display units via dpi.
    scale = ax.figure.dpi * (1.0 / 72.0)

    tickTransform = Affine2D().scale(scale, 0.0)

    # Normalise colors to RGBA (alpha=1).
    r, g, b = colorConverter.to_rgb(colorup)
    colorup = r, g, b, 1
    r, g, b = colorConverter.to_rgb(colordown)
    colordown = r, g, b, 1
    colord = {True: colorup,
              False: colordown,
              }
    colors = [colord[open < close] for open, close in
              zip(opens, closes) if open != -1 and close != -1]

    # Missing values must be missing in every column (see note above).
    assert(len(rangeSegments) == len(offsetsOpen))
    assert(len(offsetsOpen) == len(offsetsClose))
    assert(len(offsetsClose) == len(colors))

    useAA = 0,  # use tuple here
    lw = 1,  # and here

    rangeCollection = LineCollection(rangeSegments,
                                     colors=colors,
                                     linewidths=lw,
                                     antialiaseds=useAA,
                                     )

    openCollection = LineCollection(openSegments,
                                    colors=colors,
                                    antialiaseds=useAA,
                                    linewidths=lw,
                                    offsets=offsetsOpen,
                                    transOffset=ax.transData,
                                    )
    openCollection.set_transform(tickTransform)

    closeCollection = LineCollection(closeSegments,
                                     colors=colors,
                                     antialiaseds=useAA,
                                     linewidths=lw,
                                     offsets=offsetsClose,
                                     transOffset=ax.transData,
                                     )
    closeCollection.set_transform(tickTransform)

    # Expand the data limits to cover all plotted days and prices.
    minpy, maxx = (0, len(rangeSegments))
    miny = min([low for low in lows if low != -1])
    maxy = max([high for high in highs if high != -1])
    corners = (minpy, miny), (maxx, maxy)
    ax.update_datalim(corners)
    ax.autoscale_view()

    # add these last
    ax.add_collection(rangeCollection)
    ax.add_collection(openCollection)
    ax.add_collection(closeCollection)
    return rangeCollection, openCollection, closeCollection
def candlestick2_ochl(ax, opens, closes, highs, lows, width=4,
                      colorup='k', colordown='r',
                      alpha=0.75,
                      ):
    """Represent the open, close as a bar line and high low range as a
    vertical line.

    Preserves the original argument order.


    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    closes : sequence
        sequence of closing values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    ticksize : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close <  open
    alpha : float
        bar transparency

    Returns
    -------
    ret : tuple
        (lineCollection, barCollection)
    """
    # Bug fix: arguments were previously forwarded as
    # (opens, highs, closes, lows), swapping the lows and closes
    # columns, and the documented return value was silently discarded.
    return candlestick2_ohlc(ax, opens, highs, lows, closes, width=width,
                             colorup=colorup, colordown=colordown,
                             alpha=alpha)
def candlestick2(ax, opens, closes, highs, lows, width=4,
                 colorup='k', colordown='r',
                 alpha=0.75,
                 ):
    """Represent the open, close as a bar line and high low range as a
    vertical line.

    This function has been deprecated in 1.4 in favor of
    `candlestick2_ochl`, which maintains the original argument order,
    or `candlestick2_ohlc`, which uses the open-high-low-close order.
    This function will be removed in 1.5


    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    closes : sequence
        sequence of closing values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    ticksize : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close <  open
    alpha : float
        bar transparency

    Returns
    -------
    ret : tuple
        (lineCollection, barCollection)
    """
    warnings.warn(_warn_str.format(fun='candlestick2'),
                  mplDeprecation)
    # Bug fix: return the documented (lineCollection, barCollection)
    # tuple; previously the result was silently discarded.
    return candlestick2_ohlc(ax, opens, highs, lows, closes, width=width,
                             colorup=colorup, colordown=colordown,
                             alpha=alpha)
def candlestick2_ohlc(ax, opens, highs, lows, closes, width=4,
                      colorup='k', colordown='r',
                      alpha=0.75,
                      ):
    """Represent the open, close as a bar line and high low range as a
    vertical line.

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    closes : sequence
        sequence of closing values
    ticksize : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close <  open
    alpha : float
        bar transparency

    Returns
    -------
    ret : tuple
        (lineCollection, barCollection)
    """

    # note this code assumes if any value open, low, high, close is
    # missing they all are missing

    # One rectangle (as 4 corner vertices) per non-missing day, spanning
    # the open-close range, centered on the day index i.
    delta = width / 2.
    barVerts = [((i - delta, open),
                 (i - delta, close),
                 (i + delta, close),
                 (i + delta, open))
                for i, open, close in zip(xrange(len(opens)), opens, closes)
                if open != -1 and close != -1]

    # One low-high wick segment per non-missing day.
    rangeSegments = [((i, low), (i, high))
                     for i, low, high in zip(xrange(len(lows)), lows, highs)
                     if low != -1]

    # Normalise colors to RGBA with the requested alpha.
    r, g, b = colorConverter.to_rgb(colorup)
    colorup = r, g, b, alpha
    r, g, b = colorConverter.to_rgb(colordown)
    colordown = r, g, b, alpha
    colord = {True: colorup,
              False: colordown,
              }
    colors = [colord[open < close]
              for open, close in zip(opens, closes)
              if open != -1 and close != -1]

    # Missing values must be missing in every column (see note above).
    assert(len(barVerts) == len(rangeSegments))

    useAA = 0,  # use tuple here
    lw = 0.5,  # and here

    rangeCollection = LineCollection(rangeSegments,
                                     colors=((0, 0, 0, 1), ),
                                     linewidths=lw,
                                     antialiaseds = useAA,
                                     )

    barCollection = PolyCollection(barVerts,
                                   facecolors=colors,
                                   edgecolors=((0, 0, 0, 1), ),
                                   antialiaseds=useAA,
                                   linewidths=lw,
                                   )

    # Expand the data limits to cover all plotted days and prices.
    minx, maxx = 0, len(rangeSegments)
    miny = min([low for low in lows if low != -1])
    maxy = max([high for high in highs if high != -1])

    corners = (minx, miny), (maxx, maxy)
    ax.update_datalim(corners)
    ax.autoscale_view()

    # add these last
    ax.add_collection(barCollection)
    ax.add_collection(rangeCollection)
    return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
                   colorup='k', colordown='r',
                   width=4, alpha=1.0):
    """Add a volume overlay to the current axes.  The opens and closes
    are used to determine the color of the bar.  -1 is missing.  If a
    value is missing on one it must be missing on all

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        a sequence of opens
    closes : sequence
        a sequence of closes
    volumes : sequence
        a sequence of volumes
    width : int
        the bar width in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close <  open
    alpha : float
        bar transparency

    Returns
    -------
    ret : `barCollection`
        The `barrCollection` added to the axes

    """

    # Normalise colors to RGBA with the requested alpha.
    r, g, b = colorConverter.to_rgb(colorup)
    colorup = r, g, b, alpha
    r, g, b = colorConverter.to_rgb(colordown)
    colordown = r, g, b, alpha
    colord = {True: colorup,
              False: colordown,
              }
    # NOTE(review): colors is filtered on open/close != -1 while bars is
    # filtered on volume != -1; these only stay in sync under the
    # documented "missing on one => missing on all" assumption.
    colors = [colord[open < close]
              for open, close in zip(opens, closes)
              if open != -1 and close != -1]

    # One rectangle (as 4 corner vertices) per non-missing volume,
    # rising from y=0 and centered on the day index i.
    delta = width / 2.
    bars = [((i - delta, 0), (i - delta, v), (i + delta, v), (i + delta, 0))
            for i, v in enumerate(volumes)
            if v != -1]

    barCollection = PolyCollection(bars,
                                   facecolors=colors,
                                   edgecolors=((0, 0, 0, 1), ),
                                   antialiaseds=(0,),
                                   linewidths=(0.5,),
                                   )

    ax.add_collection(barCollection)
    corners = (0, 0), (len(bars), max(volumes))
    ax.update_datalim(corners)
    ax.autoscale_view()

    # add these last
    return barCollection
def volume_overlay2(ax, closes, volumes,
                    colorup='k', colordown='r',
                    width=4, alpha=1.0):
    """
    Add a volume overlay to the current axes.  The closes are used to
    determine the color of the bar.  -1 is missing.  If a value is
    missing on one it must be missing on all

    nb: first point is not displayed - it is used only for choosing the
    right color


    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    closes : sequence
        a sequence of closes
    volumes : sequence
        a sequence of volumes
    width : int
        the bar width in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close <  open
    alpha : float
        bar transparency

    Returns
    -------
    ret : `barCollection`
        The `barrCollection` added to the axes

    """
    # Color each bar by the close-to-close move: yesterday's closes play
    # the role of "opens".  The first volume is dropped accordingly.
    prior = closes[:-1]
    current = closes[1:]
    return volume_overlay(ax, prior, current, volumes[1:],
                          colorup, colordown, width, alpha)
def volume_overlay3(ax, quotes,
                    colorup='k', colordown='r',
                    width=4, alpha=1.0):
    """Add a volume overlay to the current axes.  quotes is a list of (d,
    open, high, low, close, volume) and close-open is used to
    determine the color of the bar

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    quotes : sequence of (time, open, high, low, close, ...) sequences
        data to plot.  time must be in float date format - see date2num
    width : int
        the bar width in points
    colorup : color
        the color of the lines where close1 >= close0
    colordown : color
        the color of the lines where close1 <  close0
    alpha : float
        bar transparency

    Returns
    -------
    ret : `barCollection`
        The `barrCollection` added to the axes


    """

    # Normalise colors to RGBA with the requested alpha.
    r, g, b = colorConverter.to_rgb(colorup)
    colorup = r, g, b, alpha
    r, g, b = colorConverter.to_rgb(colordown)
    colordown = r, g, b, alpha
    colord = {True: colorup,
              False: colordown,
              }

    # Each bar is colored by the close-to-close move; the first bar
    # falls back to close vs open since it has no prior close.
    dates, opens, highs, lows, closes, volumes = list(zip(*quotes))
    colors = [colord[close1 >= close0]
              for close0, close1 in zip(closes[:-1], closes[1:])
              if close0 != -1 and close1 != -1]
    colors.insert(0, colord[closes[0] >= opens[0]])

    # Prototype bar vertices around x=0; per-date positions are applied
    # via `offsets` below.
    right = width / 2.0
    left = -width / 2.0

    bars = [((left, 0), (left, volume), (right, volume), (right, 0))
            for d, open, high, low, close, volume in quotes]

    # Bar width is specified in points; scale x by dpi and y to data.
    sx = ax.figure.dpi * (1.0 / 72.0)  # scale for points
    sy = ax.bbox.height / ax.viewLim.height

    barTransform = Affine2D().scale(sx, sy)

    dates = [d for d, open, high, low, close, volume in quotes]
    offsetsBars = [(d, 0) for d in dates]

    useAA = 0,  # use tuple here
    lw = 0.5,  # and here
    barCollection = PolyCollection(bars,
                                   facecolors=colors,
                                   edgecolors=((0, 0, 0, 1),),
                                   antialiaseds=useAA,
                                   linewidths=lw,
                                   offsets=offsetsBars,
                                   transOffset=ax.transData,
                                   )
    barCollection.set_transform(barTransform)

    # Expand the data limits to cover the full date and volume range.
    minpy, maxx = (min(dates), max(dates))
    miny = 0
    maxy = max([volume for d, open, high, low, close, volume in quotes])
    corners = (minpy, miny), (maxx, maxy)
    ax.update_datalim(corners)
    #print 'datalim', ax.dataLim.bounds
    #print 'viewlim', ax.viewLim.bounds

    ax.add_collection(barCollection)
    ax.autoscale_view()

    return barCollection
def index_bar(ax, vals,
              facecolor='b', edgecolor='l',
              width=4, alpha=1.0, ):
    """Add a bar collection graph with height vals (-1 is missing).

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    vals : sequence
        a sequence of values
    facecolor : color
        the color of the bar face
    edgecolor : color
        the color of the bar edges
    width : int
        the bar width in points
    alpha : float
       bar transparency

    Returns
    -------
    ret : `barCollection`
        The `barrCollection` added to the axes

    """
    # NOTE(review): the default edgecolor='l' does not look like a
    # standard matplotlib color abbreviation -- verify callers always
    # pass an explicit edgecolor, or whether 'b' was intended.
    facecolors = (colorConverter.to_rgba(facecolor, alpha),)
    edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)

    # Prototype bar vertices around x=0; per-index positions are applied
    # via `offsets` below.  -1 marks a missing value.
    right = width / 2.0
    left = -width / 2.0

    bars = [((left, 0), (left, v), (right, v), (right, 0))
            for v in vals if v != -1]

    # Bar width is specified in points; scale x by dpi and y to data.
    sx = ax.figure.dpi * (1.0 / 72.0)  # scale for points
    sy = ax.bbox.height / ax.viewLim.height

    barTransform = Affine2D().scale(sx, sy)

    offsetsBars = [(i, 0) for i, v in enumerate(vals) if v != -1]

    barCollection = PolyCollection(bars,
                                   facecolors=facecolors,
                                   edgecolors=edgecolors,
                                   antialiaseds=(0,),
                                   linewidths=(0.5,),
                                   offsets=offsetsBars,
                                   transOffset=ax.transData,
                                   )
    barCollection.set_transform(barTransform)

    # Expand the data limits to cover all plotted bars.
    minpy, maxx = (0, len(offsetsBars))
    miny = 0
    maxy = max([v for v in vals if v != -1])
    corners = (minpy, miny), (maxx, maxy)
    ax.update_datalim(corners)
    ax.autoscale_view()

    # add these last
    ax.add_collection(barCollection)
    return barCollection
| mit |
petosegan/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
    """Create a decorator for methods that are delegated to a sub-estimator

    This enables ducktyping by hasattr returning True according to the
    sub-estimator.

    >>> from sklearn.utils.metaestimators import if_delegate_has_method
    >>>
    >>>
    >>> class MetaEst(object):
    ...     def __init__(self, sub_est):
    ...         self.sub_est = sub_est
    ...
    ...     @if_delegate_has_method(delegate='sub_est')
    ...     def predict(self, X):
    ...         return self.sub_est.predict(X)
    ...
    >>> class HasPredict(object):
    ...     def predict(self, X):
    ...         return X.sum(axis=1)
    ...
    >>> class HasNoPredict(object):
    ...     pass
    ...
    >>> hasattr(MetaEst(HasPredict()), 'predict')
    True
    >>> hasattr(MetaEst(HasNoPredict()), 'predict')
    False
    """
    def decorate(fn):
        # The descriptor looks up "<delegate>.<method name>" on the
        # instance at attribute-access time.
        return _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))

    return decorate
| bsd-3-clause |
APPIAN-PET/APPIAN | src/dashboard/dashboard.py | 1 | 13109 | import os
import glob
import json
import subprocess
import sys
import importlib
import h5py
import nipype.interfaces.minc as minc
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
import nipype.interfaces.utility as niu
import distutils
import nibabel as nib
import src.initialization as init
import src.pvc as pvc
import src.results as results
import src.quantification as quant
import src.qc as qc
import pandas as pd
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,
BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)
from . import minc2volume_viewer as minc2volume
from nipype.utils.filemanip import (load_json, save_json, split_filename, fname_presuffix, copyfile)
from nipype.utils.filemanip import loadcrash
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
from xml.dom import minidom
from src.utils import splitext
from distutils import dir_util
from nipype.interfaces.utility import Rename
from src import masking as masking
from src import surf_masking
global path
path = os.path.dirname(os.path.abspath(__file__))
path_split = path.split(os.sep)
pvc_path = '/'.join(path_split[0:-1])+os.sep+"src"+os.sep+"pvc_methods"
quant_path = '/'.join(path_split[0:-1])+os.sep+"src"+os.sep+"quant_methods"
sys.path.insert(0, pvc_path)
sys.path.insert(0, quant_path)
#importlib.import_module("src/pvc_methods/pvc_method_GTM")
#importlib.import_module("src/quant_methods/quant_method_lp")
def cmd(command):
    """Run *command* (a whitespace-separated string) and return its
    stripped stdout as text."""
    argv = command.split()
    output = subprocess.check_output(argv, universal_newlines=True)
    return output.strip()
def adjust_hdr(niftifile):
    """Normalize the header of a MINC2 file in place.

    Forces axis-aligned direction cosines on the three spatial dimensions
    and, for 4D (dynamic) volumes, inserts a trivial time start/step.

    Parameters
    ----------
    niftifile : str
        Path to a MINC2 (HDF5-based) volume.
    """
    # BUG FIX: read the dimension count and close the handle *before* the
    # external minc_modify_header tool rewrites the same file; the original
    # left the h5py handle open for the whole function (resource leak).
    with h5py.File(niftifile, 'r') as f:
        n_dims = len(f['minc-2.0/dimensions'])
    # list_dims = ['xspace', 'yspace', 'zspace', 'time']
    # list_dims.pop() if ndims == 3 else False
    list_dims = ['xspace', 'yspace', 'zspace']
    # Identity orientation for each spatial axis.
    dir_cosines = {
        "xspace": '1.,0.,0.',
        "yspace": '0.,1.,0.',
        "zspace": '0.,0.,1.',
    }
    for dim in list_dims:
        dir_cosine = dir_cosines[str(dim)]
        cmd("minc_modify_header -sinsert {}:direction_cosines='{}' {}".format(dim, dir_cosine, niftifile))
    if n_dims == 4:
        # 4D volume: give the time axis a trivial start/step.
        cmd("minc_modify_header -dinsert time:start=0 {}".format(niftifile))
        cmd("minc_modify_header -dinsert time:step=1 {}".format(niftifile))
def mnc2vol(niftifile):
    # Convert a volume into the .raw/.header pair consumed by the
    # dashboard's minc2volume viewer; files are written to the cwd.
    if not os.path.exists(niftifile) :
        print('Warning: could not find file', niftifile)
        exit(1)
    # NOTE(review): nibabel's get_data() is deprecated in newer releases;
    # get_fdata()/dataobj may be required on upgrade -- confirm the pinned
    # nibabel version.
    datatype = nib.load(niftifile).get_data().dtype
    # Output pair is named after the input volume, placed in the cwd.
    basename = os.getcwd()+os.sep+ splitext(os.path.basename(niftifile))[0]
    rawfile = basename +'.raw'
    headerfile = basename +'.header'
    minc2volume.make_raw(niftifile, datatype, rawfile)
    minc2volume.make_header(niftifile, datatype, headerfile)
def prettify(elem):
    """Serialize *elem* to a pretty-printed, UTF-8 encoded XML byte string."""
    serialized = tostring(elem, 'utf-8')
    document = minidom.parseString(serialized)
    return document.toprettyxml(indent=" ", encoding='UTF-8')
def get_stage_file_type(stage, method, stage_dir, prefix):
    '''
    Get the file format from the methods file for quant or pvc. These files contain
    a variable that specifies the output file format for the quant/pvc node.

    :param stage: processing stage, 'pvc' or 'quant'
    :param method: name suffix of the method module to import
    :param stage_dir: directory (relative to this package) holding methods
    :param prefix: module filename prefix ('pvc' or 'quant')
    :return: the module's declared file format, or None for an unknown stage
    '''
    # Specify path to module
    sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))+"../"+stage_dir+"/methods" )
    module_fn=prefix+"_method_"+method
    # Load the module
    try :
        module = importlib.import_module(module_fn)
    except ImportError :
        # BUG FIX: the original referenced undefined names here
        # (pvc_module_fn, opts), so a failed import raised a NameError
        # instead of printing this diagnostic.
        print("Error: Could not find source file", module_fn, "corresponding to method:", method)
        exit(1)
    #Read the file format from the module
    if stage == 'pvc' :
        return module.file_format
    elif stage =='quant' :
        return module.out_file_format
    return None
def set_stage_node_file(stage, method):
    """Resolve which pipeline node supplies the output image for a stage.

    Parameters
    ----------
    stage : str
        Either 'pvc' or 'quant'; anything else aborts the program.
    method : str or None
        Name of the pvc/quant method; None means the stage is not used.

    Returns
    -------
    dict or None
        {"node": <node name>, "file": 'out_file'} when a method is set,
        otherwise None.
    """
    if stage == 'pvc':
        conversion_node = 'convertPVC'
        stage_dir = "src"
        prefix = "pvc"
    elif stage == 'quant':
        conversion_node = 'convertParametric'
        stage_dir = "src"
        prefix = "quant"
    else:
        print("Error: stage must be with 'pvc' or 'quant' but received :", stage)
        exit(1)

    # Idiom fix: compare against None with "is not" rather than "!=".
    if method is not None:
        file_type = get_stage_file_type(stage, method, stage_dir, prefix)
        # MINC outputs are usable directly; any other format must pass
        # through the conversion node first.
        node = method if file_type == "MINC" else conversion_node
        return {"node": node, "file": 'out_file'}
    # Make the original implicit None return explicit.
    return None
def generate_xml_nodes(sourceDir,targetDir,pvc_method,quant_method,analysis_space,images, info, out_file):
    """Write a per-scan nodes.xml QC file describing the dashboard images.

    :param sourceDir: APPIAN source directory (unused here; kept for callers)
    :param targetDir: output root; stripped from image paths so the
        dashboard can serve them relatively
    :param pvc_method, quant_method, analysis_space: pipeline options
        (currently unused in this function)
    :param images: dict mapping node name -> {"v1": path, "v2": path}
    :param info: dict of scan identifiers (sid, ses, task, cid, ...)
    :param out_file: path of the XML file to write
    """
    #Create root for xml, called <qc>
    xmlQC = Element('qc')
    # Initialize empty list to store volumes that may need to be converted
    listVolumes = list();
    # Write variables related to scan info: sid, ses, task, cid
    xmlscan = SubElement(xmlQC, 'scan')
    for key, value in info.items() :
        xmlscan.set(key, str(value))
    # Iterate over the images dict and write the paths for
    # outMnc and inMnc
    for node_name, img in images.items() :
        xmlnode = SubElement(xmlscan, 'node')
        xmlnode.set('name', node_name)
        # PVC nodes use t1-suffixed tags; all others use plain volume tags.
        xmlkey = SubElement(xmlnode,
                'volumet1' if node_name == 'pvc' else 'volume1')
        xmlkey.text = img["v1"].replace(targetDir+"/",'')
        xmlkey = SubElement(xmlnode,
                'volumet2' if node_name == 'pvc' else 'volume2')
        xmlkey.text = img["v2"].replace(targetDir+"/",'')
        listVolumes.append(img["v1"])
        listVolumes.append(img["v2"])
    # Save the output xml file
    # NOTE(review): prettify() returns bytes (encoding='UTF-8'); str() on
    # bytes yields a "b'...'" repr on Python 3 -- a .decode() is probably
    # intended here; confirm.
    with open(out_file,"w") as f:
        to_write = str(prettify(xmlQC) )
        print(to_write)
        f.write(to_write)
    # Perform conversion to raw
    #for niftifile in listVolumes:
    #    rawfile = niftifile+'.raw'
    #    headerfile = niftifile+'.header'
    #    mnc2vol(niftifile)
def link_stats_qc(*args):
    # Expose the pipeline's top-level output directories inside the
    # dashboard's public/ folder via relative symlinks.
    # Only the first positional argument (the options object) is used.
    opts=args[0]
    if not os.path.exists(opts.targetDir+'/preproc/dashboard/public/') :
        os.makedirs(opts.targetDir+'/preproc/dashboard/public/')
    os.chdir(opts.targetDir+'/preproc/dashboard/public/')
    final_dirs = [ os.path.basename(f) for f in glob.glob(opts.targetDir+'/*') ]
    for flag in final_dirs :
        lnk=os.path.join(opts.targetDir,'preproc/dashboard/public/',flag)
        # Re-create each link on every run so stale targets are refreshed.
        if os.path.islink(lnk):
            os.remove(lnk)
        os.symlink(os.path.join('../../../',flag), lnk)
class deployDashOutput(TraitedSpec):
    """Output spec for GenerateOutputImagesCSV: path of the written CSV."""
    out_file = traits.File(desc="Output file")
class deployDashInput(BaseInterfaceInputSpec):
    """Input spec for GenerateOutputImagesCSV: scan identifiers plus the
    image paths to be listed in the per-scan CSV."""
    targetDir = traits.File(mandatory=True, desc="Target directory")
    sourceDir = traits.File(mandatory=True, desc="Source directory")
    pvc_method = traits.Str(desc="PVC method")
    quant_method = traits.Str(desc="TKA method")
    analysis_space = traits.Str(desc="Analysis Space")
    pet = traits.File(exists=True, mandatory=True, desc="PET image")
    pet_space_mri = traits.File(exists=True, mandatory=True, desc="Output PETMRI image")
    # BUG FIX: GenerateOutputImagesCSV._run_interface reads this trait, but
    # it had been commented out of the spec; restore it as an optional
    # (non-mandatory) input so the interface does not fail at runtime.
    pet_brain_mask_space_mri = traits.File(exists=True, desc="Output PET Brain Mask")
    mri_space_nat = traits.File(exists=True, mandatory=True, desc="Output T1 native space image")
    mri_brain_mask = traits.File(exists=True, mandatory=True, desc="MRI brain mask (t1 native space)")
    t1_analysis_space = traits.File(exists=True, mandatory=True, desc="Output T1 in analysis space image")
    pvc = traits.File(exists=True, desc="Output PVC image")
    quant = traits.File(exists=True, desc="Output TKA image")
    # Scan identifiers; 'NA' marks an unset field when building filenames.
    sid =traits.Str(default_value='NA',usedefault=True)
    cid=traits.Str(default_value='NA',usedefault=True)
    ses=traits.Str(default_value='NA',usedefault=True)
    task=traits.Str(default_value='NA',usedefault=True)
    run=traits.Str(default_value='NA',usedefault=True)
    out_file = traits.File(desc="Output file")
    clobber = traits.Bool(desc="Overwrite output file", default=False)
class GenerateOutputImagesCSV(BaseInterface):
    """Nipype interface that writes a one-row CSV listing the output images
    (MRI, PET, masks, and optional PVC/quant volumes) for a single scan."""
    input_spec = deployDashInput
    output_spec = deployDashOutput

    def _gen_output(self):
        # Build a BIDS-like filename from whichever identifiers are set
        # ('NA' marks an unset identifier).
        fname = 'sub-'+self.inputs.sid
        if self.inputs.ses != 'NA' : fname+='_ses-'+self.inputs.ses
        if self.inputs.task != 'NA' : fname+='_task-'+self.inputs.task
        if self.inputs.run != 'NA' : fname+='_run-'+self.inputs.run
        fname += "_output_images.csv" #"nodes.xml"
        dname = os.getcwd()
        return dname+os.sep+fname

    def _run_interface(self, runtime):
        #create dictionary with information about scan
        if not isdefined( self.inputs.out_file) :
            self.inputs.out_file = self._gen_output()
        # NOTE(review): verify that pet_brain_mask_space_mri is declared on
        # the input spec -- it is read below but was commented out of
        # deployDashInput at review time, which would fail here.
        images=pd.DataFrame({ "sid":[self.inputs.sid], "cid":[self.inputs.cid], "ses":[self.inputs.ses],
            "task":[self.inputs.task], "run":[self.inputs.run], "mri":[self.inputs.mri_space_nat],
            "pet_space_mri":[self.inputs.pet_space_mri], "pet_brain_mask_space_mri":[self.inputs.pet_brain_mask_space_mri],
            "mri_brain_mask":[self.inputs.mri_brain_mask ]})
        # If PVC method is defined, then add PVC images
        if isdefined(self.inputs.pvc_method) :
            images["pvc"]= self.inputs.pvc
        # If TKA method is defined, then add quantification images
        if isdefined(self.inputs.quant_method) :
            images["quant"]= self.inputs.quant
        #Create xml for current scan
        #generate_xml_nodes(self.inputs.sourceDir,self.inputs.targetDir,self.inputs.pvc_method,self.inputs.quant_method,self.inputs.analysis_space,images,info, self.inputs.out_file);
        # (Defensive repeat of the default-filename fallback above.)
        if not isdefined( self.inputs.out_file) :
            self.inputs.out_file = self._gen_output()
        print(self.inputs.out_file)
        images.to_csv(self.inputs.out_file,index=False)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        if not isdefined( self.inputs.out_file) :
            self.inputs.out_file = self._gen_output()
        outputs["out_file"] = self.inputs.out_file
        return outputs
def groupLevel_dashboard(opts, args):
    """Assemble the group-level dashboard: deploy the static web assets,
    link per-subject outputs into public/, and concatenate every scan's
    nodes.xml into a single file consumed by the web app."""
    workflow = pe.Workflow(name="concat_dashboard_xml")
    workflow.base_dir = opts.targetDir + '/preproc'
    #Check for valid data sources
    sources=glob.glob(opts.targetDir+os.sep+opts.preproc_dir+os.sep+'**'+os.sep+'dash_scanLevel'+os.sep+'nodes.xml')
    print(opts.targetDir+os.sep+opts.preproc_dir+os.sep+'**'+os.sep+'dash_scanLevel'+os.sep+'nodes.xml')
    if len(sources) == 0 :
        print('Could not find any nodes.xml files for processed subjects')
        return 0
    if not os.path.exists(opts.targetDir+"/preproc/dashboard/") :
        os.makedirs(opts.targetDir+"/preproc/dashboard/");
    # Copy the static dashboard web application into the target directory.
    distutils.dir_util.copy_tree(os.path.split(os.path.abspath(__file__))[0]+'/dashboard_web', opts.targetDir+'/preproc/dashboard', update=1, verbose=0)
    os.chdir(opts.targetDir+'/preproc/dashboard/public/')
    # Refresh the symlink exposing preproc outputs to the web app.
    if os.path.exists(os.path.join(opts.targetDir,'preproc/dashboard/public/preproc')):
        os.remove(os.path.join(opts.targetDir,'preproc/dashboard/public/preproc'))
    os.symlink('../../../preproc', os.path.join(opts.targetDir,'preproc/dashboard/public/preproc'))
    link_stats_qc(opts)
    # Grab every scan-level nodes.xml and feed the list to concat_xml.
    datasource = pe.Node( interface=nio.DataGrabber( outfields=['xml'], raise_on_empty=True, sort_filelist=False), name="datasourceDashboard")
    datasource.inputs.base_directory = opts.targetDir + os.sep +opts.preproc_dir
    datasource.inputs.template = '*'
    datasource.inputs.field_template = {"xml" : '*'+os.sep+'dash_scanLevel'+os.sep+'nodes.xml'}
    concat_dashboard_xmlNode=pe.Node(interface=concat_xml(), name="concat_xml")
    concat_dashboard_xmlNode.inputs.out_file=opts.targetDir+"/preproc/dashboard/public/nodes.xml"
    workflow.connect(datasource, 'xml', concat_dashboard_xmlNode, 'in_list')
    workflow.run()
class concat_xmlOutput(TraitedSpec):
    """Output spec for concat_xml: path of the combined XML file."""
    out_file = traits.File(desc="Output file")
class concat_xmlInput(BaseInterfaceInputSpec):
    """Input spec for concat_xml: source XML paths and the target path."""
    in_list = traits.List(mandatory=True, exists=True, desc="Input list")
    out_file = traits.File(mandatory=True, desc="Output file")
class concat_xml(BaseInterface):
    """Concatenate several per-scan dashboard nodes.xml files into one.

    Each input's XML declaration and <qc> wrapper lines are stripped and
    the combined contents are re-wrapped in a single <qc> element.
    """
    input_spec = concat_xmlInput
    output_spec = concat_xmlOutput

    def _run_interface(self, runtime):
        # BUG FIX: the output handle was opened but never closed; use
        # context managers so the file is flushed and released reliably.
        with open(self.inputs.out_file, 'w+') as out:
            out.write('<qc>\n')
            for filename in self.inputs.in_list:
                with open(filename, 'r') as f:
                    for l in f.readlines():
                        # Drop each input's declaration and <qc> wrapper.
                        if (not '<qc>' in l) and (not '</qc>' in l) and (not 'xml version' in l):
                            out.write(l)
            out.write('</qc>')
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        # BUG FIX: the original prepended os.getcwd() unconditionally,
        # which mangles the absolute path supplied by the caller; abspath
        # anchors relative paths at the cwd and leaves absolute ones alone.
        outputs["out_file"] = os.path.abspath(self.inputs.out_file)
        return outputs
| mit |
JaneliaSciComp/hybridizer | tests/adc_to_volume.py | 4 | 5112 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import matplotlib.pyplot as plot
import numpy
from numpy.polynomial.polynomial import polyfit,polyadd,Polynomial
import yaml
INCHES_PER_ML = 0.078
VOLTS_PER_ADC_UNIT = 0.0049
def load_numpy_data(path):
    """Read a CSV file into a structured numpy array.

    The first line of the file supplies the field names; every field is
    stored as a fixed-width byte string ('S25').
    """
    with open(path, 'r') as fid:
        column_names = fid.readline().rstrip().split(',')
    record_type = numpy.dtype({'names': column_names,
                               'formats': ['S25'] * len(column_names)})
    return numpy.loadtxt(path, dtype=record_type, delimiter=",", skiprows=1)
# -----------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Calibration script: relate hall-effect sensor ADC readings to fluid
    # volume and fit an averaged cubic ADC->volume polynomial.
    # Load VA data
    data_file = 'hall_effect_data_va.csv'
    hall_effect_data_va = load_numpy_data(data_file)
    distances_va = numpy.float64(hall_effect_data_va['distance'])
    A1_VA = numpy.float64(hall_effect_data_va['A1'])
    A9_VA = numpy.float64(hall_effect_data_va['A9'])
    A4_VA = numpy.float64(hall_effect_data_va['A4'])
    A12_VA = numpy.float64(hall_effect_data_va['A12'])
    A2_VA = numpy.float64(hall_effect_data_va['A2'])
    A10_VA = numpy.float64(hall_effect_data_va['A10'])
    A5_VA = numpy.float64(hall_effect_data_va['A5'])
    A13_VA = numpy.float64(hall_effect_data_va['A13'])
    # Massage VA data: convert distance to volume, stack one column per
    # sensor, and rescale voltages into ADC units.
    volumes_va = distances_va/INCHES_PER_ML
    A1_VA = numpy.reshape(A1_VA,(-1,1))
    A9_VA = numpy.reshape(A9_VA,(-1,1))
    A4_VA = numpy.reshape(A4_VA,(-1,1))
    A12_VA = numpy.reshape(A12_VA,(-1,1))
    A2_VA = numpy.reshape(A2_VA,(-1,1))
    A10_VA = numpy.reshape(A10_VA,(-1,1))
    A5_VA = numpy.reshape(A5_VA,(-1,1))
    A13_VA = numpy.reshape(A13_VA,(-1,1))
    data_va = numpy.hstack((A1_VA,A9_VA,A4_VA,A12_VA,A2_VA,A10_VA,A5_VA,A13_VA))
    data_va = data_va/VOLTS_PER_ADC_UNIT
    # Load OA data
    data_file = 'hall_effect_data_oa.csv'
    hall_effect_data_oa = load_numpy_data(data_file)
    distances_oa = numpy.float64(hall_effect_data_oa['distance'])
    A9_OA = numpy.float64(hall_effect_data_oa['A9'])
    A10_OA = numpy.float64(hall_effect_data_oa['A10'])
    A11_OA = numpy.float64(hall_effect_data_oa['A11'])
    A12_OA = numpy.float64(hall_effect_data_oa['A12'])
    # Massage OA data
    volumes_oa = distances_oa/INCHES_PER_ML
    A9_OA = numpy.reshape(A9_OA,(-1,1))
    A10_OA = numpy.reshape(A10_OA,(-1,1))
    A11_OA = numpy.reshape(A11_OA,(-1,1))
    A12_OA = numpy.reshape(A12_OA,(-1,1))
    data_oa = numpy.hstack((A9_OA,A10_OA,A11_OA,A12_OA))
    data_oa = data_oa/VOLTS_PER_ADC_UNIT
    # Create figure
    fig = plot.figure()
    fig.suptitle('hall effect sensors',fontsize=14,fontweight='bold')
    fig.subplots_adjust(top=0.85)
    colors = ['b','g','r','c','m','y','k','b']
    markers = ['o','o','o','o','o','o','o','^']
    # Axis 1: raw signal vs. volume, one series per sensor.
    ax1 = fig.add_subplot(121)
    for column_index in range(0,data_va.shape[1]):
        color = colors[column_index]
        marker = markers[column_index]
        ax1.plot(data_va[:,column_index],volumes_va,marker=marker,linestyle='--',color=color)
    # for column_index in range(0,data_oa.shape[1]):
    #     color = colors[column_index]
    #     marker = markers[column_index]
    #     ax1.plot(data_oa[:,column_index],volumes_oa,marker=marker,linestyle='--',color=color)
    ax1.set_xlabel('mean signals (ADC units)')
    ax1.set_ylabel('volume (ml)')
    ax1.grid(True)
    # Axis 2: offset each sensor so its minimum reads zero, then drop rows
    # with any reading at or above MAX_VA (saturated region).
    for column_index in range(0,data_va.shape[1]):
        data_va[:,column_index] -= data_va[:,column_index].min()
    MAX_VA = 120
    data_va = data_va[numpy.all(data_va<MAX_VA,axis=1)]
    length = data_va.shape[0]
    # Keep the volume entries matching the surviving (tail) rows.
    volumes_va = volumes_va[-length:]
    # for column_index in range(0,data_oa.shape[1]):
    #     data_oa[:,column_index] -= data_oa[:,column_index].max()
    ax2 = fig.add_subplot(122)
    for column_index in range(0,data_va.shape[1]):
        color = colors[column_index]
        marker = markers[column_index]
        ax2.plot(data_va[:,column_index],volumes_va,marker=marker,linestyle='--',color=color)
    # for column_index in range(0,data_oa.shape[1]):
    #     color = colors[column_index]
    #     marker = markers[column_index]
    #     ax2.plot(data_oa[:,column_index],volumes_oa,marker=marker,linestyle='--',color=color)
    ax2.set_xlabel('offset mean signals (ADC units)')
    ax2.set_ylabel('volume (ml)')
    ax2.grid(True)
    # Fit a cubic polynomial per sensor and average the coefficients.
    order = 3
    sum_va = None
    for column_index in range(0,data_va.shape[1]):
        coefficients_va = polyfit(data_va[:,column_index],volumes_va,order)
        if sum_va is None:
            sum_va = coefficients_va
        else:
            sum_va = polyadd(sum_va,coefficients_va)
    average_va = sum_va/data_va.shape[1]
    # Persist the averaged coefficients for the firmware/driver to consume.
    with open('adc_to_volume_va.yaml', 'w') as f:
        yaml.dump(average_va, f, default_flow_style=False)
    round_digits = 8
    average_va = [round(i,round_digits) for i in average_va]
    # Overlay the fitted curve evaluated on the last sensor's signal.
    poly = Polynomial(average_va)
    ys_va = poly(data_va[:,-1])
    ax2.plot(data_va[:,-1],ys_va,'r',linewidth=3)
    ax2.text(5,7.5,r'$v = c_0 + c_1s + c_2s^2 + c_3s^3$',fontsize=20)
    ax2.text(5,6.5,str(average_va),fontsize=18,color='r')
    plot.show()
| bsd-3-clause |
ThirdProject/android_external_chromium_org | chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | 26 | 11131 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
  """
  Remove the contents of a directory without touching the directory itself.
  Ignores all failures.
  """
  if not os.path.exists(path):
    return
  for entry in os.listdir(path):
    TryToCleanPath(os.path.join(path, entry), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
  """
  Removes a file or directory.
  Ignores all failures.
  """
  if os.path.exists(path):
    if file_name_filter(path):
      print 'Trying to remove %s' % path
      if os.path.isdir(path):
        # Directories: recursive best-effort delete, errors suppressed.
        shutil.rmtree(path, ignore_errors=True)
      else:
        try:
          os.remove(path)
        except Exception:
          # Best-effort cleanup: a failed unlink is deliberately ignored.
          pass
    else:
      print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
  """Delete stale Chromium/scons temp entries from the system temp dir."""
  # Only delete files and directories like:
  # a) C:\temp\83C4.tmp
  # b) /tmp/.org.chromium.Chromium.EQrEzl
  file_name_re = re.compile(
      r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
  file_name_filter = lambda fn: file_name_re.search(fn) is not None
  path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
  # Guard against deleting from a bogus or too-short path such as "" or "/".
  if len(path) >= 4 and os.path.isdir(path):
    print
    print "Cleaning out the temp directory."
    print
    TryToCleanContents(path, file_name_filter)
  else:
    print
    print "Cannot find temp directory, not cleaning it."
    print
def RunCommand(cmd, cwd, env):
  """Run *cmd* in *cwd* with environment *env*; exit the process on failure."""
  banner = ' '.join(cmd)
  sys.stdout.write('\nRunning %s\n\n' % banner)
  sys.stdout.flush()
  retcode = subprocess.call(cmd, cwd=cwd, env=env)
  if retcode == 0:
    return
  sys.stdout.write('\nFailed: %s\n\n' % banner)
  sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
  """Build (without executing) then run the named scons test suite."""
  sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
  # First pass compiles all test prerequisites in parallel without running.
  RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
  sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
  RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
  """Build NaCl and run the chrome/nacl integration tests.

  Determines the host platform and target bitness, locates the chrome
  binary, prepares an MSVS (Windows) or xvfb (Linux) environment as needed,
  then builds and runs the newlib and/or glibc scons suites.
  """
  # Refuse to run under cygwin.
  if sys.platform == 'cygwin':
    raise Exception('I do not work under cygwin, sorry.')
  # By default, use the version of Python is being used to run this script.
  python = sys.executable
  if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularlly old version of Python, look for
    # a newer version.
    macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
    if os.path.exists(macpython27):
      python = macpython27
  script_dir = os.path.dirname(os.path.abspath(__file__))
  src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
  nacl_dir = os.path.join(src_dir, 'native_client')
  # Decide platform specifics.
  if options.browser_path:
    chrome_filename = options.browser_path
  else:
    chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
    if chrome_filename is None:
      raise Exception('Cannot find a chome binary - specify one with '
                      '--browser_path?')
  env = dict(os.environ)
  if sys.platform in ['win32', 'cygwin']:
    # Target bitness: explicit flag wins, otherwise infer from the host.
    if options.bits == 64:
      bits = 64
    elif options.bits == 32:
      bits = 32
    elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
         '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
      bits = 64
    else:
      bits = 32
    msvs_path = ';'.join([
        r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
        r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
        r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
        r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
        r'c:\Program Files\Microsoft Visual Studio 8\VC',
        r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
        r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
        r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
    ])
    env['PATH'] += ';' + msvs_path
    scons = [python, 'scons.py']
  elif sys.platform == 'darwin':
    if options.bits == 64:
      bits = 64
    elif options.bits == 32:
      bits = 32
    else:
      # Infer bitness from the chrome binary itself via file(1).
      p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
      (p_stdout, _) = p.communicate()
      assert p.returncode == 0
      if p_stdout.find('executable x86_64') >= 0:
        bits = 64
      else:
        bits = 32
    scons = [python, 'scons.py']
  else:
    # Linux: normalize uname -m output to ia32/x64/arm for bitness checks.
    p = subprocess.Popen(
        'uname -m | '
        'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
        shell=True, stdout=subprocess.PIPE)
    (p_stdout, _) = p.communicate()
    assert p.returncode == 0
    if options.bits == 64:
      bits = 64
    elif options.bits == 32:
      bits = 32
    elif p_stdout.find('64') >= 0:
      bits = 64
    else:
      bits = 32
    # xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
    # the entire build step rather than each test (browser_headless=1).
    # We also need to make sure that there are at least 24 bits per pixel.
    # https://code.google.com/p/chromium/issues/detail?id=316687
    scons = [
        'xvfb-run',
        '--auto-servernum',
        '--server-args', '-screen 0 1024x768x24',
        python, 'scons.py',
    ]
  if options.jobs > 1:
    scons.append('-j%d' % options.jobs)
  scons.append('disable_tests=%s' % options.disable_tests)
  if options.buildbot is not None:
    scons.append('buildbot=%s' % (options.buildbot,))
  # Clean the output of the previous build.
  # Incremental builds can get wedged in weird ways, so we're trading speed
  # for reliability.
  shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
  # check that the HOST (not target) is 64bit
  # this is emulating what msvs_env.bat is doing
  if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
     '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
    # 64bit HOST
    env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
                            'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
    env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
                            'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
  else:
    # 32bit HOST
    env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
                            'Common7\\Tools\\')
    env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
                            'Common7\\Tools\\')
  # Run nacl/chrome integration tests.
  # Note that we have to add nacl_irt_test to --mode in order to get
  # inbrowser_test_runner to run.
  # TODO(mseaborn): Change it so that inbrowser_test_runner is not a
  # special case.
  cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
                 '--mode=opt-host,nacl,nacl_irt_test',
                 'chrome_browser_path=%s' % chrome_filename,
                 ]
  if not options.integration_bot and not options.morenacl_bot:
    cmd.append('disable_flaky_tests=1')
  cmd.append('chrome_browser_tests')
  # Propagate path to JSON output if present.
  # Note that RunCommand calls sys.exit on errors, so potential errors
  # from one command won't be overwritten by another one. Overwriting
  # a successful results file with either success or failure is fine.
  if options.json_build_results_output_file:
    cmd.append('json_build_results_output_file=%s' %
               options.json_build_results_output_file)
  # Download the toolchain(s).
  RunCommand([python,
              os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
              '--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
             nacl_dir, os.environ)
  CleanTempDir()
  if options.enable_newlib:
    RunTests('nacl-newlib', cmd, nacl_dir, env)
  if options.enable_glibc:
    RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
  """Build the optparse parser for this script's command line."""
  parser = optparse.OptionParser()
  parser.add_option('-m', '--mode', dest='mode', default='Debug',
                    help='Debug/Release mode')
  parser.add_option('-j', dest='jobs', default=1, type='int',
                    help='Number of parallel jobs')
  # -1 means "not set"; Main() fills in bot-dependent defaults.
  parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
                    type='int', help='Run newlib tests?')
  parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
                    type='int', help='Run glibc tests?')
  parser.add_option('--json_build_results_output_file',
                    help='Path to a JSON file for machine-readable output.')
  # Deprecated, but passed to us by a script in the Chrome repo.
  # Replaced by --enable_glibc=0
  parser.add_option('--disable_glibc', dest='disable_glibc',
                    action='store_true', default=False,
                    help='Do not test using glibc.')
  parser.add_option('--disable_tests', dest='disable_tests',
                    type='string', default='',
                    help='Comma-separated list of tests to omit')
  # Bot-type defaults are sniffed from the buildbot builder name.
  builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
  is_integration_bot = 'nacl-chrome' in builder_name
  parser.add_option('--integration_bot', dest='integration_bot',
                    type='int', default=int(is_integration_bot),
                    help='Is this an integration bot?')
  is_morenacl_bot = (
      'More NaCl' in builder_name or
      'naclmore' in builder_name)
  parser.add_option('--morenacl_bot', dest='morenacl_bot',
                    type='int', default=int(is_morenacl_bot),
                    help='Is this a morenacl bot?')
  # Not used on the bots, but handy for running the script manually.
  parser.add_option('--bits', dest='bits', action='store',
                    type='int', default=None,
                    help='32/64')
  parser.add_option('--browser_path', dest='browser_path', action='store',
                    type='string', default=None,
                    help='Path to the chrome browser.')
  parser.add_option('--buildbot', dest='buildbot', action='store',
                    type='string', default=None,
                    help='Value passed to scons as buildbot= option.')
  return parser
def Main():
  """Parse arguments, apply bot-specific defaults, and run build+tests."""
  parser = MakeCommandLineParser()
  options, args = parser.parse_args()
  if options.integration_bot and options.morenacl_bot:
    parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
  # Set defaults for enabling newlib.
  if options.enable_newlib == -1:
    options.enable_newlib = 1
  # Set defaults for enabling glibc.
  if options.enable_glibc == -1:
    if options.integration_bot or options.morenacl_bot:
      options.enable_glibc = 1
    else:
      options.enable_glibc = 0
  if args:
    parser.error('ERROR: invalid argument')
  BuildAndTest(options)


if __name__ == '__main__':
  Main()
| bsd-3-clause |
lanselin/pysal | pysal/__init__.py | 4 | 5450 | """
Python Spatial Analysis Library
===============================
Documentation
-------------
PySAL documentation is available in two forms: python docstrings and an html \
webpage at http://pysal.org/
Available sub-packages
----------------------
cg
Basic data structures and tools for Computational Geometry
core
Basic functions used by several sub-packages
esda
Tools for Exploratory Spatial Data Analysis
examples
Example data sets used by several sub-packages for examples and testing
network
Spatial analysis on networks
region
Regionalization algorithms and spatially constrained clustering
spatial_dynamics
Space-time exploratory methods and clustering
spreg
Spatial regression and econometrics
weights
Tools for creating and manipulating weights
contrib
Package for interfacing with third-party libraries
Utilities
---------
`fileio`_
Tool for file input and output, supports many well known file formats
"""
import pysal.cg
import pysal.core
try:
import pandas
from pysal.contrib import pdio
pysal.common.pandas = pandas
except ImportError:
pysal.common.pandas = None
# Load the IOHandlers
from pysal.core import IOHandlers
# Assign pysal.open to dispatcher
open = pysal.core.FileIO.FileIO
from pysal.version import version
#from pysal.version import stable_release_date
#import urllib2, json
#import config
#import datetime
#import os, sys
# toplevel imports to be explicit
from pysal.esda.moran import Moran, Moran_BV, Moran_BV_matrix, Moran_Local, Moran_Local_BV
from pysal.esda.geary import Geary
from pysal.esda.join_counts import Join_Counts
from pysal.esda.gamma import Gamma
from pysal.esda.getisord import G, G_Local
from pysal.esda.mapclassify import quantile, binC, bin, bin1d, Equal_Interval, \
Percentiles
from pysal.esda.mapclassify import Box_Plot, Quantiles, Std_Mean, Maximum_Breaks
from pysal.esda.mapclassify import Natural_Breaks, Fisher_Jenks, Jenks_Caspall
from pysal.esda.mapclassify import Jenks_Caspall_Sampled, Jenks_Caspall_Forced
from pysal.esda.mapclassify import User_Defined, Max_P_Classifier, gadf
from pysal.esda.mapclassify import K_classifiers
from pysal.inequality.theil import Theil, TheilD, TheilDSim
from pysal.region.maxp import Maxp, Maxp_LISA
from pysal.spatial_dynamics import Markov, Spatial_Markov, LISA_Markov, \
SpatialTau, Theta, Tau
from pysal.spatial_dynamics import ergodic
from pysal.spatial_dynamics import directional
from pysal.weights import W, lat2W, block_weights, comb, full, shimbel, \
order, higher_order, higher_order_sp, remap_ids, hexLat2W, WSP, regime_weights
from pysal.weights.Distance import knnW, Kernel, DistanceBand
from pysal.weights.Contiguity import buildContiguity
from pysal.weights.spatial_lag import lag_spatial
from pysal.weights.Wsets import w_union, w_intersection, w_difference
from pysal.weights.Wsets import w_symmetric_difference, w_subset
from pysal.weights.user import queen_from_shapefile, rook_from_shapefile, \
knnW_from_array, knnW_from_shapefile, threshold_binaryW_from_array,\
threshold_binaryW_from_shapefile, threshold_continuousW_from_array,\
threshold_continuousW_from_shapefile, kernelW, kernelW_from_shapefile,\
adaptive_kernelW, adaptive_kernelW_from_shapefile,\
min_threshold_dist_from_shapefile, build_lattice_shapefile
from pysal.core.util.weight_converter import weight_convert
import pysal.spreg
import pysal.examples
from pysal.network.network import Network, NetworkG, NetworkK, NetworkF
#__all__=[]
#import esda,weights
#__all__+=esda.__all__
#__all__+=weights.__all__
# Constants
MISSINGVALUE = None # used by fileIO to flag missing values.
# Load stale and other possible messages at import
"""
base_path = os.path.split(pysal.__file__)[0]
config_path = os.path.join(base_path, 'config.py')
def query_yes_no(question):
yes = set(['yes','y', 'ye', ''])
no = set(['no','n'])
while True:
sys.stdout.write(question)
choice = raw_input().lower()
if choice in yes:
turn_off_check()
break
elif choice in no:
break
else:
sys.stdout.write("Please respond with 'yes' or 'no'.\n")
def turn_off_check():
if os.path.isfile(config_path):
f = open(config_path, 'w')
f.write("check_stable=False")
f.close()
pass
else:
print('Cannot find config.py. Please set value manually.')
def check_version():
today = datetime.date.today()
delta = datetime.timedelta(days=180)
diff = (today - stable_release_date).days
releases = int(diff)/180
if today - delta > stable_release_date:
print("Your version of PySAL is %d days old.") % diff
print("There have likely been %d new release(s).") % releases
print("Suppress this by setting check_stable=False in config.py.")
#query_yes_no("Disable this check? [Y/n]")
else:
pass
def check_remote_version():
print("Checking web for last stable release....")
try:
url = 'http://pypi.python.org/pypi/pysal/json'
request = urllib2.urlopen(url)
data = json.load(request)
newest = data['info']['version']
late = 'The most recent stable release is %s.' %newest
print(late)
except:
print("Machine is offline. I am unable to check for the latest version of PySAL")
if config.check_stable:
check_version()
else:
pass
"""
| bsd-3-clause |
vermouthmjl/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
    """Return the duration represented by ``delta`` in seconds.

    Parameters
    ----------
    t_start : datetime.datetime
        Unused; kept only for backward compatibility with existing callers
        (``bench_sample`` passes it).
    delta : datetime.timedelta
        Measured duration.

    Returns
    -------
    float
        Duration in seconds, including the fractional (microsecond) part.
    """
    # total_seconds() also accounts for delta.days, which the previous
    # `delta.seconds + delta.microseconds / mu_second` silently dropped.
    return delta.total_seconds()
def bench_sample(sampling, n_population, n_samples):
    """Time one call of ``sampling(n_population, n_samples)``.

    Returns the elapsed wall-clock time in seconds as a float.
    """
    # Trigger a collection up front so a pending GC cycle cannot pollute
    # the measurement of the call itself.
    gc.collect()
    started = datetime.now()
    sampling(n_population, n_samples)
    elapsed = datetime.now() - started
    return compute_time(started, elapsed)
if __name__ == "__main__":
    ###########################################################################
    # Option parser
    ###########################################################################
    op = optparse.OptionParser()
    op.add_option("--n-times",
                  dest="n_times", default=5, type=int,
                  help="Benchmark results are average over n_times experiments")
    op.add_option("--n-population",
                  dest="n_population", default=100000, type=int,
                  help="Size of the population to sample from.")
    op.add_option("--n-step",
                  dest="n_steps", default=5, type=int,
                  help="Number of step interval between 0 and n_population.")
    default_algorithms = "custom-tracking-selection,custom-auto," \
                         "custom-reservoir-sampling,custom-pool,"\
                         "python-core-sample,numpy-permutation"
    op.add_option("--algorithm",
                  dest="selected_algorithm",
                  default=default_algorithms,
                  type=str,
                  help="Comma-separated list of transformer to benchmark. "
                       "Default: %default. \nAvailable: %default")
    # op.add_option("--random-seed",
    #               dest="random_seed", default=13, type=int,
    #               help="Seed used by the random number generators.")
    (opts, args) = op.parse_args()
    if len(args) > 0:
        # NOTE(review): OptionParser.error already raises SystemExit, so the
        # sys.exit(1) below is unreachable.
        op.error("this script takes no arguments.")
        sys.exit(1)
    # Validate the requested algorithm names against the known set.
    selected_algorithm = opts.selected_algorithm.split(',')
    for key in selected_algorithm:
        if key not in default_algorithms.split(','):
            raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
                             % (key, default_algorithms))
    ###########################################################################
    # List sampling algorithm
    ###########################################################################
    # We assume that sampling algorithm has the following signature:
    #   sample(n_population, n_sample)
    #
    sampling_algorithm = {}
    ###########################################################################
    # Set Python core input
    sampling_algorithm["python-core-sample"] = \
        lambda n_population, n_sample: \
        random.sample(xrange(n_population), n_sample)
    ###########################################################################
    # Set custom automatic method selection
    sampling_algorithm["custom-auto"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="auto",
                                   random_state=random_state)
    ###########################################################################
    # Set custom tracking based method
    sampling_algorithm["custom-tracking-selection"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="tracking_selection",
                                   random_state=random_state)
    ###########################################################################
    # Set custom reservoir based method
    sampling_algorithm["custom-reservoir-sampling"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="reservoir_sampling",
                                   random_state=random_state)
    ###########################################################################
    # Set custom reservoir based method
    sampling_algorithm["custom-pool"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="pool",
                                   random_state=random_state)
    ###########################################################################
    # Numpy permutation based
    sampling_algorithm["numpy-permutation"] = \
        lambda n_population, n_sample: \
        np.random.permutation(n_population)[:n_sample]
    ###########################################################################
    # Remove unspecified algorithm
    sampling_algorithm = dict((key, value)
                              for key, value in sampling_algorithm.items()
                              if key in selected_algorithm)
    ###########################################################################
    # Perform benchmark
    ###########################################################################
    # time maps algorithm name -> (n_steps, n_times) array of raw timings.
    time = {}
    # NOTE(review): np.int is a deprecated alias (removed in NumPy 1.24);
    # plain int would be needed on modern NumPy.
    n_samples = np.linspace(start=0, stop=opts.n_population,
                            num=opts.n_steps).astype(np.int)
    ratio = n_samples / opts.n_population
    print('Benchmarks')
    print("===========================")
    for name in sorted(sampling_algorithm):
        print("Perform benchmarks for %s..." % name, end="")
        time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
        for step in xrange(opts.n_steps):
            for it in xrange(opts.n_times):
                time[name][step, it] = bench_sample(sampling_algorithm[name],
                                                    opts.n_population,
                                                    n_samples[step])
        print("done")
    # Average the repeated timings per step down to a 1-D curve.
    print("Averaging results...", end="")
    for name in sampling_algorithm:
        time[name] = np.mean(time[name], axis=1)
    print("done\n")
    # Print results
    ###########################################################################
    print("Script arguments")
    print("===========================")
    arguments = vars(opts)
    print("%s \t | %s " % ("Arguments".ljust(16),
                           "Value".center(12),))
    print(25 * "-" + ("|" + "-" * 14) * 1)
    for key, value in arguments.items():
        print("%s \t | %s " % (str(key).ljust(16),
                               str(value).strip().center(12)))
    print("")
    print("Sampling algorithm performance:")
    print("===============================")
    print("Results are averaged over %s repetition(s)." % opts.n_times)
    print("")
    # Plot one timing curve per algorithm against the sampling ratio.
    fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
    plt.title("n_population = %s, n_times = %s" %
              (opts.n_population, opts.n_times))
    ax = fig.add_subplot(111)
    for name in sampling_algorithm:
        ax.plot(ratio, time[name], label=name)
    ax.set_xlabel('ratio of n_sample / n_population')
    ax.set_ylabel('Time (s)')
    ax.legend()
    # Sort legend labels
    handles, labels = ax.get_legend_handles_labels()
    hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
    handles2, labels2 = zip(*hl)
    ax.legend(handles2, labels2, loc=0)
    plt.show()
| bsd-3-clause |
robin-lai/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target

h = .02  # step size in the mesh

# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)

# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# title for the plots
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']

# The classifier order here must stay aligned with `titles` above.
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)
    # Predict a class for every grid point of the mesh (flattened pairs).
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])

plt.show()
| bsd-3-clause |
befelix/GPy | GPy/plotting/plotly_dep/plot_definitions.py | 4 | 16743 | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy.plotting.matplot_dep.plot_definitions nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from ..abstract_plotting_library import AbstractPlottingLibrary
from .. import Tango
from . import defaults
import plotly
from plotly import tools
from plotly.graph_objs import Scatter, Scatter3d, Line,\
Marker, ErrorX, ErrorY, Bar, Heatmap, Trace,\
Annotations, Annotation, Contour, Font, Surface
from plotly.exceptions import PlotlyDictKeyError
# Map matplotlib single-character marker codes to their plotly symbol names
# (used by PlotlyPlotsBase.scatter to accept matplotlib-style markers).
SYMBOL_MAP = {
    'o': 'dot',
    'v': 'triangle-down',
    '^': 'triangle-up',
    '<': 'triangle-left',
    '>': 'triangle-right',
    's': 'square',
    '+': 'cross',
    'x': 'x',
    '*': 'x',  # no star yet in plotly!!
    'D': 'diamond',
    'd': 'diamond',
}
class PlotlyPlotsBase(AbstractPlottingLibrary):
    """GPy plotting backend built on plotly graph objects.

    Each drawing method returns plotly trace objects (or ``Annotations``);
    ``add_to_canvas`` attaches them to the ``(figure, row, col)`` canvas
    triple produced by ``new_canvas``. Subclasses implement ``show_canvas``
    to decide how the finished figure is rendered.
    """
    def __init__(self):
        super(PlotlyPlotsBase, self).__init__()
        self._defaults = defaults.__dict__
        self.current_states = dict()

    def figure(self, rows=1, cols=1, specs=None, is_3d=False, **kwargs):
        """Create a subplot grid figure; every cell is 3d when is_3d is True."""
        if specs is None:
            specs = [[{'is_3d': is_3d}]*cols]*rows
        figure = tools.make_subplots(rows, cols, specs=specs, **kwargs)
        return figure

    def new_canvas(self, figure=None, row=1, col=1, projection='2d',
                   xlabel=None, ylabel=None, zlabel=None,
                   title=None, xlim=None,
                   ylim=None, zlim=None, **kwargs):
        """Return ((figure, row, col), kwargs); creates a figure if needed.

        Note: the label/lim arguments are accepted but not applied here.
        """
        #if 'filename' not in kwargs:
        #    print('PlotlyWarning: filename was not given, this may clutter your plotly workspace')
        #    filename = None
        #else:
        #    filename = kwargs.pop('filename')
        if figure is None:
            figure = self.figure(is_3d=projection=='3d')
        figure.layout.font = Font(family="Raleway, sans-serif")
        if projection == '3d':
            figure.layout.legend.x=.5
            figure.layout.legend.bgcolor='#DCDCDC'
        return (figure, row, col), kwargs

    def add_to_canvas(self, canvas, traces, legend=False, **kwargs):
        """Recursively append traces/annotations (possibly nested in dicts,
        lists or tuples) to the canvas' subplot cell."""
        figure, row, col = canvas
        def append_annotation(a, xref, yref):
            # Pin the annotation to this cell's axes unless already set.
            if 'xref' not in a:
                a['xref'] = xref
            if 'yref' not in a:
                a['yref'] = yref
            figure.layout.annotations.append(a)
        def append_trace(t, row, col):
            figure.append_trace(t, row, col)
        def recursive_append(traces):
            if isinstance(traces, Annotations):
                xref, yref = figure._grid_ref[row-1][col-1]
                for a in traces:
                    append_annotation(a, xref, yref)
            # elif isinstance(traces, (Trace)): # doesn't work
            # elif type(traces) in [v for k,v in go.__dict__.iteritems()]:
            elif isinstance(traces, (Scatter, Scatter3d, ErrorX,
                                     ErrorY, Bar, Heatmap, Trace, Contour, Surface)):
                try:
                    append_trace(traces, row, col)
                except PlotlyDictKeyError:
                    # Its a dictionary of plots:
                    for t in traces:
                        recursive_append(traces[t])
            elif isinstance(traces, (dict)):
                for t in traces:
                    recursive_append(traces[t])
            elif isinstance(traces, (tuple, list)):
                for t in traces:
                    recursive_append(t)
        recursive_append(traces)
        figure.layout['showlegend'] = legend
        return canvas

    def show_canvas(self, canvas, filename=None, **kwargs):
        # NOTE(review): this *returns* the exception class instead of raising
        # it; most likely `raise NotImplementedError` was intended (the
        # Online/Offline subclasses override this method).
        return NotImplementedError

    def scatter(self, ax, X, Y, Z=None, color=Tango.colorsHex['mediumBlue'], cmap=None, label=None, marker='o', marker_kwargs=None, **kwargs):
        """Build a 2d/3d scatter trace; Z selects the 3d variant."""
        # Translate matplotlib-style marker codes to plotly symbols when known.
        try:
            marker = SYMBOL_MAP[marker]
        except:
            #not matplotlib marker
            pass
        marker_kwargs = marker_kwargs or {}
        if 'symbol' not in marker_kwargs:
            marker_kwargs['symbol'] = marker
        X, Y = np.squeeze(X), np.squeeze(Y)
        if Z is not None:
            Z = np.squeeze(Z)
            return Scatter3d(x=X, y=Y, z=Z, mode='markers',
                             showlegend=label is not None,
                             marker=Marker(color=color, colorscale=cmap, **marker_kwargs),
                             name=label, **kwargs)
        return Scatter(x=X, y=Y, mode='markers', showlegend=label is not None,
                       marker=Marker(color=color, colorscale=cmap, **marker_kwargs),
                       name=label, **kwargs)

    def plot(self, ax, X, Y, Z=None, color=None, label=None, line_kwargs=None, **kwargs):
        """Build a 2d/3d line trace; Z selects the 3d variant."""
        if 'mode' not in kwargs:
            kwargs['mode'] = 'lines'
        X, Y = np.squeeze(X), np.squeeze(Y)
        if Z is not None:
            Z = np.squeeze(Z)
            return Scatter3d(x=X, y=Y, z=Z, showlegend=label is not None, line=Line(color=color, **line_kwargs or {}), name=label, **kwargs)
        return Scatter(x=X, y=Y, showlegend=label is not None, line=Line(color=color, **line_kwargs or {}), name=label, **kwargs)

    def plot_axis_lines(self, ax, X, color=Tango.colorsHex['mediumBlue'], label=None, marker_kwargs=None, **kwargs):
        """Mark input locations: arrows on the x-axis (1d) or projected
        markers on the z=0 plane (2d)."""
        if X.shape[1] == 1:
            annotations = Annotations()
            for i, row in enumerate(X):
                annotations.append(
                    Annotation(
                        text='',
                        x=row[0], y=0,
                        yref='paper',
                        ax=0, ay=20,
                        arrowhead=2,
                        arrowsize=1,
                        arrowwidth=2,
                        arrowcolor=color,
                        showarrow=True,
                        #showlegend=i==0,
                        #label=label,
                        ))
            return annotations
        elif X.shape[1] == 2:
            # NOTE(review): with the default marker_kwargs=None this raises
            # AttributeError (None.setdefault); callers apparently must pass
            # a dict — confirm.
            marker_kwargs.setdefault('symbol', 'diamond')
            opacity = kwargs.pop('opacity', .8)
            return Scatter3d(x=X[:, 0], y=X[:, 1], z=np.zeros(X.shape[0]),
                             mode='markers',
                             projection=dict(z=dict(show=True, opacity=opacity)),
                             marker=Marker(color=color, **marker_kwargs or {}),
                             opacity=0,
                             name=label,
                             showlegend=label is not None, **kwargs)

    def barplot(self, canvas, x, height, width=0.8, bottom=0, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
        """Build a bar trace. `width` and `bottom` are accepted for interface
        compatibility but not forwarded to plotly."""
        figure, _, _ = canvas
        if 'barmode' in kwargs:
            figure.layout['barmode'] = kwargs.pop('barmode')
        return Bar(x=x, y=height, marker=Marker(color=color), name=label)

    def xerrorbar(self, ax, X, Y, error, Z=None, color=Tango.colorsHex['mediumBlue'], label=None, error_kwargs=None, **kwargs):
        """Markers with horizontal error bars; a (2, n) error array is
        treated as asymmetric (minus, plus)."""
        error_kwargs = error_kwargs or {}
        if (error.shape[0] == 2) and (error.ndim == 2):
            error_kwargs.update(dict(array=error[1], arrayminus=error[0], symmetric=False))
        else:
            error_kwargs.update(dict(array=error, symmetric=True))
        X, Y = np.squeeze(X), np.squeeze(Y)
        if Z is not None:
            Z = np.squeeze(Z)
            return Scatter3d(x=X, y=Y, z=Z, mode='markers',
                             error_x=ErrorX(color=color, **error_kwargs or {}),
                             marker=Marker(size='0'), name=label,
                             showlegend=label is not None, **kwargs)
        return Scatter(x=X, y=Y, mode='markers',
                       error_x=ErrorX(color=color, **error_kwargs or {}),
                       marker=Marker(size='0'), name=label,
                       showlegend=label is not None,
                       **kwargs)

    def yerrorbar(self, ax, X, Y, error, Z=None, color=Tango.colorsHex['mediumBlue'], label=None, error_kwargs=None, **kwargs):
        """Markers with vertical error bars; same error conventions as
        xerrorbar."""
        error_kwargs = error_kwargs or {}
        if (error.shape[0] == 2) and (error.ndim == 2):
            error_kwargs.update(dict(array=error[1], arrayminus=error[0], symmetric=False))
        else:
            error_kwargs.update(dict(array=error, symmetric=True))
        X, Y = np.squeeze(X), np.squeeze(Y)
        if Z is not None:
            Z = np.squeeze(Z)
            return Scatter3d(x=X, y=Y, z=Z, mode='markers',
                             error_y=ErrorY(color=color, **error_kwargs or {}),
                             marker=Marker(size='0'), name=label,
                             showlegend=label is not None, **kwargs)
        return Scatter(x=X, y=Y, mode='markers',
                       error_y=ErrorY(color=color, **error_kwargs or {}),
                       marker=Marker(size='0'), name=label,
                       showlegend=label is not None,
                       **kwargs)

    def imshow(self, ax, X, extent=None, label=None, vmin=None, vmax=None, **imshow_kwargs):
        """Heatmap of X positioned by `extent` = (xmin, xmax, ymin, ymax)."""
        if not 'showscale' in imshow_kwargs:
            imshow_kwargs['showscale'] = False
        return Heatmap(z=X, name=label,
                       x0=extent[0], dx=float(extent[1]-extent[0])/(X.shape[0]-1),
                       y0=extent[2], dy=float(extent[3]-extent[2])/(X.shape[1]-1),
                       zmin=vmin, zmax=vmax,
                       showlegend=label is not None,
                       hoverinfo='z',
                       **imshow_kwargs)

    def imshow_interact(self, ax, plot_function, extent=None, label=None, resolution=None, vmin=None, vmax=None, **imshow_kwargs):
        # TODO stream interaction?
        super(PlotlyPlotsBase, self).imshow_interact(ax, plot_function)

    def annotation_heatmap(self, ax, X, annotation, extent=None, label='Gradient', imshow_kwargs=None, **annotation_kwargs):
        """Heatmap of X with per-cell text annotations overlaid."""
        # NOTE(review): imshow_kwargs defaults to None but is used with
        # .setdefault below — callers must pass a dict; confirm.
        imshow_kwargs.setdefault('label', label)
        imshow_kwargs.setdefault('showscale', True)
        imshow = self.imshow(ax, X, extent, **imshow_kwargs)
        # Rescale X into [-1, 1] so extreme cells get white text below.
        X = X-X.min()
        X /= X.max()/2.
        X -= 1
        x = np.linspace(extent[0], extent[1], X.shape[0])
        y = np.linspace(extent[2], extent[3], X.shape[1])
        annotations = Annotations()
        for n, row in enumerate(annotation):
            for m, val in enumerate(row):
                var = X[n][m]
                annotations.append(
                    Annotation(
                        text=str(val),
                        x=x[m], y=y[n],
                        xref='x1', yref='y1',
                        font=dict(color='white' if np.abs(var) > 0.8 else 'black', size=10),
                        opacity=.5,
                        showarrow=False,
                        ))
        return imshow, annotations

    def annotation_heatmap_interact(self, ax, plot_function, extent, label=None, resolution=15, imshow_kwargs=None, **annotation_kwargs):
        super(PlotlyPlotsBase, self).annotation_heatmap_interact(ax, plot_function, extent)

    def contour(self, ax, X, Y, C, levels=20, label=None, **kwargs):
        """Contour trace of C over the X/Y grid (`levels` currently unused)."""
        return Contour(x=X, y=Y, z=C,
                       #ncontours=levels, contours=Contours(start=C.min(), end=C.max(), size=(C.max()-C.min())/levels),
                       name=label, **kwargs)

    def surface(self, ax, X, Y, Z, color=None, label=None, **kwargs):
        """3d surface trace of Z over the X/Y grid."""
        return Surface(x=X, y=Y, z=Z, name=label, showlegend=label is not None, **kwargs)

    def fill_between(self, ax, X, lower, upper, color=Tango.colorsHex['mediumBlue'], label=None, line_kwargs=None, **kwargs):
        """Shade the band between `lower` and `upper`; returns the two
        boundary traces (lower, upper)."""
        if not 'line' in kwargs:
            kwargs['line'] = Line(**line_kwargs or {})
        else:
            kwargs['line'].update(line_kwargs or {})
        # Fold an alpha channel into hex colors via rgba().
        if color.startswith('#'):
            fcolor = 'rgba({c[0]}, {c[1]}, {c[2]}, {alpha})'.format(c=Tango.hex2rgb(color), alpha=kwargs.get('opacity', 1.0))
        else: fcolor = color
        u = Scatter(x=X, y=upper, fillcolor=fcolor, showlegend=label is not None, name=label, fill='tonextx', legendgroup='{}_fill_({},{})'.format(label, ax[1], ax[2]), **kwargs)
        #fcolor = '{}, {alpha})'.format(','.join(fcolor.split(',')[:-1]), alpha=0.0)
        l = Scatter(x=X, y=lower, fillcolor=fcolor, showlegend=False, name=label, legendgroup='{}_fill_({},{})'.format(label, ax[1], ax[2]), **kwargs)
        return l, u

    def fill_gradient(self, canvas, X, percentiles, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
        """Stacked translucent bands for a list of percentile curves, fading
        towards the outer percentiles."""
        if color.startswith('#'):
            colarray = Tango.hex2rgb(color)
            opacity = .9
        else:
            # NOTE(review): map(float(...)) is missing its iterable argument —
            # this branch raises TypeError as written; presumably
            # map(float, ...split(',')) was intended.
            colarray = map(float(color.strip(')').split('(')[1]))
            if len(colarray) == 4:
                colarray, opacity = colarray[:3] ,colarray[3]
        # Band opacity decreases linearly away from the median band.
        alpha = opacity*(1.-np.abs(np.linspace(-1,1,len(percentiles)-1)))
        def pairwise(iterable):
            "s -> (s0,s1), (s1,s2), (s2, s3), ..."
            from itertools import tee
            a, b = tee(iterable)
            next(b, None)
            return zip(a, b)
        polycol = []
        for i, y1, a in zip(range(len(percentiles)), percentiles, alpha):
            fcolor = 'rgba({}, {}, {}, {alpha})'.format(*colarray, alpha=a)
            # Only the middle band carries the legend entry.
            # NOTE(review): under Python 3, len(percentiles)/2 is a float, so
            # this comparison only matches for even-length lists — confirm.
            if i == len(percentiles)/2:
                polycol.append(Scatter(x=X, y=y1, fillcolor=fcolor, showlegend=True,
                                       name=label, line=Line(width=0, smoothing=0), mode='none', fill='tonextx',
                                       legendgroup='density', hoverinfo='none', **kwargs))
            else:
                polycol.append(Scatter(x=X, y=y1, fillcolor=fcolor, showlegend=False,
                                       name=None, line=Line(width=1, smoothing=0, color=fcolor), mode='none', fill='tonextx',
                                       legendgroup='density', hoverinfo='none', **kwargs))
        return polycol
class PlotlyPlotsOnline(PlotlyPlotsBase):
    """Plotly backend that renders via the plotly cloud service
    (plotly.plotly)."""
    def __init__(self):
        super(PlotlyPlotsOnline, self).__init__()

    def show_canvas(self, canvas, filename=None, **kwargs):
        """Upload and display the figure; inline in notebooks, otherwise in
        the browser."""
        figure, _, _ = canvas
        if len(figure.data) == 0:
            # add mock data
            figure.append_trace(Scatter(x=[], y=[], name='', showlegend=False), 1, 1)
        from ..gpy_plot.plot_util import in_ipynb
        if in_ipynb():
            return plotly.plotly.iplot(figure, filename=filename, **kwargs)
        else:
            return plotly.plotly.plot(figure, filename=filename, **kwargs)#self.current_states[hex(id(figure))]['filename'])
class PlotlyPlotsOffline(PlotlyPlotsBase):
    """Plotly backend that renders locally without the plotly cloud
    (plotly.offline)."""
    def __init__(self):
        super(PlotlyPlotsOffline, self).__init__()

    def show_canvas(self, canvas, filename=None, **kwargs):
        """Display the figure offline; inline in notebooks, otherwise as a
        local HTML file."""
        figure, _, _ = canvas
        if len(figure.data) == 0:
            # add mock data
            figure.append_trace(Scatter(x=[], y=[], name='', showlegend=False), 1, 1)
        from ..gpy_plot.plot_util import in_ipynb
        if in_ipynb():
            plotly.offline.init_notebook_mode(connected=True)
            return plotly.offline.iplot(figure, filename=filename, **kwargs)#self.current_states[hex(id(figure))]['filename'])
        else:
            return plotly.offline.plot(figure, filename=filename, **kwargs)
| bsd-3-clause |
alvaroing12/CADL | session-5/libs/utils.py | 6 | 21427 | """Utilities used in the Kadenze Academy Course on Deep Learning w/ Tensorflow.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Parag K. Mital
Copyright Parag K. Mital, June 2016.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import tensorflow as tf
import urllib
import numpy as np
import zipfile
import os
from scipy.io import wavfile
from scipy.misc import imsave
def download(path):
    """Use urllib to download a file.

    Parameters
    ----------
    path : str
        Url to download

    Returns
    -------
    path : str
        Location of downloaded file.
    """
    import os
    from six.moves import urllib
    fname = path.split('/')[-1]
    # Skip the download when a same-named file already exists locally.
    if os.path.exists(fname):
        return fname
    print('Downloading ' + path)
    def progress(count, block_size, total_size):
        # Progress hook for urlretrieve; '\r' keeps it on a single line.
        if count % 20 == 0:
            print('Downloaded %02.02f/%02.02f MB' % (
                count * block_size / 1024.0 / 1024.0,
                total_size / 1024.0 / 1024.0), end='\r')
    filepath, _ = urllib.request.urlretrieve(
        path, filename=fname, reporthook=progress)
    return filepath
def download_and_extract_tar(path, dst):
    """Download and extract a (gzip-compressed) tar file.

    Parameters
    ----------
    path : str
        Url to tar file to download.
    dst : str
        Location to save tar file contents.
    """
    import tarfile
    filepath = download(path)
    if not os.path.exists(dst):
        os.makedirs(dst)
    # Context manager closes the archive's file handle even if extraction
    # fails (the original left it open).
    with tarfile.open(filepath, 'r:gz') as archive:
        archive.extractall(dst)
def download_and_extract_zip(path, dst):
    """Download and extract a zip file.

    Parameters
    ----------
    path : str
        Url to zip file to download.
    dst : str
        Location to save zip file contents.
    """
    import zipfile
    filepath = download(path)
    if not os.path.exists(dst):
        os.makedirs(dst)
    # Context manager closes the archive's file handle even if extraction
    # fails (the original never closed the ZipFile).
    with zipfile.ZipFile(file=filepath) as zf:
        zf.extractall(dst)
def load_audio(filename, b_normalize=True):
    """Load the audiofile at the provided filename using scipy.io.wavfile.

    Optionally normalizes the audio to the maximum value.

    Parameters
    ----------
    filename : str
        File to load.
    b_normalize : bool, optional
        Normalize to the maximum value.

    Returns
    -------
    samples : np.ndarray
        Audio samples; float32, peak-normalized and mean-subtracted when
        ``b_normalize`` is True, otherwise the raw samples.
    """
    # The sample rate returned by wavfile.read is discarded, matching the
    # original behavior.
    _, samples = wavfile.read(filename)
    if b_normalize:
        samples = samples.astype(np.float32)
        # Scale to a peak amplitude of 1, then remove any DC offset.
        samples = samples / np.max(np.abs(samples))
        samples -= np.mean(samples)
    return samples
def corrupt(x):
    """Take an input tensor and add uniform masking.

    Parameters
    ----------
    x : Tensor/Placeholder
        Input to corrupt.

    Returns
    -------
    x_corrupted : Tensor
        50 pct of values corrupted.
    """
    # Multiply by a mask drawn uniformly from {0, 1} (random_uniform over
    # [0, 2) cast to int32): on average half the entries are zeroed.
    # TF1-style graph op; requires the input to be float32-compatible.
    return tf.multiply(x, tf.cast(tf.random_uniform(shape=tf.shape(x),
                                                    minval=0,
                                                    maxval=2,
                                                    dtype=tf.int32), tf.float32))
def interp(l, r, n_samples):
    """Intepolate between the arrays l and r, n_samples times.

    Both endpoints are included, so ``n_samples`` must be >= 2.

    Parameters
    ----------
    l : np.ndarray
        Left edge
    r : np.ndarray
        Right edge
    n_samples : int
        Number of samples

    Returns
    -------
    arr : np.ndarray
        Inteporalted array of shape (n_samples,) + l.shape
    """
    # float() guards against integer division under Python 2: this module
    # only imports print_function from __future__, not division, so the
    # original `step_i / (n_samples - 1)` truncated to 0 for every
    # intermediate step on Python 2.
    return np.array([
        l + step_i / float(n_samples - 1) * (r - l)
        for step_i in range(n_samples)])
def make_latent_manifold(corners, n_samples):
    """Create a 2d manifold out of the provided corners: n_samples * n_samples.

    Parameters
    ----------
    corners : list of np.ndarray
        The four corners to intepolate.
    n_samples : int
        Number of samples to use in interpolation.

    Returns
    -------
    arr : np.ndarray
        Stacked array of all 2D interpolated samples
    """
    # Interpolate down the two vertical edges first, then across each row
    # between the corresponding edge points.
    left_edge = interp(corners[0], corners[1], n_samples)
    right_edge = interp(corners[2], corners[3], n_samples)
    rows = [interp(left_edge[row_i], right_edge[row_i], n_samples)
            for row_i in range(n_samples)]
    return np.vstack(rows)
def imcrop_tosquare(img):
    """Make any image a square image.

    Crops the longer spatial side, taking (roughly) equal amounts from
    both ends so the result stays centered.

    Parameters
    ----------
    img : np.ndarray
        Input image to crop, assumed at least 2d.

    Returns
    -------
    crop : np.ndarray
        Cropped image.
    """
    side = np.min(img.shape[:2])
    excess = np.asarray(img.shape[:2]) - side
    cropped = img
    # Only the axes with excess length need cropping.
    for axis in np.flatnonzero(excess):
        start = excess[axis] // 2
        cropped = np.take(cropped, start + np.arange(side), axis=axis)
    return cropped
def slice_montage(montage, img_h, img_w, n_imgs):
    """Slice a montage image into n_img h x w images.

    Performs the opposite of the montage function. Takes a montage image and
    slices it back into a N x H x W x C image.

    Parameters
    ----------
    montage : np.ndarray
        Montage image to slice.
    img_h : int
        Height of sliced image
    img_w : int
        Width of sliced image
    n_imgs : int
        Number of images to slice

    Returns
    -------
    sliced : np.ndarray
        Sliced images as 4d array.
    """
    # Cells sit on a sqrt(n) x sqrt(n) grid with 1-pixel borders, so each
    # cell starts at offset 1 + index + index * size along each axis.
    grid = int(np.sqrt(n_imgs))
    cells = [
        montage[1 + r + r * img_h:1 + r + (r + 1) * img_h,
                1 + c + c * img_w:1 + c + (c + 1) * img_w]
        for r in range(grid)
        for c in range(grid)
    ]
    return np.array(cells)
def montage(images, saveto='montage.png'):
    """Draw all images as a montage separated by 1 pixel borders.

    Also saves the file to the destination specified by `saveto`.

    Parameters
    ----------
    images : numpy.ndarray
        Input array to create montage of. Array should be:
        batch x height x width x channels.
    saveto : str
        Location to save the resulting montage image.

    Returns
    -------
    m : numpy.ndarray
        Montage image.
    """
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    # Smallest square grid that fits all images.
    n_plots = int(np.ceil(np.sqrt(images.shape[0])))
    # Canvas filled with 0.5 (mid-gray) so the 1-pixel borders between
    # cells and any unused trailing cells appear gray.
    if len(images.shape) == 4 and images.shape[3] == 3:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5
    elif len(images.shape) == 4 and images.shape[3] == 1:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1, 1)) * 0.5
    elif len(images.shape) == 3:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1)) * 0.5
    else:
        raise ValueError('Could not parse image shape of {}'.format(
            images.shape))
    for i in range(n_plots):
        for j in range(n_plots):
            this_filter = i * n_plots + j
            if this_filter < images.shape[0]:
                this_img = images[this_filter]
                # Cell (i, j) starts at offset 1 + index + index * size to
                # leave the 1-pixel border.
                m[1 + i + i * img_h:1 + i + (i + 1) * img_h,
                  1 + j + j * img_w:1 + j + (j + 1) * img_w] = this_img
    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — confirm the
    # pinned SciPy version or migrate to imageio.imwrite.
    imsave(arr=np.squeeze(m), name=saveto)
    return m
def montage_filters(W):
    """Draws all filters (n_input * n_output filters) as a
    montage image separated by 1 pixel borders.

    Parameters
    ----------
    W : Tensor
        Input tensor to create montage of.

    Returns
    -------
    m : numpy.ndarray
        Montage image.
    """
    # Flatten the input/output channel axes into a single "filter" axis.
    W = np.reshape(W, [W.shape[0], W.shape[1], 1, W.shape[2] * W.shape[3]])
    n_plots = int(np.ceil(np.sqrt(W.shape[-1])))
    filt_h, filt_w = W.shape[0], W.shape[1]
    # Mid-gray canvas; +n_plots+1 leaves a 1-pixel border around each cell.
    canvas = np.ones(
        (filt_h * n_plots + n_plots + 1,
         filt_w * n_plots + n_plots + 1)) * 0.5
    for idx in range(W.shape[-1]):
        row, col = divmod(idx, n_plots)
        top = 1 + row + row * filt_h
        left = 1 + col + col * filt_w
        canvas[top:top + filt_h, left:left + filt_w] = (
            np.squeeze(W[:, :, :, idx]))
    return canvas
def get_celeb_files(dst='img_align_celeba', max_images=100):
    """Download the first 100 images of the celeb dataset.

    Files will be placed in a directory 'img_align_celeba' if one
    doesn't exist.

    Returns
    -------
    files : list of strings
        Locations to the first 100 images of the celeb net dataset.
    """
    # Create a directory
    if not os.path.exists(dst):
        os.mkdir(dst)
    # Now perform the following 100 times:
    for img_i in range(1, max_images + 1):
        # create a string using the current loop counter
        # NOTE(review): '000%03d' only yields the canonical 6-digit CelebA
        # names for img_i <= 999 — confirm max_images never exceeds that.
        f = '000%03d.jpg' % img_i
        if not os.path.exists(os.path.join(dst, f)):
            # and get the url with that string appended the end
            url = 'https://s3.amazonaws.com/cadl/celeb-align/' + f
            # We'll print this out to the console so we can see how far we've gone
            print(url, end='\r')
            # And now download the url to a location inside our new directory
            urllib.request.urlretrieve(url, os.path.join(dst, f))
    # Return whatever .jpg files are present, capped at max_images.
    files = [os.path.join(dst, file_i)
             for file_i in os.listdir(dst)
             if '.jpg' in file_i][:max_images]
    return files
def get_celeb_imgs(max_images=100):
    """Load the first `max_images` images of the celeb dataset.

    Returns
    -------
    imgs : list of np.ndarray
        List of the first `max_images` images from the celeb dataset
    """
    # Download the files (if needed), then decode each one in turn.
    imgs = []
    for path in get_celeb_files(max_images=max_images):
        imgs.append(plt.imread(path))
    return imgs
def gauss(mean, stddev, ksize):
    """Use Tensorflow to compute a Gaussian Kernel.

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed Gaussian Kernel using Tensorflow.
    """
    # Evaluate in a throwaway graph/session (TF1-style, non-eager API).
    g = tf.Graph()
    with tf.Session(graph=g):
        # Sample the Gaussian pdf at ksize points over [-3, 3]; 3.1415
        # approximates pi, so the normalization is very slightly off —
        # harmless for kernel use.
        x = tf.linspace(-3.0, 3.0, ksize)
        z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /
                                (2.0 * tf.pow(stddev, 2.0)))) *
             (1.0 / (stddev * tf.sqrt(2.0 * 3.1415))))
        return z.eval()
def gauss2d(mean, stddev, ksize):
    """Use Tensorflow to compute a 2D Gaussian Kernel.
    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).
    Returns
    -------
    kernel : np.ndarray
        Computed 2D Gaussian Kernel using Tensorflow.
    """
    # The 2-D kernel is the outer product of the 1-D kernel with itself.
    kernel_1d = gauss(mean, stddev, ksize)
    graph = tf.Graph()
    with tf.Session(graph=graph):
        col = tf.reshape(kernel_1d, [ksize, 1])
        row = tf.reshape(kernel_1d, [1, ksize])
        return tf.matmul(col, row).eval()
def convolve(img, kernel):
    """Use Tensorflow to convolve a 4D image with a 4D kernel.
    Parameters
    ----------
    img : np.ndarray
        4-dimensional image shaped N x H x W x C
    kernel : np.ndarray
        4-dimensional image shape K_H, K_W, C_I, C_O corresponding to the
        kernel's height and width, the number of input channels, and the
        number of output channels. Note that C_I should = C.
    Returns
    -------
    result : np.ndarray
        Convolved result.
    """
    graph = tf.Graph()
    with tf.Session(graph=graph):
        # Unit stride in every dimension; SAME padding keeps H x W.
        convolved = tf.nn.conv2d(
            img, kernel, strides=[1, 1, 1, 1], padding='SAME')
        return convolved.eval()
def gabor(ksize=32):
    """Use Tensorflow to compute a 2D Gabor Kernel.
    Parameters
    ----------
    ksize : int, optional
        Size of kernel.
    Returns
    -------
    gabor : np.ndarray
        Gabor kernel with ksize x ksize dimensions.
    """
    graph = tf.Graph()
    with tf.Session(graph=graph):
        # Gaussian envelope modulating the sinusoidal carrier below.
        envelope = gauss2d(0.0, 1.0, ksize)
        # Replicate a sine column across every column via an outer product.
        carrier_col = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, ksize)),
                                 [ksize, 1])
        wave = tf.matmul(carrier_col, tf.ones((1, ksize)))
        return tf.multiply(wave, envelope).eval()
def build_submission(filename, file_list, optional_file_list=()):
    """Helper utility to check homework assignment submissions and package them.
    Parameters
    ----------
    filename : str
        Output zip file name
    file_list : tuple
        Tuple of files to include
    optional_file_list : tuple, optional
        Extra files to include when present; their absence is not reported.
    """
    # check each file exists
    for part_i, file_i in enumerate(file_list):
        if not os.path.exists(file_i):
            print('\nYou are missing the file {}. '.format(file_i) +
                  'It does not look like you have completed Part {}.'.format(
                      part_i + 1))
    # str.endswith requires a tuple; accept lists from callers too.
    required = tuple(file_list)
    optional = tuple(optional_file_list)

    def zipdir(path, zf):
        # Walk the tree, adding only files named in the two lists.
        for root, dirs, files in os.walk(path):
            for file in files:
                if file.endswith(required) or file.endswith(optional):
                    zf.write(os.path.join(root, file))
    # create a zip file with the necessary files; the context manager
    # guarantees the archive is flushed/closed even if zipping raises.
    with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
        zipdir('.', zipf)
    print('Your assignment zip file has been created!')
    print('Now submit the file:\n{}\nto Kadenze for grading!'.format(
        os.path.abspath(filename)))
def normalize(a, s=0.1):
    '''Normalize the image range for visualization'''
    # Center the data around 0.5 with `s` standard deviations of spread,
    # then map the clipped [0, 1] range onto 8-bit pixel values.
    spread = max(a.std(), 1e-4)
    centered = (a - a.mean()) / spread * s + 0.5
    return np.uint8(np.clip(centered, 0, 1) * 255)
# %%
def weight_variable(shape, **kwargs):
    '''Helper function to create a weight variable initialized with
    a normal distribution
    Parameters
    ----------
    shape : list
        Size of weight variable
    '''
    # Non-list shapes can be handed to random_normal directly; list shapes
    # are stacked into a tensor and then have their static shape restored
    # so downstream graph code can rely on it.
    if not isinstance(shape, list):
        init = tf.random_normal(shape, mean=0.0, stddev=0.01)
    else:
        init = tf.random_normal(tf.stack(shape), mean=0.0, stddev=0.01)
        init.set_shape(shape)
    return tf.Variable(init, **kwargs)
# %%
def bias_variable(shape, **kwargs):
    '''Helper function to create a bias variable.
    NOTE: despite the historical description ("a constant value"), this
    actually initializes from a small random normal distribution, exactly
    like weight_variable above.
    Parameters
    ----------
    shape : list
        Size of weight variable
    '''
    # List shapes are stacked into a tensor, then the static shape is
    # restored with set_shape for downstream graph code.
    if isinstance(shape, list):
        initial = tf.random_normal(tf.stack(shape), mean=0.0, stddev=0.01)
        initial.set_shape(shape)
    else:
        initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
    return tf.Variable(initial, **kwargs)
def binary_cross_entropy(z, x, name=None):
    """Binary Cross Entropy measures cross entropy of a binary variable.
    loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))
    Parameters
    ----------
    z : tf.Tensor
        A `Tensor` of the same type and shape as `x`.
    x : tf.Tensor
        A `Tensor` of type `float32` or `float64`.
    """
    with tf.variable_scope(name or 'bce'):
        # Epsilon keeps log() away from zero when z saturates at 0 or 1.
        eps = 1e-12
        positive_term = x * tf.log(z + eps)
        negative_term = (1. - x) * tf.log(1. - z + eps)
        return -(positive_term + negative_term)
def conv2d(x, n_output,
           k_h=5, k_w=5, d_h=2, d_w=2,
           padding='SAME', name='conv2d', reuse=None):
    """Helper for creating a 2d convolution operation.
    Parameters
    ----------
    x : tf.Tensor
        Input tensor to convolve.
    n_output : int
        Number of filters.
    k_h : int, optional
        Kernel height
    k_w : int, optional
        Kernel width
    d_h : int, optional
        Height stride
    d_w : int, optional
        Width stride
    padding : str, optional
        Padding type: "SAME" or "VALID"
    name : str, optional
        Variable scope
    reuse : bool or None, optional
        Whether to reuse variables in the scope.
    Returns
    -------
    op : tf.Tensor
        Output of convolution
    """
    with tf.variable_scope(name or 'conv2d', reuse=reuse):
        # Kernel layout: height x width x input channels x output channels.
        weights = tf.get_variable(
            name='W',
            shape=[k_h, k_w, x.get_shape()[-1], n_output],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        convolved = tf.nn.conv2d(
            name='conv',
            input=x,
            filter=weights,
            strides=[1, d_h, d_w, 1],
            padding=padding)
        biases = tf.get_variable(
            name='b',
            shape=[n_output],
            initializer=tf.constant_initializer(0.0))
        activation = tf.nn.bias_add(
            name='h',
            value=convolved,
            bias=biases)
    return activation, weights
def deconv2d(x, n_output_h, n_output_w, n_output_ch, n_input_ch=None,
             k_h=5, k_w=5, d_h=2, d_w=2,
             padding='SAME', name='deconv2d', reuse=None):
    """Deconvolution helper.
    Parameters
    ----------
    x : tf.Tensor
        Input tensor to convolve.
    n_output_h : int
        Height of output
    n_output_w : int
        Width of output
    n_output_ch : int
        Number of filters.
    n_input_ch : int, optional
        Number of input channels; inferred from `x` when None.
    k_h : int, optional
        Kernel height
    k_w : int, optional
        Kernel width
    d_h : int, optional
        Height stride
    d_w : int, optional
        Width stride
    padding : str, optional
        Padding type: "SAME" or "VALID"
    name : str, optional
        Variable scope
    reuse : bool or None, optional
        Whether to reuse variables in the scope.
    Returns
    -------
    op : tf.Tensor
        Output of deconvolution
    """
    with tf.variable_scope(name or 'deconv2d', reuse=reuse):
        # NOTE: conv2d_transpose kernels are [k_h, k_w, out_ch, in_ch] --
        # output channels come *before* input channels, unlike conv2d.
        W = tf.get_variable(
            name='W',
            shape=[k_h, k_w, n_output_ch, n_input_ch or x.get_shape()[-1]],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d_transpose(
            name='conv_t',
            value=x,
            filter=W,
            # Batch dimension is dynamic; spatial/channel sizes are fixed.
            output_shape=tf.stack(
                [tf.shape(x)[0], n_output_h, n_output_w, n_output_ch]),
            strides=[1, d_h, d_w, 1],
            padding=padding)
        # conv2d_transpose drops static shape info; restore it for callers.
        conv.set_shape([None, n_output_h, n_output_w, n_output_ch])
        b = tf.get_variable(
            name='b',
            shape=[n_output_ch],
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(name='h', value=conv, bias=b)
    return h, W
def lrelu(features, leak=0.2):
    """Leaky rectifier.
    Parameters
    ----------
    features : tf.Tensor
        Input to apply leaky rectifier to.
    leak : float, optional
        Percentage of leak.
    Returns
    -------
    op : tf.Tensor
        Resulting output of applying leaky rectifier activation.
    """
    # Branch-free form: equals x when x >= 0 and leak * x otherwise,
    # since 0.5*(1+leak)*x + 0.5*(1-leak)*|x| collapses to those cases.
    pos_coeff = 0.5 * (1 + leak)
    neg_coeff = 0.5 * (1 - leak)
    return pos_coeff * features + neg_coeff * abs(features)
def linear(x, n_output, name=None, activation=None, reuse=None):
    """Fully connected layer.
    Parameters
    ----------
    x : tf.Tensor
        Input tensor to connect
    n_output : int
        Number of output neurons
    name : None, optional
        Scope to apply
    activation : callable, optional
        Activation applied to the output (e.g. tf.nn.relu); None = linear.
    reuse : bool or None, optional
        Whether to reuse variables in the scope.
    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of fully connected layer and the weight matrix
    """
    # Collapse higher-rank inputs to (batch, features) before the matmul.
    if len(x.get_shape()) != 2:
        x = flatten(x, reuse=reuse)
    n_input = x.get_shape().as_list()[1]
    with tf.variable_scope(name or "fc", reuse=reuse):
        W = tf.get_variable(
            name='W',
            shape=[n_input, n_output],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable(
            name='b',
            shape=[n_output],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(
            name='h',
            value=tf.matmul(x, W),
            bias=b)
        if activation:
            h = activation(h)
        return h, W
def flatten(x, name=None, reuse=None):
    """Flatten Tensor to 2-dimensions.
    Parameters
    ----------
    x : tf.Tensor
        Input tensor to flatten.
    name : None, optional
        Variable scope for flatten operations
    reuse : bool or None, optional
        Whether to reuse the variable scope.
    Returns
    -------
    flattened : tf.Tensor
        Flattened tensor.
    Raises
    ------
    ValueError
        If the input is not 1-, 2- or 4-dimensional.
    """
    # BUG FIX: the scope previously hard-coded 'flatten' and ignored the
    # caller-supplied `name`/`reuse` arguments; honor them like the other
    # helpers (conv2d, deconv2d, linear) in this module do.
    with tf.variable_scope(name or 'flatten', reuse=reuse):
        dims = x.get_shape().as_list()
        if len(dims) == 4:
            # Collapse H x W x C into a single feature axis.
            flattened = tf.reshape(
                x,
                shape=[-1, dims[1] * dims[2] * dims[3]])
        elif len(dims) == 2 or len(dims) == 1:
            # Already flat; pass through untouched.
            flattened = x
        else:
            raise ValueError('Expected n dimensions of 1, 2 or 4. Found:',
                             len(dims))
        return flattened
def to_tensor(x):
    """Convert 2 dim Tensor to a 4 dim Tensor ready for convolution.
    Performs the opposite of flatten(x). If the tensor is already 4-D, this
    returns the same as the input, leaving it unchanged.
    Parameters
    ----------
    x : tf.Tensor
        Input 2-D tensor. If 4-D already, left unchanged.
    Returns
    -------
    x : tf.Tensor
        4-D representation of the input.
    Raises
    ------
    ValueError
        If the tensor is neither 2-D nor 4-D.
    """
    if len(x.get_shape()) == 2:
        n_input = x.get_shape().as_list()[1]
        x_dim = np.sqrt(n_input)
        if x_dim == int(x_dim):
            # Perfect square: treat as a single-channel square image.
            x_dim = int(x_dim)
            x_tensor = tf.reshape(
                x, [-1, x_dim, x_dim, 1], name='reshape')
        elif np.sqrt(n_input / 3) == int(np.sqrt(n_input / 3)):
            # Square after dividing by 3: treat as an RGB image.
            x_dim = int(np.sqrt(n_input / 3))
            x_tensor = tf.reshape(
                x, [-1, x_dim, x_dim, 3], name='reshape')
        else:
            # Fallback: a 1x1 "image" with n_input channels.
            x_tensor = tf.reshape(
                x, [-1, 1, 1, n_input], name='reshape')
    elif len(x.get_shape()) == 4:
        x_tensor = x
    else:
        raise ValueError('Unsupported input dimensions')
    return x_tensor
| apache-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/test_window.py | 7 | 146653 | from itertools import product
import nose
import sys
import warnings
from nose.tools import assert_raises
from datetime import datetime
from numpy.random import randn
import numpy as np
from distutils.version import LooseVersion
import pandas as pd
from pandas import (Series, DataFrame, Panel, bdate_range, isnull,
notnull, concat, Timestamp)
import pandas.stats.moments as mom
import pandas.core.window as rwindow
import pandas.tseries.offsets as offsets
from pandas.core.base import SpecificationError
from pandas.core.common import UnsupportedFunctionCall
import pandas.util.testing as tm
from pandas.compat import range, zip, PY3
N, K = 100, 10
def assert_equal(left, right):
    # Dispatch to the matching pandas testing helper for the input type.
    if isinstance(left, Series):
        compare = tm.assert_series_equal
    else:
        compare = tm.assert_frame_equal
    compare(left, right)
class Base(tm.TestCase):
    """Shared fixture base for the rolling/expanding/ewm test cases."""
    _multiprocess_can_split_ = True
    # positions in `arr` that _create_data overwrites with NaN
    _nan_locs = np.arange(20, 40)
    _inf_locs = np.array([])
    def _create_data(self):
        """Build the random series/frame fixtures used by the tests."""
        arr = randn(N)
        arr[self._nan_locs] = np.NaN
        self.arr = arr
        self.rng = bdate_range(datetime(2009, 1, 1), periods=N)
        self.series = Series(arr.copy(), index=self.rng)
        self.frame = DataFrame(randn(N, K), index=self.rng,
                               columns=np.arange(K))
class TestApi(Base):
    """Tests for the selection (__getitem__) and aggregation API of
    rolling windows."""
    def setUp(self):
        self._create_data()
    def test_getitem(self):
        # selecting columns on a Rolling should narrow _selected_obj
        r = self.frame.rolling(window=5)
        tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)
        r = self.frame.rolling(window=5)[1]
        self.assertEqual(r._selected_obj.name, self.frame.columns[1])
        # technically this is allowed
        r = self.frame.rolling(window=5)[1, 3]
        tm.assert_index_equal(r._selected_obj.columns,
                              self.frame.columns[[1, 3]])
        r = self.frame.rolling(window=5)[[1, 3]]
        tm.assert_index_equal(r._selected_obj.columns,
                              self.frame.columns[[1, 3]])
    def test_select_bad_cols(self):
        df = DataFrame([[1, 2]], columns=['A', 'B'])
        g = df.rolling(window=5)
        self.assertRaises(KeyError, g.__getitem__, ['C'])  # g[['C']]
        self.assertRaises(KeyError, g.__getitem__, ['A', 'C'])  # g[['A', 'C']]
        with tm.assertRaisesRegexp(KeyError, '^[^A]+$'):
            # A should not be referenced as a bad column...
            # will have to rethink regex if you change message!
            g[['A', 'C']]
    def test_attribute_access(self):
        # attribute access mirrors column selection
        df = DataFrame([[1, 2]], columns=['A', 'B'])
        r = df.rolling(window=5)
        tm.assert_series_equal(r.A.sum(), r['A'].sum())
        self.assertRaises(AttributeError, lambda: r.F)
    def tests_skip_nuisance(self):
        # non-numeric columns are skipped by aggregations
        df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'})
        r = df.rolling(window=3)
        result = r[['A', 'B']].sum()
        expected = DataFrame({'A': [np.nan, np.nan, 3, 6, 9],
                              'B': [np.nan, np.nan, 18, 21, 24]},
                             columns=list('AB'))
        tm.assert_frame_equal(result, expected)
        expected = pd.concat([r[['A', 'B']].sum(), df[['C']]], axis=1)
        result = r.sum()
        tm.assert_frame_equal(result, expected, check_like=True)
    def test_agg(self):
        df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
        r = df.rolling(window=3)
        a_mean = r['A'].mean()
        a_std = r['A'].std()
        a_sum = r['A'].sum()
        b_mean = r['B'].mean()
        b_std = r['B'].std()
        b_sum = r['B'].sum()
        # list of functions -> MultiIndex columns
        result = r.aggregate([np.mean, np.std])
        expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
        expected.columns = pd.MultiIndex.from_product([['A', 'B'], ['mean',
                                                                    'std']])
        tm.assert_frame_equal(result, expected)
        result = r.aggregate({'A': np.mean, 'B': np.std})
        expected = pd.concat([a_mean, b_std], axis=1)
        tm.assert_frame_equal(result, expected, check_like=True)
        result = r.aggregate({'A': ['mean', 'std']})
        expected = pd.concat([a_mean, a_std], axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), ('A',
                                                                      'std')])
        tm.assert_frame_equal(result, expected)
        result = r['A'].aggregate(['mean', 'sum'])
        expected = pd.concat([a_mean, a_sum], axis=1)
        expected.columns = ['mean', 'sum']
        tm.assert_frame_equal(result, expected)
        # renaming via nested dicts (later deprecated in pandas)
        result = r.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
        expected = pd.concat([a_mean, a_sum], axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
                                                      ('A', 'sum')])
        tm.assert_frame_equal(result, expected, check_like=True)
        result = r.aggregate({'A': {'mean': 'mean',
                                    'sum': 'sum'},
                              'B': {'mean2': 'mean',
                                    'sum2': 'sum'}})
        expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
        exp_cols = [('A', 'mean'), ('A', 'sum'), ('B', 'mean2'), ('B', 'sum2')]
        expected.columns = pd.MultiIndex.from_tuples(exp_cols)
        tm.assert_frame_equal(result, expected, check_like=True)
        result = r.aggregate({'A': ['mean', 'std'], 'B': ['mean', 'std']})
        expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
        exp_cols = [('A', 'mean'), ('A', 'std'), ('B', 'mean'), ('B', 'std')]
        expected.columns = pd.MultiIndex.from_tuples(exp_cols)
        tm.assert_frame_equal(result, expected, check_like=True)
        # passed lambda
        result = r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)})
        rcustom = r['B'].apply(lambda x: np.std(x, ddof=1))
        expected = pd.concat([a_sum, rcustom], axis=1)
        tm.assert_frame_equal(result, expected, check_like=True)
    def test_agg_consistency(self):
        # column structure should be consistent across agg spellings
        df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
        r = df.rolling(window=3)
        result = r.agg([np.sum, np.mean]).columns
        expected = pd.MultiIndex.from_product([list('AB'), ['sum', 'mean']])
        tm.assert_index_equal(result, expected)
        result = r['A'].agg([np.sum, np.mean]).columns
        expected = pd.Index(['sum', 'mean'])
        tm.assert_index_equal(result, expected)
        result = r.agg({'A': [np.sum, np.mean]}).columns
        expected = pd.MultiIndex.from_tuples([('A', 'sum'), ('A', 'mean')])
        tm.assert_index_equal(result, expected)
    def test_agg_nested_dicts(self):
        # API change for disallowing these types of nested dicts
        df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
        r = df.rolling(window=3)
        def f():
            r.aggregate({'r1': {'A': ['mean', 'sum']},
                         'r2': {'B': ['mean', 'sum']}})
        self.assertRaises(SpecificationError, f)
        expected = pd.concat([r['A'].mean(), r['A'].std(), r['B'].mean(),
                              r['B'].std()], axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
            'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
        result = r[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
                                    'B': {'rb': ['mean', 'std']}})
        tm.assert_frame_equal(result, expected, check_like=True)
        result = r.agg({'A': {'ra': ['mean', 'std']},
                        'B': {'rb': ['mean', 'std']}})
        expected.columns = pd.MultiIndex.from_tuples([('A', 'ra', 'mean'), (
            'A', 'ra', 'std'), ('B', 'rb', 'mean'), ('B', 'rb', 'std')])
        tm.assert_frame_equal(result, expected, check_like=True)
    def test_window_with_args(self):
        tm._skip_if_no_scipy()
        # make sure that we are aggregating window functions correctly with arg
        r = Series(np.random.randn(100)).rolling(window=10, min_periods=1,
                                                 win_type='gaussian')
        expected = pd.concat([r.mean(std=10), r.mean(std=.01)], axis=1)
        expected.columns = ['<lambda>', '<lambda>']
        result = r.aggregate([lambda x: x.mean(std=10),
                              lambda x: x.mean(std=.01)])
        tm.assert_frame_equal(result, expected)
        def a(x):
            return x.mean(std=10)
        def b(x):
            return x.mean(std=0.01)
        expected = pd.concat([r.mean(std=10), r.mean(std=.01)], axis=1)
        expected.columns = ['a', 'b']
        result = r.aggregate([a, b])
        tm.assert_frame_equal(result, expected)
    def test_preserve_metadata(self):
        # GH 10565
        s = Series(np.arange(100), name='foo')
        s2 = s.rolling(30).sum()
        s3 = s.rolling(20).sum()
        self.assertEqual(s2.name, 'foo')
        self.assertEqual(s3.name, 'foo')
    def test_how_compat(self):
        # in prior versions, we would allow how to be used in the resample
        # now that its deprecated, we need to handle this in the actual
        # aggregation functions
        s = pd.Series(
            np.random.randn(20),
            index=pd.date_range('1/1/2000', periods=20, freq='12H'))
        for how in ['min', 'max', 'median']:
            for op in ['mean', 'sum', 'std', 'var', 'kurt', 'skew']:
                for t in ['rolling', 'expanding']:
                    with tm.assert_produces_warning(FutureWarning,
                                                    check_stacklevel=False):
                        dfunc = getattr(pd, "{0}_{1}".format(t, op))
                        if dfunc is None:
                            continue
                        if t == 'rolling':
                            kwargs = {'window': 5}
                        else:
                            kwargs = {}
                        result = dfunc(s, freq='D', how=how, **kwargs)
                        expected = getattr(
                            getattr(s, t)(freq='D', **kwargs), op)(how=how)
                        tm.assert_series_equal(result, expected)
class TestWindow(Base):
    """Constructor/validation tests for win_type-based Window objects."""
    def setUp(self):
        self._create_data()
    def test_constructor(self):
        # GH 12669
        tm._skip_if_no_scipy()
        for o in [self.series, self.frame]:
            c = o.rolling
            # valid
            c(win_type='boxcar', window=2, min_periods=1)
            c(win_type='boxcar', window=2, min_periods=1, center=True)
            c(win_type='boxcar', window=2, min_periods=1, center=False)
            for wt in ['boxcar', 'triang', 'blackman', 'hamming', 'bartlett',
                       'bohman', 'blackmanharris', 'nuttall', 'barthann']:
                c(win_type=wt, window=2)
            # not valid
            for w in [2., 'foo', np.array([2])]:
                with self.assertRaises(ValueError):
                    c(win_type='boxcar', window=2, min_periods=w)
                with self.assertRaises(ValueError):
                    c(win_type='boxcar', window=2, min_periods=1, center=w)
            for wt in ['foobar', 1]:
                with self.assertRaises(ValueError):
                    c(win_type=wt, window=2)
    def test_numpy_compat(self):
        # see gh-12811
        w = rwindow.Window(Series([2, 4, 6]), window=[0, 2])
        msg = "numpy operations are not valid with window objects"
        for func in ('sum', 'mean'):
            tm.assertRaisesRegexp(UnsupportedFunctionCall, msg,
                                  getattr(w, func), 1, 2, 3)
            tm.assertRaisesRegexp(UnsupportedFunctionCall, msg,
                                  getattr(w, func), dtype=np.float64)
class TestRolling(Base):
    """Constructor/validation tests for Rolling objects."""
    def setUp(self):
        self._create_data()
    def test_doc_string(self):
        # smoke-test the examples from the docstring
        df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
        df
        df.rolling(2).sum()
        df.rolling(2, min_periods=1).sum()
    def test_constructor(self):
        # GH 12669
        for o in [self.series, self.frame]:
            c = o.rolling
            # valid
            c(window=2)
            c(window=2, min_periods=1)
            c(window=2, min_periods=1, center=True)
            c(window=2, min_periods=1, center=False)
            # GH 13383
            c(0)
            with self.assertRaises(ValueError):
                c(-1)
            # not valid
            for w in [2., 'foo', np.array([2])]:
                with self.assertRaises(ValueError):
                    c(window=w)
                with self.assertRaises(ValueError):
                    c(window=2, min_periods=w)
                with self.assertRaises(ValueError):
                    c(window=2, min_periods=1, center=w)
    def test_constructor_with_win_type(self):
        # GH 13383
        tm._skip_if_no_scipy()
        for o in [self.series, self.frame]:
            c = o.rolling
            c(0, win_type='boxcar')
            with self.assertRaises(ValueError):
                c(-1, win_type='boxcar')
    def test_numpy_compat(self):
        # see gh-12811
        r = rwindow.Rolling(Series([2, 4, 6]), window=2)
        msg = "numpy operations are not valid with window objects"
        for func in ('std', 'mean', 'sum', 'max', 'min', 'var'):
            tm.assertRaisesRegexp(UnsupportedFunctionCall, msg,
                                  getattr(r, func), 1, 2, 3)
            tm.assertRaisesRegexp(UnsupportedFunctionCall, msg,
                                  getattr(r, func), dtype=np.float64)
class TestExpanding(Base):
    """Constructor/validation tests for Expanding objects."""
    def setUp(self):
        self._create_data()
    def test_doc_string(self):
        # smoke-test the examples from the docstring
        df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
        df
        df.expanding(2).sum()
    def test_constructor(self):
        # GH 12669
        for o in [self.series, self.frame]:
            c = o.expanding
            # valid
            c(min_periods=1)
            c(min_periods=1, center=True)
            c(min_periods=1, center=False)
            # not valid
            for w in [2., 'foo', np.array([2])]:
                with self.assertRaises(ValueError):
                    c(min_periods=w)
                with self.assertRaises(ValueError):
                    c(min_periods=1, center=w)
    def test_numpy_compat(self):
        # see gh-12811
        e = rwindow.Expanding(Series([2, 4, 6]), window=2)
        msg = "numpy operations are not valid with window objects"
        for func in ('std', 'mean', 'sum', 'max', 'min', 'var'):
            tm.assertRaisesRegexp(UnsupportedFunctionCall, msg,
                                  getattr(e, func), 1, 2, 3)
            tm.assertRaisesRegexp(UnsupportedFunctionCall, msg,
                                  getattr(e, func), dtype=np.float64)
class TestEWM(Base):
    """Constructor/validation tests for EWM (exponentially weighted)
    objects, including the mutually exclusive decay parameters."""
    def setUp(self):
        self._create_data()
    def test_doc_string(self):
        # smoke-test the examples from the docstring
        df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
        df
        df.ewm(com=0.5).mean()
    def test_constructor(self):
        for o in [self.series, self.frame]:
            c = o.ewm
            # valid
            c(com=0.5)
            c(span=1.5)
            c(alpha=0.5)
            c(halflife=0.75)
            c(com=0.5, span=None)
            c(alpha=0.5, com=None)
            c(halflife=0.75, alpha=None)
            # not valid: mutually exclusive
            with self.assertRaises(ValueError):
                c(com=0.5, alpha=0.5)
            with self.assertRaises(ValueError):
                c(span=1.5, halflife=0.75)
            with self.assertRaises(ValueError):
                c(alpha=0.5, span=1.5)
            # not valid: com < 0
            with self.assertRaises(ValueError):
                c(com=-0.5)
            # not valid: span < 1
            with self.assertRaises(ValueError):
                c(span=0.5)
            # not valid: halflife <= 0
            with self.assertRaises(ValueError):
                c(halflife=0)
            # not valid: alpha <= 0 or alpha > 1
            for alpha in (-0.5, 1.5):
                with self.assertRaises(ValueError):
                    c(alpha=alpha)
    def test_numpy_compat(self):
        # see gh-12811
        e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5)
        msg = "numpy operations are not valid with window objects"
        for func in ('std', 'mean', 'var'):
            tm.assertRaisesRegexp(UnsupportedFunctionCall, msg,
                                  getattr(e, func), 1, 2, 3)
            tm.assertRaisesRegexp(UnsupportedFunctionCall, msg,
                                  getattr(e, func), dtype=np.float64)
class TestDeprecations(Base):
    """ test that we are catching deprecation warnings """
    def setUp(self):
        self._create_data()
    def test_deprecations(self):
        # the module-level moment functions emit FutureWarning
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            mom.rolling_mean(np.ones(10), 3, center=True, axis=0)
            mom.rolling_mean(Series(np.ones(10)), 3, center=True, axis=0)
# GH #12373 : rolling functions error on float32 data
# make sure rolling functions work for different dtypes
#
# NOTE that these are yielded tests and so _create_data is
# explicitly called; these do not inherit from unittest.TestCase
#
# further note that we are only checking rolling for full dtype
# compliance (though both expanding and ewm inherit)
class Dtype(object):
    """Base for the dtype-compliance checks below: subclasses set `dtype`
    and the yielded tests compare each rolling op against fixed expected
    values (results are always float64)."""
    window = 2
    # rolling operations under test, keyed by name
    funcs = {
        'count': lambda v: v.count(),
        'max': lambda v: v.max(),
        'min': lambda v: v.min(),
        'sum': lambda v: v.sum(),
        'mean': lambda v: v.mean(),
        'std': lambda v: v.std(),
        'var': lambda v: v.var(),
        'median': lambda v: v.median()
    }
    def get_expects(self):
        """Return the expected result (float64) for each fixture/op pair."""
        expects = {
            'sr1': {
                'count': Series([1, 2, 2, 2, 2], dtype='float64'),
                'max': Series([np.nan, 1, 2, 3, 4], dtype='float64'),
                'min': Series([np.nan, 0, 1, 2, 3], dtype='float64'),
                'sum': Series([np.nan, 1, 3, 5, 7], dtype='float64'),
                'mean': Series([np.nan, .5, 1.5, 2.5, 3.5], dtype='float64'),
                'std': Series([np.nan] + [np.sqrt(.5)] * 4, dtype='float64'),
                'var': Series([np.nan, .5, .5, .5, .5], dtype='float64'),
                'median': Series([np.nan, .5, 1.5, 2.5, 3.5], dtype='float64')
            },
            'sr2': {
                'count': Series([1, 2, 2, 2, 2], dtype='float64'),
                'max': Series([np.nan, 10, 8, 6, 4], dtype='float64'),
                'min': Series([np.nan, 8, 6, 4, 2], dtype='float64'),
                'sum': Series([np.nan, 18, 14, 10, 6], dtype='float64'),
                'mean': Series([np.nan, 9, 7, 5, 3], dtype='float64'),
                'std': Series([np.nan] + [np.sqrt(2)] * 4, dtype='float64'),
                'var': Series([np.nan, 2, 2, 2, 2], dtype='float64'),
                'median': Series([np.nan, 9, 7, 5, 3], dtype='float64')
            },
            'df': {
                'count': DataFrame({0: Series([1, 2, 2, 2, 2]),
                                    1: Series([1, 2, 2, 2, 2])},
                                   dtype='float64'),
                'max': DataFrame({0: Series([np.nan, 2, 4, 6, 8]),
                                  1: Series([np.nan, 3, 5, 7, 9])},
                                 dtype='float64'),
                'min': DataFrame({0: Series([np.nan, 0, 2, 4, 6]),
                                  1: Series([np.nan, 1, 3, 5, 7])},
                                 dtype='float64'),
                'sum': DataFrame({0: Series([np.nan, 2, 6, 10, 14]),
                                  1: Series([np.nan, 4, 8, 12, 16])},
                                 dtype='float64'),
                'mean': DataFrame({0: Series([np.nan, 1, 3, 5, 7]),
                                   1: Series([np.nan, 2, 4, 6, 8])},
                                  dtype='float64'),
                'std': DataFrame({0: Series([np.nan] + [np.sqrt(2)] * 4),
                                  1: Series([np.nan] + [np.sqrt(2)] * 4)},
                                 dtype='float64'),
                'var': DataFrame({0: Series([np.nan, 2, 2, 2, 2]),
                                  1: Series([np.nan, 2, 2, 2, 2])},
                                 dtype='float64'),
                'median': DataFrame({0: Series([np.nan, 1, 3, 5, 7]),
                                     1: Series([np.nan, 2, 4, 6, 8])},
                                    dtype='float64'),
            }
        }
        return expects
    def _create_dtype_data(self, dtype):
        """Build the fixtures (two Series and a DataFrame) in `dtype`."""
        sr1 = Series(range(5), dtype=dtype)
        sr2 = Series(range(10, 0, -2), dtype=dtype)
        df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype)
        data = {
            'sr1': sr1,
            'sr2': sr2,
            'df': df
        }
        return data
    def _create_data(self):
        self.data = self._create_dtype_data(self.dtype)
        self.expects = self.get_expects()
    def test_dtypes(self):
        # yield one check per (function, fixture) combination
        self._create_data()
        for f_name, d_name in product(self.funcs.keys(), self.data.keys()):
            f = self.funcs[f_name]
            d = self.data[d_name]
            exp = self.expects[d_name][f_name]
            yield self.check_dtypes, f, f_name, d, d_name, exp
    def check_dtypes(self, f, f_name, d, d_name, exp):
        roll = d.rolling(window=self.window)
        result = f(roll)
        tm.assert_almost_equal(result, exp)
class TestDtype_object(Dtype):
    # run the Dtype checks with object-dtype input
    dtype = object
class Dtype_integer(Dtype):
    # shared base for the signed-integer dtype variants below
    pass
class TestDtype_int8(Dtype_integer):
    # run the Dtype checks with int8 input
    dtype = np.int8
class TestDtype_int16(Dtype_integer):
    # run the Dtype checks with int16 input
    dtype = np.int16
class TestDtype_int32(Dtype_integer):
    # run the Dtype checks with int32 input
    dtype = np.int32
class TestDtype_int64(Dtype_integer):
    # run the Dtype checks with int64 input
    dtype = np.int64
class Dtype_uinteger(Dtype):
    # shared base for the unsigned-integer dtype variants below
    pass
class TestDtype_uint8(Dtype_uinteger):
    # run the Dtype checks with uint8 input
    dtype = np.uint8
class TestDtype_uint16(Dtype_uinteger):
    # run the Dtype checks with uint16 input
    dtype = np.uint16
class TestDtype_uint32(Dtype_uinteger):
    # run the Dtype checks with uint32 input
    dtype = np.uint32
class TestDtype_uint64(Dtype_uinteger):
    # run the Dtype checks with uint64 input
    dtype = np.uint64
class Dtype_float(Dtype):
    # shared base for the floating-point dtype variants below
    pass
class TestDtype_float16(Dtype_float):
    # run the Dtype checks with float16 input
    dtype = np.float16
class TestDtype_float32(Dtype_float):
    # run the Dtype checks with float32 input (GH #12373)
    dtype = np.float32
class TestDtype_float64(Dtype_float):
    # run the Dtype checks with float64 input
    dtype = np.float64
class TestDtype_category(Dtype):
    # run the Dtype checks with categorical input; a categorical DataFrame
    # fixture is not constructed, so only the two Series are used
    dtype = 'category'
    include_df = False
    def _create_dtype_data(self, dtype):
        """Build only the Series fixtures (no DataFrame) in `dtype`."""
        sr1 = Series(range(5), dtype=dtype)
        sr2 = Series(range(10, 0, -2), dtype=dtype)
        data = {
            'sr1': sr1,
            'sr2': sr2
        }
        return data
class DatetimeLike(Dtype):
    """Dtype checks for datetime-like data: only `count` is supported;
    every other rolling op raises NotImplementedError."""
    def check_dtypes(self, f, f_name, d, d_name, exp):
        roll = d.rolling(window=self.window)
        if f_name == 'count':
            result = f(roll)
            tm.assert_almost_equal(result, exp)
        else:
            # other methods not Implemented ATM
            assert_raises(NotImplementedError, f, roll)
class TestDtype_timedelta(DatetimeLike):
    # run the DatetimeLike checks with timedelta64[ns] input
    dtype = np.dtype('m8[ns]')
class TestDtype_datetime(DatetimeLike):
    # run the DatetimeLike checks with datetime64[ns] input
    dtype = np.dtype('M8[ns]')
class TestDtype_datetime64UTC(DatetimeLike):
    # tz-aware datetime input; skipped because the extension dtype cannot
    # be constructed directly via the shared fixture helper
    dtype = 'datetime64[ns, UTC]'
    def _create_data(self):
        raise nose.SkipTest("direct creation of extension dtype "
                            "datetime64[ns, UTC] is not supported ATM")
class TestMoments(Base):
def setUp(self):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with self.assertRaises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True,
axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True,
axis=1).mean()
# bad axis
with self.assertRaises(ValueError):
(DataFrame(np.ones((10, 10)))
.rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self):
self._check_moment_func(mom.rolling_sum, np.sum, name='sum')
def test_rolling_count(self):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(mom.rolling_count, counter, name='count',
has_min_periods=False, preserve_nan=False,
fill_value=0)
def test_rolling_mean(self):
self._check_moment_func(mom.rolling_mean, np.mean, name='mean')
def test_cmov_mean(self):
# GH 8238
tm._skip_if_no_scipy()
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
xp = np.array([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516, 12.818,
12.952, np.nan, np.nan])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rs = mom.rolling_mean(vals, 5, center=True)
tm.assert_almost_equal(xp, rs)
xp = Series(rs)
rs = Series(vals).rolling(5, center=True).mean()
tm.assert_series_equal(xp, rs)
def test_cmov_window(self):
# GH 8238
tm._skip_if_no_scipy()
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
xp = np.array([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516, 12.818,
12.952, np.nan, np.nan])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
tm.assert_almost_equal(xp, rs)
xp = Series(rs)
rs = Series(vals).rolling(5, win_type='boxcar', center=True).mean()
tm.assert_series_equal(xp, rs)
def test_cmov_window_corner(self):
# GH 8238
tm._skip_if_no_scipy()
# all nan
vals = np.empty(10, dtype=float)
vals.fill(np.nan)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
self.assertTrue(np.isnan(rs).all())
# empty
vals = np.array([])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
self.assertEqual(len(rs), 0)
# shorter than window
vals = np.random.randn(5)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rs = mom.rolling_window(vals, 10, 'boxcar')
self.assertTrue(np.isnan(rs).all())
self.assertEqual(len(rs), 5)
def test_cmov_window_frame(self):
# Gh 8238
tm._skip_if_no_scipy()
vals = np.array([[12.18, 3.64], [10.18, 9.16], [13.24, 14.61],
[4.51, 8.11], [6.15, 11.44], [9.14, 6.21],
[11.31, 10.67], [2.94, 6.51], [9.42, 8.39], [12.44,
7.34]])
xp = np.array([[np.nan, np.nan], [np.nan, np.nan], [9.252, 9.392],
[8.644, 9.906], [8.87, 10.208], [6.81, 8.588],
[7.792, 8.644], [9.05, 7.824], [np.nan, np.nan
], [np.nan, np.nan]])
# DataFrame
rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).mean()
tm.assert_frame_equal(DataFrame(xp), rs)
# invalid method
with self.assertRaises(AttributeError):
(DataFrame(vals).rolling(5, win_type='boxcar', center=True)
.std())
# sum
xp = np.array([[np.nan, np.nan], [np.nan, np.nan], [46.26, 46.96],
[43.22, 49.53], [44.35, 51.04], [34.05, 42.94],
[38.96, 43.22], [45.25, 39.12], [np.nan, np.nan
], [np.nan, np.nan]])
rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).sum()
tm.assert_frame_equal(DataFrame(xp), rs)
def test_cmov_window_na_min_periods(self):
tm._skip_if_no_scipy()
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type='boxcar', min_periods=4,
center=True).mean()
tm.assert_series_equal(xp, rs)
def test_cmov_window_regular(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
xps = {
'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009, 14.03687,
13.8567, 11.81473, np.nan, np.nan],
'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556, 13.33889,
13.38, 12.33667, np.nan, np.nan],
'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575, 14.3675,
14.0825, 11.5675, np.nan, np.nan],
'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559, 14.17267,
14.65923, 11.10401, np.nan, np.nan],
'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,
14.02156, 15.10512, 10.74574, np.nan, np.nan],
'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671, 14.03559,
15.05657, 10.78514, np.nan, np.nan],
'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607, 14.20036,
14.57726, 11.16988, np.nan, np.nan],
'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575, 14.3675,
14.0825, 11.5675, np.nan, np.nan]
}
for wt in win_types:
xp = Series(xps[wt])
rs = Series(vals).rolling(5, win_type=wt, center=True).mean()
tm.assert_series_equal(xp, rs)
def test_cmov_window_regular_linear_range(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
for wt in win_types:
rs = Series(vals).rolling(5, win_type=wt, center=True).mean()
tm.assert_series_equal(xp, rs)
    def test_cmov_window_regular_missing_data(self):
        """Weighted rolling means with an interior NaN and ``min_periods``
        (trailing window, not centered), pinned against precomputed
        expected values (GH 8238).
        """
        tm._skip_if_no_scipy()
        win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
                     'blackmanharris', 'nuttall', 'barthann']
        vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan,
                         10.63, 14.48])
        # expected output per window type; only the first two positions stay
        # NaN because min_periods=3
        xps = {
            'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425, 9.1925,
                         12.5575, 14.3675, 15.61667, 13.655],
            'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345, 9.17869,
                         12.79607, 14.20036, 15.8706, 13.655],
            'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425, 9.1925,
                         12.5575, 14.3675, 15.61667, 13.655],
            'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599, 9.1764,
                       12.83559, 14.17267, 15.90976, 13.655],
            'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384, 9.56348,
                        12.38009, 14.20565, 15.24694, 13.69758],
            'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618, 9.16786,
                        13.02671, 14.03673, 16.08759, 13.65553],
            'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667, 10.34667,
                       12.00556, 13.82125, 14.49429, 13.765],
            'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,
                               9.16438, 13.05052, 14.02175, 16.1098, 13.65509]
        }
        for wt in win_types:
            xp = Series(xps[wt])
            rs = Series(vals).rolling(5, win_type=wt, min_periods=3).mean()
            tm.assert_series_equal(xp, rs)
    def test_cmov_window_special(self):
        """Centered weighted rolling means for the parameterised scipy
        window types (each needs extra keyword arguments), pinned against
        precomputed expected values (GH 8238).
        """
        tm._skip_if_no_scipy()
        win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
        # per-window-type keyword arguments, positionally matched below
        kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2.,
                                            'width': 2.}, {'width': 0.5}]
        vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
                         10.63, 14.48])
        xps = {
            'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763, 13.89053,
                         13.65671, 12.01002, np.nan, np.nan],
            'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589, 11.73161,
                                 13.08516, 12.95111, 12.74577, np.nan, np.nan],
            'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284, 12.88331,
                        12.96079, 12.77008, np.nan, np.nan],
            'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161, 12.75129,
                       12.90702, 12.83757, np.nan, np.nan]
        }
        for wt, k in zip(win_types, kwds):
            xp = Series(xps[wt])
            # window parameters are forwarded through mean(**k)
            rs = Series(vals).rolling(5, win_type=wt, center=True).mean(**k)
            tm.assert_series_equal(xp, rs)
def test_cmov_window_special_linear_range(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2.,
'width': 2.}, {'width': 0.5}]
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
for wt, k in zip(win_types, kwds):
rs = Series(vals).rolling(5, win_type=wt, center=True).mean(**k)
tm.assert_series_equal(xp, rs)
    def test_rolling_median(self):
        """rolling_median should match np.median (deprecated API warns)."""
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            self._check_moment_func(mom.rolling_median, np.median,
                                    name='median')
def test_rolling_min(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self._check_moment_func(mom.rolling_min, np.min, name='min')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
a = np.array([1, 2, 3, 4, 5])
b = mom.rolling_min(a, window=100, min_periods=1)
tm.assert_almost_equal(b, np.ones(len(a)))
self.assertRaises(ValueError, mom.rolling_min, np.array([1, 2, 3]),
window=3, min_periods=5)
def test_rolling_max(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self._check_moment_func(mom.rolling_max, np.max, name='max')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
a = np.array([1, 2, 3, 4, 5], dtype=np.float64)
b = mom.rolling_max(a, window=100, min_periods=1)
tm.assert_almost_equal(a, b)
self.assertRaises(ValueError, mom.rolling_max, np.array([1, 2, 3]),
window=3, min_periods=5)
    def test_rolling_quantile(self):
        """rolling_quantile should match a sort-and-index percentile."""
        qs = [.1, .5, .9]
        def scoreatpercentile(a, per):
            # reference implementation: sort, scale the percentile to an
            # index, and truncate (floor) to pick the element
            values = np.sort(a, axis=0)
            idx = per / 1. * (values.shape[0] - 1)
            return values[int(idx)]
        for q in qs:
            # adapter so _check_moment_func can pass the quantile through
            def f(x, window, quantile, min_periods=None, freq=None,
                  center=False):
                return mom.rolling_quantile(x, window, quantile,
                                            min_periods=min_periods, freq=freq,
                                            center=center)
            def alt(x):
                return scoreatpercentile(x, q)
            self._check_moment_func(f, alt, name='quantile', quantile=q)
    def test_rolling_apply(self):
        """rolling(...).apply on empty input, against np.mean, and with
        min_periods=0 counting partial windows (GH 8080).
        """
        # suppress warnings about empty slices, as we are deliberately testing
        # with a 0-length Series
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    message=".*(empty slice|0 for slice).*",
                                    category=RuntimeWarning)
            # applying to an empty Series returns an empty Series
            ser = Series([])
            tm.assert_series_equal(ser,
                                   ser.rolling(10).apply(lambda x: x.mean()))
            # NaN-excluding mean used as the applied function
            f = lambda x: x[np.isfinite(x)].mean()
            def roll_mean(x, window, min_periods=None, freq=None, center=False,
                          **kwargs):
                return mom.rolling_apply(x, window, func=f,
                                         min_periods=min_periods, freq=freq,
                                         center=center)
            self._check_moment_func(roll_mean, np.mean, name='apply', func=f)
        # GH 8080
        # with min_periods=0 the applied function sees partial windows,
        # even when every value is missing
        s = Series([None, None, None])
        result = s.rolling(2, min_periods=0).apply(lambda x: len(x))
        expected = Series([1., 2., 2.])
        tm.assert_series_equal(result, expected)
        result = s.rolling(2, min_periods=0).apply(len)
        tm.assert_series_equal(result, expected)
def test_rolling_apply_out_of_bounds(self):
# #1850
arr = np.arange(4)
# it works!
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = mom.rolling_apply(arr, 10, np.sum)
self.assertTrue(isnull(result).all())
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = mom.rolling_apply(arr, 10, np.sum, min_periods=1)
tm.assert_almost_equal(result, result)
def test_rolling_std(self):
self._check_moment_func(mom.rolling_std, lambda x: np.std(x, ddof=1),
name='std')
self._check_moment_func(mom.rolling_std, lambda x: np.std(x, ddof=0),
name='std', ddof=0)
    def test_rolling_std_1obs(self):
        """Standard deviation of single-observation windows: NaN with the
        default ddof=1, zero with ddof=0; and windows holding fewer than
        two valid values stay NaN.
        """
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
                                     1, min_periods=1)
        expected = np.array([np.nan] * 5)
        tm.assert_almost_equal(result, expected)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
                                     1, min_periods=1, ddof=0)
        expected = np.zeros(5)
        tm.assert_almost_equal(result, expected)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = mom.rolling_std(np.array([np.nan, np.nan, 3., 4., 5.]),
                                     3, min_periods=2)
        # position 2 has only one valid observation -> NaN with ddof=1
        self.assertTrue(np.isnan(result[2]))
    def test_rolling_std_neg_sqrt(self):
        """Near-constant values must not produce a negative variance (and
        hence a NaN from sqrt) through floating-point cancellation.
        """
        # unit test from Bottleneck
        # Test move_nanstd for neg sqrt.
        a = np.array([0.0011448196318903589, 0.00028718669878572767,
                      0.00028718669878572767, 0.00028718669878572767,
                      0.00028718669878572767])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            b = mom.rolling_std(a, window=3)
        self.assertTrue(np.isfinite(b[2:]).all())
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            b = mom.ewmstd(a, span=3)
        self.assertTrue(np.isfinite(b[2:]).all())
def test_rolling_var(self):
self._check_moment_func(mom.rolling_var, lambda x: np.var(x, ddof=1),
test_stable=True, name='var')
self._check_moment_func(mom.rolling_var, lambda x: np.var(x, ddof=0),
name='var', ddof=0)
def test_rolling_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest('no scipy')
self._check_moment_func(mom.rolling_skew,
lambda x: skew(x, bias=False), name='skew')
def test_rolling_kurt(self):
try:
from scipy.stats import kurtosis
except ImportError:
raise nose.SkipTest('no scipy')
self._check_moment_func(mom.rolling_kurt,
lambda x: kurtosis(x, bias=False), name='kurt')
    def test_fperr_robustness(self):
        """Rolling sum/mean/var over non-negative inputs must never dip
        below zero through accumulated floating-point error (GH 2114,
        GH 2527).  The input is a fixed binary blob of float64 values.
        """
        # TODO: remove this once python 2.5 out of picture
        if PY3:
            raise nose.SkipTest("doesn't work on python 3")
        # #2114
        data = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a@\xaa\xaa\xaa\xaa\xaa\xaa\x02@8\x8e\xe38\x8e\xe3\xe8?z\t\xed%\xb4\x97\xd0?\xa2\x0c<\xdd\x9a\x1f\xb6?\x82\xbb\xfa&y\x7f\x9d?\xac\'\xa7\xc4P\xaa\x83?\x90\xdf\xde\xb0k8j?`\xea\xe9u\xf2zQ?*\xe37\x9d\x98N7?\xe2.\xf5&v\x13\x1f?\xec\xc9\xf8\x19\xa4\xb7\x04?\x90b\xf6w\x85\x9f\xeb>\xb5A\xa4\xfaXj\xd2>F\x02\xdb\xf8\xcb\x8d\xb8>.\xac<\xfb\x87^\xa0>\xe8:\xa6\xf9_\xd3\x85>\xfb?\xe2cUU\xfd?\xfc\x7fA\xed8\x8e\xe3?\xa5\xaa\xac\x91\xf6\x12\xca?n\x1cs\xb6\xf9a\xb1?\xe8%D\xf3L-\x97?5\xddZD\x11\xe7~?#>\xe7\x82\x0b\x9ad?\xd9R4Y\x0fxK?;7x;\nP2?N\xf4JO\xb8j\x18?4\xf81\x8a%G\x00?\x9a\xf5\x97\r2\xb4\xe5>\xcd\x9c\xca\xbcB\xf0\xcc>3\x13\x87(\xd7J\xb3>\x99\x19\xb4\xe0\x1e\xb9\x99>ff\xcd\x95\x14&\x81>\x88\x88\xbc\xc7p\xddf>`\x0b\xa6_\x96|N>@\xb2n\xea\x0eS4>U\x98\x938i\x19\x1b>\x8eeb\xd0\xf0\x10\x02>\xbd\xdc-k\x96\x16\xe8=(\x93\x1e\xf2\x0e\x0f\xd0=\xe0n\xd3Bii\xb5=*\xe9\x19Y\x8c\x8c\x9c=\xc6\xf0\xbb\x90]\x08\x83=]\x96\xfa\xc0|`i=>d\xfc\xd5\xfd\xeaP=R0\xfb\xc7\xa7\x8e6=\xc2\x95\xf9_\x8a\x13\x1e=\xd6c\xa6\xea\x06\r\x04=r\xda\xdd8\t\xbc\xea<\xf6\xe6\x93\xd0\xb0\xd2\xd1<\x9d\xdeok\x96\xc3\xb7<&~\xea9s\xaf\x9f<UUUUUU\x13@q\x1c\xc7q\x1c\xc7\xf9?\xf6\x12\xdaKh/\xe1?\xf2\xc3"e\xe0\xe9\xc6?\xed\xaf\x831+\x8d\xae?\xf3\x1f\xad\xcb\x1c^\x94?\x15\x1e\xdd\xbd>\xb8\x02@\xc6\xd2&\xfd\xa8\xf5\xe8?\xd9\xe1\x19\xfe\xc5\xa3\xd0?v\x82"\xa8\xb2/\xb6?\x9dX\x835\xee\x94\x9d?h\x90W\xce\x9e\xb8\x83?\x8a\xc0th~Kj?\\\x80\xf8\x9a\xa9\x87Q?%\xab\xa0\xce\x8c_7?1\xe4\x80\x13\x11*\x1f? \x98\x00\r\xb6\xc6\x04?\x80u\xabf\x9d\xb3\xeb>UNrD\xbew\xd2>\x1c\x13C[\xa8\x9f\xb8>\x12b\xd7<pj\xa0>m-\x1fQ@\xe3\x85>\xe6\x91)l\x00/m>Da\xc6\xf2\xaatS>\x05\xd7]\xee\xe3\xf09>' # noqa
        arr = np.frombuffer(data, dtype='<f8')
        # blob is little-endian; byteswap on big-endian hosts
        if sys.byteorder != "little":
            arr = arr.byteswap().newbyteorder()
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = mom.rolling_sum(arr, 2)
        self.assertTrue((result[1:] >= 0).all())
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = mom.rolling_mean(arr, 2)
        self.assertTrue((result[1:] >= 0).all())
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = mom.rolling_var(arr, 2)
        self.assertTrue((result[1:] >= 0).all())
        # #2527, ugh
        arr = np.array([0.00012456, 0.0003, 0])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = mom.rolling_mean(arr, 1)
        self.assertTrue(result[-1] >= 0)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = mom.rolling_mean(-arr, 1)
        self.assertTrue(result[-1] <= 0)
    def _check_moment_func(self, f, static_comp, name=None, window=50,
                           has_min_periods=True, has_center=True,
                           has_time_rule=True, preserve_nan=True,
                           fill_value=None, test_stable=False, **kwargs):
        """Dispatch the shared checks for a moment function.

        ``f`` is the deprecated functional API under test and
        ``static_comp`` the whole-window reference reduction.  Runs the
        ndarray checks, the Series/DataFrame checks via the deprecated
        API, and -- when ``name`` is given -- the same structure checks
        through the new ``.rolling(...)`` method API.
        """
        with warnings.catch_warnings(record=True):
            self._check_ndarray(f, static_comp, window=window,
                                has_min_periods=has_min_periods,
                                preserve_nan=preserve_nan,
                                has_center=has_center, fill_value=fill_value,
                                test_stable=test_stable, **kwargs)
        with warnings.catch_warnings(record=True):
            self._check_structures(f, static_comp,
                                   has_min_periods=has_min_periods,
                                   has_time_rule=has_time_rule,
                                   fill_value=fill_value,
                                   has_center=has_center, **kwargs)
        # new API
        if name is not None:
            self._check_structures(f, static_comp, name=name,
                                   has_min_periods=has_min_periods,
                                   has_time_rule=has_time_rule,
                                   fill_value=fill_value,
                                   has_center=has_center, **kwargs)
    def _check_ndarray(self, f, static_comp, window=50, has_min_periods=True,
                       preserve_nan=True, has_center=True, fill_value=None,
                       test_stable=False, test_window=True, **kwargs):
        """Validate moment function ``f`` on raw ndarrays: agreement with
        ``static_comp`` on full windows, NaN propagation, ``min_periods``
        thresholds, ``center`` equivalence, numerical stability, and
        windows longer than the input (GH 7297, GH 7925).
        """
        def get_result(arr, window, min_periods=None, center=False):
            return f(arr, window, min_periods=min_periods, center=center, **
                     kwargs)
        result = get_result(self.arr, window)
        tm.assert_almost_equal(result[-1], static_comp(self.arr[-50:]))
        if preserve_nan:
            assert (np.isnan(result[self._nan_locs]).all())
        # excluding NaNs correctly
        arr = randn(50)
        arr[:10] = np.NaN
        arr[-10:] = np.NaN
        if has_min_periods:
            result = get_result(arr, 50, min_periods=30)
            tm.assert_almost_equal(result[-1], static_comp(arr[10:-10]))
            # min_periods is working correctly
            result = get_result(arr, 20, min_periods=15)
            self.assertTrue(np.isnan(result[23]))
            self.assertFalse(np.isnan(result[24]))
            self.assertFalse(np.isnan(result[-6]))
            self.assertTrue(np.isnan(result[-5]))
            arr2 = randn(20)
            result = get_result(arr2, 10, min_periods=5)
            self.assertTrue(isnull(result[3]))
            self.assertTrue(notnull(result[4]))
            # min_periods=0
            result0 = get_result(arr, 20, min_periods=0)
            result1 = get_result(arr, 20, min_periods=1)
            tm.assert_almost_equal(result0, result1)
        else:
            result = get_result(arr, 50)
            tm.assert_almost_equal(result[-1], static_comp(arr[10:-10]))
        # GH 7925
        # centered output should equal the trailing output computed on the
        # NaN-padded array, shifted back into alignment
        if has_center:
            if has_min_periods:
                result = get_result(arr, 20, min_periods=15, center=True)
                expected = get_result(
                    np.concatenate((arr, np.array([np.NaN] * 9))), 20,
                    min_periods=15)[9:]
            else:
                result = get_result(arr, 20, center=True)
                expected = get_result(
                    np.concatenate((arr, np.array([np.NaN] * 9))), 20)[9:]
            self.assert_numpy_array_equal(result, expected)
        if test_stable:
            # large additive offset must not destroy precision
            result = get_result(self.arr + 1e9, window)
            tm.assert_almost_equal(result[-1],
                                   static_comp(self.arr[-50:] + 1e9))
        # Test window larger than array, #7297
        if test_window:
            if has_min_periods:
                for minp in (0, len(self.arr) - 1, len(self.arr)):
                    result = get_result(self.arr, len(self.arr) + 1,
                                        min_periods=minp)
                    expected = get_result(self.arr, len(self.arr),
                                          min_periods=minp)
                    nan_mask = np.isnan(result)
                    self.assertTrue(np.array_equal(nan_mask, np.isnan(
                        expected)))
                    nan_mask = ~nan_mask
                    tm.assert_almost_equal(result[nan_mask],
                                           expected[nan_mask])
            else:
                result = get_result(self.arr, len(self.arr) + 1)
                expected = get_result(self.arr, len(self.arr))
                nan_mask = np.isnan(result)
                self.assertTrue(np.array_equal(nan_mask, np.isnan(expected)))
                nan_mask = ~nan_mask
                tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
    def _check_structures(self, f, static_comp, name=None,
                          has_min_periods=True, has_time_rule=True,
                          has_center=True, fill_value=None, **kwargs):
        """Validate a moment function on Series/DataFrame inputs, via the
        deprecated functional API (``f``) or -- when ``name`` is given --
        the new ``.rolling(...)`` method API; also checks ``freq``
        resampling and ``center`` alignment (GH 7925).
        """
        def get_result(obj, window, min_periods=None, freq=None, center=False):
            # check via the API calls if name is provided
            if name is not None:
                # catch a freq deprecation warning if freq is provided and not
                # None
                w = FutureWarning if freq is not None else None
                with tm.assert_produces_warning(w, check_stacklevel=False):
                    r = obj.rolling(window=window, min_periods=min_periods,
                                    freq=freq, center=center)
                return getattr(r, name)(**kwargs)
            # check via the moments API
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                return f(obj, window=window, min_periods=min_periods,
                         freq=freq, center=center, **kwargs)
        series_result = get_result(self.series, window=50)
        frame_result = get_result(self.frame, window=50)
        tm.assertIsInstance(series_result, Series)
        self.assertEqual(type(frame_result), DataFrame)
        # check time_rule works
        if has_time_rule:
            win = 25
            minp = 10
            if has_min_periods:
                series_result = get_result(self.series[::2], window=win,
                                           min_periods=minp, freq='B')
                frame_result = get_result(self.frame[::2], window=win,
                                          min_periods=minp, freq='B')
            else:
                series_result = get_result(self.series[::2], window=win,
                                           freq='B')
                frame_result = get_result(self.frame[::2], window=win,
                                          freq='B')
            # the last resampled window should match the reference reduction
            # over the same trailing span of raw data
            last_date = series_result.index[-1]
            prev_date = last_date - 24 * offsets.BDay()
            trunc_series = self.series[::2].truncate(prev_date, last_date)
            trunc_frame = self.frame[::2].truncate(prev_date, last_date)
            self.assertAlmostEqual(series_result[-1],
                                   static_comp(trunc_series))
            tm.assert_series_equal(frame_result.xs(last_date),
                                   trunc_frame.apply(static_comp),
                                   check_names=False)
        # GH 7925
        if has_center:
            # shifter index
            s = ['x%d' % x for x in range(12)]
            if has_min_periods:
                minp = 10
                series_xp = get_result(
                    self.series.reindex(list(self.series.index) + s),
                    window=25,
                    min_periods=minp).shift(-12).reindex(self.series.index)
                frame_xp = get_result(
                    self.frame.reindex(list(self.frame.index) + s),
                    window=25,
                    min_periods=minp).shift(-12).reindex(self.frame.index)
                series_rs = get_result(self.series, window=25,
                                       min_periods=minp, center=True)
                frame_rs = get_result(self.frame, window=25, min_periods=minp,
                                      center=True)
            else:
                series_xp = get_result(
                    self.series.reindex(list(self.series.index) + s),
                    window=25).shift(-12).reindex(self.series.index)
                frame_xp = get_result(
                    self.frame.reindex(list(self.frame.index) + s),
                    window=25).shift(-12).reindex(self.frame.index)
                series_rs = get_result(self.series, window=25, center=True)
                frame_rs = get_result(self.frame, window=25, center=True)
            if fill_value is not None:
                series_xp = series_xp.fillna(fill_value)
                frame_xp = frame_xp.fillna(fill_value)
            tm.assert_series_equal(series_xp, series_rs)
            tm.assert_frame_equal(frame_xp, frame_rs)
    def test_ewma(self):
        """Exponentially weighted mean: shared checks, weight normalisation
        for an impulse input, and adjust/ignore_na combinations against
        precomputed values.
        """
        self._check_ew(mom.ewma, name='mean')
        # a single impulse: with adjust=False the weights must sum to ~1
        arr = np.zeros(1000)
        arr[5] = 1
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = mom.ewma(arr, span=100, adjust=False).sum()
        self.assertTrue(np.abs(result - 1) < 1e-2)
        s = Series([1.0, 2.0, 4.0, 8.0])
        # adjust=True: ignore_na makes no difference when there are no NaNs
        expected = Series([1.0, 1.6, 2.736842, 4.923077])
        for f in [lambda s: s.ewm(com=2.0, adjust=True).mean(),
                  lambda s: s.ewm(com=2.0, adjust=True,
                                  ignore_na=False).mean(),
                  lambda s: s.ewm(com=2.0, adjust=True, ignore_na=True).mean(),
                  ]:
            result = f(s)
            tm.assert_series_equal(result, expected)
        # adjust=False: recursive form, again independent of ignore_na
        expected = Series([1.0, 1.333333, 2.222222, 4.148148])
        for f in [lambda s: s.ewm(com=2.0, adjust=False).mean(),
                  lambda s: s.ewm(com=2.0, adjust=False,
                                  ignore_na=False).mean(),
                  lambda s: s.ewm(com=2.0, adjust=False,
                                  ignore_na=True).mean(),
                  ]:
            result = f(s)
            tm.assert_series_equal(result, expected)
    def test_ewma_nan_handling(self):
        """ewm mean with NaNs: results are carried forward across gaps, and
        the adjust/ignore_na combinations match a simple weighted-average
        reconstruction (GH 7603).
        """
        s = Series([1.] + [np.nan] * 5 + [1.])
        result = s.ewm(com=5).mean()
        tm.assert_series_equal(result, Series([1.] * len(s)))
        s = Series([np.nan] * 2 + [1.] + [np.nan] * 2 + [1.])
        result = s.ewm(com=5).mean()
        tm.assert_series_equal(result, Series([np.nan] * 2 + [1.] * 4))
        # GH 7603
        s0 = Series([np.nan, 1., 101.])
        s1 = Series([1., np.nan, 101.])
        s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])
        s3 = Series([1., np.nan, 101., 50.])
        com = 2.
        alpha = 1. / (1. + com)
        # reference: weighted cumulative average with explicit weights w
        def simple_wma(s, w):
            return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')
        # each case pins the effective weight of earlier observations for a
        # given (adjust, ignore_na) combination
        for (s, adjust, ignore_na, w) in [
                (s0, True, False, [np.nan, (1. - alpha), 1.]),
                (s0, True, True, [np.nan, (1. - alpha), 1.]),
                (s0, False, False, [np.nan, (1. - alpha), alpha]),
                (s0, False, True, [np.nan, (1. - alpha), alpha]),
                (s1, True, False, [(1. - alpha) ** 2, np.nan, 1.]),
                (s1, True, True, [(1. - alpha), np.nan, 1.]),
                (s1, False, False, [(1. - alpha) ** 2, np.nan, alpha]),
                (s1, False, True, [(1. - alpha), np.nan, alpha]),
                (s2, True, False, [np.nan, (1. - alpha) **
                                   3, np.nan, np.nan, 1., np.nan]),
                (s2, True, True, [np.nan, (1. - alpha),
                                  np.nan, np.nan, 1., np.nan]),
                (s2, False, False, [np.nan, (1. - alpha) **
                                    3, np.nan, np.nan, alpha, np.nan]),
                (s2, False, True, [np.nan, (1. - alpha),
                                   np.nan, np.nan, alpha, np.nan]),
                (s3, True, False, [(1. - alpha) **
                                   3, np.nan, (1. - alpha), 1.]),
                (s3, True, True, [(1. - alpha) **
                                  2, np.nan, (1. - alpha), 1.]),
                (s3, False, False, [(1. - alpha) ** 3, np.nan,
                                    (1. - alpha) * alpha,
                                    alpha * ((1. - alpha) ** 2 + alpha)]),
                (s3, False, True, [(1. - alpha) ** 2,
                                   np.nan, (1. - alpha) * alpha, alpha])]:
            expected = simple_wma(s, Series(w))
            result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()
            tm.assert_series_equal(result, expected)
            if ignore_na is False:
                # check that ignore_na defaults to False
                result = s.ewm(com=com, adjust=adjust).mean()
                tm.assert_series_equal(result, expected)
    def test_ewmvar(self):
        """Exercise mom.ewmvar through the shared ewm checks."""
        self._check_ew(mom.ewmvar, name='var')
    def test_ewmvol(self):
        """Exercise mom.ewmvol through the shared ewm checks."""
        self._check_ew(mom.ewmvol, name='vol')
    def test_ewma_span_com_args(self):
        """com=9.5 and span=20 describe the same decay; passing both, or
        neither, raises ValueError.
        """
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            A = mom.ewma(self.arr, com=9.5)
            B = mom.ewma(self.arr, span=20)
            tm.assert_almost_equal(A, B)
            self.assertRaises(ValueError, mom.ewma, self.arr, com=9.5, span=20)
            self.assertRaises(ValueError, mom.ewma, self.arr)
    def test_ewma_halflife_arg(self):
        """halflife=10 matches the equivalent com; combining halflife with
        com/span, or giving no decay argument at all, raises ValueError.
        """
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            A = mom.ewma(self.arr, com=13.932726172912965)
            B = mom.ewma(self.arr, halflife=10.0)
            tm.assert_almost_equal(A, B)
            self.assertRaises(ValueError, mom.ewma, self.arr, span=20,
                              halflife=50)
            self.assertRaises(ValueError, mom.ewma, self.arr, com=9.5,
                              halflife=50)
            self.assertRaises(ValueError, mom.ewma, self.arr, com=9.5, span=20,
                              halflife=50)
            self.assertRaises(ValueError, mom.ewma, self.arr)
    def test_ewma_alpha_old_api(self):
        """alpha and its equivalent com/span/halflife give identical output
        through the deprecated functional API (GH 10789).
        """
        # GH 10789
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            a = mom.ewma(self.arr, alpha=0.61722699889169674)
            b = mom.ewma(self.arr, com=0.62014947789973052)
            c = mom.ewma(self.arr, span=2.240298955799461)
            d = mom.ewma(self.arr, halflife=0.721792864318)
            tm.assert_numpy_array_equal(a, b)
            tm.assert_numpy_array_equal(a, c)
            tm.assert_numpy_array_equal(a, d)
    def test_ewma_alpha_arg_old_api(self):
        """alpha is mutually exclusive with com/span/halflife, and at least
        one decay argument is required (deprecated API, GH 10789).
        """
        # GH 10789
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            self.assertRaises(ValueError, mom.ewma, self.arr)
            self.assertRaises(ValueError, mom.ewma, self.arr,
                              com=10.0, alpha=0.5)
            self.assertRaises(ValueError, mom.ewma, self.arr,
                              span=10.0, alpha=0.5)
            self.assertRaises(ValueError, mom.ewma, self.arr,
                              halflife=10.0, alpha=0.5)
def test_ewm_alpha(self):
# GH 10789
s = Series(self.arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
tm.assert_series_equal(a, c)
tm.assert_series_equal(a, d)
def test_ewm_alpha_arg(self):
# GH 10789
s = Series(self.arr)
self.assertRaises(ValueError, s.ewm)
self.assertRaises(ValueError, s.ewm, com=10.0, alpha=0.5)
self.assertRaises(ValueError, s.ewm, span=10.0, alpha=0.5)
self.assertRaises(ValueError, s.ewm, halflife=10.0, alpha=0.5)
    def test_ewm_domain_checks(self):
        """Boundary validation of the ewm decay parameters (GH 12492)."""
        # GH 12492
        s = Series(self.arr)
        # com must satisfy: com >= 0
        self.assertRaises(ValueError, s.ewm, com=-0.1)
        s.ewm(com=0.0)
        s.ewm(com=0.1)
        # span must satisfy: span >= 1
        self.assertRaises(ValueError, s.ewm, span=-0.1)
        self.assertRaises(ValueError, s.ewm, span=0.0)
        self.assertRaises(ValueError, s.ewm, span=0.9)
        s.ewm(span=1.0)
        s.ewm(span=1.1)
        # halflife must satisfy: halflife > 0
        self.assertRaises(ValueError, s.ewm, halflife=-0.1)
        self.assertRaises(ValueError, s.ewm, halflife=0.0)
        s.ewm(halflife=0.1)
        # alpha must satisfy: 0 < alpha <= 1
        self.assertRaises(ValueError, s.ewm, alpha=-0.1)
        self.assertRaises(ValueError, s.ewm, alpha=0.0)
        s.ewm(alpha=0.1)
        s.ewm(alpha=1.0)
        self.assertRaises(ValueError, s.ewm, alpha=1.1)
    def test_ew_empty_arrays(self):
        """ewm functions applied to an empty array return an empty array."""
        arr = np.array([], dtype=np.float64)
        funcs = [mom.ewma, mom.ewmvol, mom.ewmvar]
        for f in funcs:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = f(arr, 3)
            tm.assert_almost_equal(result, arr)
    def _check_ew(self, func, name=None):
        """Run the ndarray and Series/DataFrame checks for an ewm-family
        function ``func``; ``name`` is the method name on the ewm object.
        """
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            self._check_ew_ndarray(func, name=name)
        self._check_ew_structures(func, name=name)
def _check_ew_ndarray(self, func, preserve_nan=False, name=None):
result = func(self.arr, com=10)
if preserve_nan:
assert (np.isnan(result[self._nan_locs]).all())
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = func(s, 50, min_periods=2)
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
for min_periods in (0, 1):
result = func(s, 50, min_periods=min_periods)
if func == mom.ewma:
self.assertTrue(np.isnan(result.values[:10]).all())
self.assertFalse(np.isnan(result.values[10:]).any())
else:
# ewmstd, ewmvol, ewmvar (with bias=False) require at least two
# values
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), 50, min_periods=min_periods)
tm.assert_series_equal(result, Series([]))
# check series of length 1
result = func(Series([1.]), 50, min_periods=min_periods)
if func == mom.ewma:
tm.assert_series_equal(result, Series([1.]))
else:
# ewmstd, ewmvol, ewmvar with bias=False require at least two
# values
tm.assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = func(np.arange(50), span=10)
self.assertEqual(result2.dtype, np.float_)
    def _check_ew_structures(self, func, name):
        """Smoke-check the new ewm method API: the ``name`` method on a
        Series/DataFrame ewm object returns the matching container type.
        """
        series_result = getattr(self.series.ewm(com=10), name)()
        tm.assertIsInstance(series_result, Series)
        frame_result = getattr(self.frame.ewm(com=10), name)()
        self.assertEqual(type(frame_result), DataFrame)
# create the data only once as we are not setting it
def _create_consistency_data():
    """Build the shared fixtures for the consistency tests: a list of
    ``(object, is_constant, no_nans)`` tuples covering empty, all-NaN,
    constant and mixed Series/DataFrames.
    """
    def create_series():
        return [Series(),
                Series([np.nan]),
                Series([np.nan, np.nan]),
                Series([3.]),
                Series([np.nan, 3.]),
                Series([3., np.nan]),
                Series([1., 3.]),
                Series([2., 2.]),
                Series([3., 1.]),
                Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan,
                        np.nan]),
                Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5.,
                        np.nan, np.nan]),
                Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5.,
                        np.nan, np.nan]),
                Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7.,
                        12., 13., 14., 15.]),
                Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3.,
                        12., 13., 14., 15.]),
                Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7.,
                        12., 13., 14., 15.]),
                Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3.,
                        12., 13., 14., 15.]),
                Series(range(10)),
                Series(range(20, 0, -2)), ]
    def create_dataframes():
        # frames with empty/duplicate/mixed columns, plus a single-column
        # frame built from every series above
        return ([DataFrame(),
                 DataFrame(columns=['a']),
                 DataFrame(columns=['a', 'a']),
                 DataFrame(columns=['a', 'b']),
                 DataFrame(np.arange(10).reshape((5, 2))),
                 DataFrame(np.arange(25).reshape((5, 5))),
                 DataFrame(np.arange(25).reshape((5, 5)),
                           columns=['a', 'b', 99, 'd', 'd'])] +
                [DataFrame(s) for s in create_series()])
    def is_constant(x):
        # True when every non-null value is identical
        values = x.values.ravel()
        return len(set(values[notnull(values)])) == 1
    def no_nans(x):
        return x.notnull().all().all()
    # data is a tuple(object, is_contant, no_nans)
    data = create_series() + create_dataframes()
    return [(x, is_constant(x), no_nans(x)) for x in data]
# evaluated once at import time and shared by every test case below
_consistency_data = _create_consistency_data()
class TestMomentsConsistency(Base):
    """Cross-checks mathematical identities between the moment
    implementations (rolling/expanding/ewm) and reference reductions.
    """
    # Each entry is (reference function over raw values, second field
    # consumed by the consistency runners -- presumably a minimum
    # ``min_periods``; confirm against the drivers -- method name).
    base_functions = [
        (lambda v: Series(v).count(), None, 'count'),
        (lambda v: Series(v).max(), None, 'max'),
        (lambda v: Series(v).min(), None, 'min'),
        (lambda v: Series(v).sum(), None, 'sum'),
        (lambda v: Series(v).mean(), None, 'mean'),
        (lambda v: Series(v).std(), 1, 'std'),
        (lambda v: Series(v).cov(Series(v)), None, 'cov'),
        (lambda v: Series(v).corr(Series(v)), None, 'corr'),
        (lambda v: Series(v).var(), 1, 'var'),
        # restore once GH 8086 is fixed
        # lambda v: Series(v).skew(), 3, 'skew'),
        # (lambda v: Series(v).kurt(), 4, 'kurt'),
        # (lambda x, min_periods: mom.expanding_quantile(x, 0.3,
        # min_periods=min_periods, 'quantile'),
        # restore once GH 8084 is fixed
        # lambda v: Series(v).quantile(0.3), None, 'quantile'),
        (lambda v: Series(v).median(), None, 'median'),
        (np.nanmax, 1, 'max'),
        (np.nanmin, 1, 'min'),
        (np.nansum, 1, 'sum'),
    ]
    # NaN-aware numpy reductions only exist from these numpy versions on
    if np.__version__ >= LooseVersion('1.8.0'):
        base_functions += [
            (np.nanmean, 1, 'mean'),
            (lambda v: np.nanstd(v, ddof=1), 1, 'std'),
            (lambda v: np.nanvar(v, ddof=1), 1, 'var'),
        ]
    if np.__version__ >= LooseVersion('1.9.0'):
        base_functions += [(np.nanmedian, 1, 'median'), ]
    # reference reductions that are only valid on NaN-free input
    no_nan_functions = [
        (np.max, None, 'max'),
        (np.min, None, 'min'),
        (np.sum, None, 'sum'),
        (np.mean, None, 'mean'),
        (lambda v: np.std(v, ddof=1), 1, 'std'),
        (lambda v: np.var(v, ddof=1), 1, 'var'),
        (np.median, None, 'median'),
    ]
    def _create_data(self):
        """Extend the base fixtures with the shared consistency dataset."""
        super(TestMomentsConsistency, self)._create_data()
        self.data = _consistency_data
    def setUp(self):
        # per-test fixture setup (unittest-style)
        self._create_data()
    def _test_moments_consistency(self, min_periods, count, mean, mock_mean,
                                  corr, var_unbiased=None, std_unbiased=None,
                                  cov_unbiased=None, var_biased=None,
                                  std_biased=None, cov_biased=None,
                                  var_debiasing_factors=None):
        """Check mathematical identities among a family of moment
        functions over ``self.data``: mean vs. a mock implementation,
        corr/cov symmetry, var == cov(x, x) == std**2, the biased/unbiased
        variance relationship, and the behaviour on constant series.

        Each argument is a callable computing the corresponding moment;
        the ``*_unbiased``/``*_biased`` groups may be None to skip those
        identities.
        """
        def _non_null_values(x):
            values = x.values.ravel()
            return set(values[notnull(values)].tolist())
        for (x, is_constant, no_nans) in self.data:
            count_x = count(x)
            mean_x = mean(x)
            if mock_mean:
                # check that mean equals mock_mean
                expected = mock_mean(x)
                assert_equal(mean_x, expected.astype('float64'))
            # check that correlation of a series with itself is either 1 or NaN
            corr_x_x = corr(x, x)
            # self.assertTrue(_non_null_values(corr_x_x).issubset(set([1.]))) #
            # restore once rolling_cov(x, x) is identically equal to var(x)
            if is_constant:
                exp = x.max() if isinstance(x, Series) else x.max().max()
                # check mean of constant series
                expected = x * np.nan
                expected[count_x >= max(min_periods, 1)] = exp
                assert_equal(mean_x, expected)
                # check correlation of constant series with itself is NaN
                expected[:] = np.nan
                assert_equal(corr_x_x, expected)
            if var_unbiased and var_biased and var_debiasing_factors:
                # check variance debiasing factors
                var_unbiased_x = var_unbiased(x)
                var_biased_x = var_biased(x)
                var_debiasing_factors_x = var_debiasing_factors(x)
                assert_equal(var_unbiased_x, var_biased_x *
                             var_debiasing_factors_x)
            for (std, var, cov) in [(std_biased, var_biased, cov_biased),
                                    (std_unbiased, var_unbiased, cov_unbiased)
                                    ]:
                # check that var(x), std(x), and cov(x) are all >= 0
                var_x = var(x)
                std_x = std(x)
                self.assertFalse((var_x < 0).any().any())
                self.assertFalse((std_x < 0).any().any())
                if cov:
                    cov_x_x = cov(x, x)
                    self.assertFalse((cov_x_x < 0).any().any())
                    # check that var(x) == cov(x, x)
                    assert_equal(var_x, cov_x_x)
                # check that var(x) == std(x)^2
                assert_equal(var_x, std_x * std_x)
                if var is var_biased:
                    # check that biased var(x) == mean(x^2) - mean(x)^2
                    mean_x2 = mean(x * x)
                    assert_equal(var_x, mean_x2 - (mean_x * mean_x))
                if is_constant:
                    # check that variance of constant series is identically 0
                    self.assertFalse((var_x > 0).any().any())
                    expected = x * np.nan
                    expected[count_x >= max(min_periods, 1)] = 0.
                    if var is var_unbiased:
                        expected[count_x < 2] = np.nan
                    assert_equal(var_x, expected)
                if isinstance(x, Series):
                    for (y, is_constant, no_nans) in self.data:
                        if not x.isnull().equals(y.isnull()):
                            # can only easily test two Series with similar
                            # structure
                            continue
                        # check that cor(x, y) is symmetric
                        corr_x_y = corr(x, y)
                        corr_y_x = corr(y, x)
                        assert_equal(corr_x_y, corr_y_x)
                        if cov:
                            # check that cov(x, y) is symmetric
                            cov_x_y = cov(x, y)
                            cov_y_x = cov(y, x)
                            assert_equal(cov_x_y, cov_y_x)
                            # check that cov(x, y) == (var(x+y) - var(x) -
                            # var(y)) / 2
                            var_x_plus_y = var(x + y)
                            var_y = var(y)
                            assert_equal(cov_x_y, 0.5 *
                                         (var_x_plus_y - var_x - var_y))
                            # check that corr(x, y) == cov(x, y) / (std(x) *
                            # std(y))
                            std_y = std(y)
                            assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
                            if cov is cov_biased:
                                # check that biased cov(x, y) == mean(x*y) -
                                # mean(x)*mean(y)
                                mean_y = mean(y)
                                mean_x_times_y = mean(x * y)
                                assert_equal(cov_x_y, mean_x_times_y -
                                             (mean_x * mean_y))
    @tm.slow
    def test_ewm_consistency(self):
        """Run the shared moment-consistency identities for the ewm family,
        reconstructing the exponential weights explicitly for every
        (min_periods, adjust, ignore_na) combination.
        """
        def _weights(s, com, adjust, ignore_na):
            # explicit per-observation weight reconstruction; recurses
            # column-wise for DataFrames
            if isinstance(s, DataFrame):
                if not len(s.columns):
                    return DataFrame(index=s.index, columns=s.columns)
                w = concat([
                    _weights(s.iloc[:, i], com=com, adjust=adjust,
                             ignore_na=ignore_na)
                    for i, _ in enumerate(s.columns)], axis=1)
                w.index = s.index
                w.columns = s.columns
                return w
            w = Series(np.nan, index=s.index)
            alpha = 1. / (1. + com)
            if ignore_na:
                # gaps do not decay the weights: recurse on the compacted
                # (non-null) series
                w[s.notnull()] = _weights(s[s.notnull()], com=com,
                                          adjust=adjust, ignore_na=False)
            elif adjust:
                for i in range(len(s)):
                    # NaN != NaN, so this tests for a valid observation
                    if s.iat[i] == s.iat[i]:
                        w.iat[i] = pow(1. / (1. - alpha), i)
            else:
                sum_wts = 0.
                prev_i = -1
                for i in range(len(s)):
                    if s.iat[i] == s.iat[i]:
                        if prev_i == -1:
                            w.iat[i] = 1.
                        else:
                            w.iat[i] = alpha * sum_wts / pow(1. - alpha,
                                                             i - prev_i)
                        sum_wts += w.iat[i]
                        prev_i = i
            return w
        def _variance_debiasing_factors(s, com, adjust, ignore_na):
            # (sum w)^2 / ((sum w)^2 - sum w^2), NaN where undefined
            weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
            cum_sum = weights.cumsum().fillna(method='ffill')
            cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')
            numerator = cum_sum * cum_sum
            denominator = numerator - cum_sum_sq
            denominator[denominator <= 0.] = np.nan
            return numerator / denominator
        def _ewma(s, com, min_periods, adjust, ignore_na):
            # mock ewm mean: weighted cumulative average with the explicit
            # weights, masked below min_periods valid observations
            weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
            result = s.multiply(weights).cumsum().divide(weights.cumsum(
            )).fillna(method='ffill')
            result[s.expanding().count() < (max(min_periods, 1) if min_periods
                                            else 1)] = np.nan
            return result
        com = 3.
        for min_periods, adjust, ignore_na in product([0, 1, 2, 3, 4],
                                                      [True, False],
                                                      [False, True]):
            # test consistency between different ewm* moments
            self._test_moments_consistency(
                min_periods=min_periods,
                count=lambda x: x.expanding().count(),
                mean=lambda x: x.ewm(com=com, min_periods=min_periods,
                                     adjust=adjust,
                                     ignore_na=ignore_na).mean(),
                mock_mean=lambda x: _ewma(x, com=com,
                                          min_periods=min_periods,
                                          adjust=adjust,
                                          ignore_na=ignore_na),
                corr=lambda x, y: x.ewm(com=com, min_periods=min_periods,
                                        adjust=adjust,
                                        ignore_na=ignore_na).corr(y),
                var_unbiased=lambda x: (
                    x.ewm(com=com, min_periods=min_periods,
                          adjust=adjust,
                          ignore_na=ignore_na).var(bias=False)),
                std_unbiased=lambda x: (
                    x.ewm(com=com, min_periods=min_periods,
                          adjust=adjust, ignore_na=ignore_na)
                    .std(bias=False)),
                cov_unbiased=lambda x, y: (
                    x.ewm(com=com, min_periods=min_periods,
                          adjust=adjust, ignore_na=ignore_na)
                    .cov(y, bias=False)),
                var_biased=lambda x: (
                    x.ewm(com=com, min_periods=min_periods,
                          adjust=adjust, ignore_na=ignore_na)
                    .var(bias=True)),
                std_biased=lambda x: x.ewm(com=com, min_periods=min_periods,
                                           adjust=adjust,
                                           ignore_na=ignore_na).std(bias=True),
                cov_biased=lambda x, y: (
                    x.ewm(com=com, min_periods=min_periods,
                          adjust=adjust, ignore_na=ignore_na)
                    .cov(y, bias=True)),
                var_debiasing_factors=lambda x: (
                    _variance_debiasing_factors(x, com=com, adjust=adjust,
                                                ignore_na=ignore_na)))
    @tm.slow
    def test_expanding_consistency(self):
        """Consistency of expanding moments with each other and with
        expanding().apply of the equivalent reduction."""
        # suppress warnings about empty slices, as we are deliberately testing
        # with empty/0-length Series/DataFrames
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    message=".*(empty slice|0 for slice).*",
                                    category=RuntimeWarning)
            for min_periods in [0, 1, 2, 3, 4]:
                # test consistency between different expanding_* moments
                self._test_moments_consistency(
                    min_periods=min_periods,
                    count=lambda x: x.expanding().count(),
                    mean=lambda x: x.expanding(
                        min_periods=min_periods).mean(),
                    mock_mean=lambda x: x.expanding(
                        min_periods=min_periods).sum() / x.expanding().count(),
                    corr=lambda x, y: x.expanding(
                        min_periods=min_periods).corr(y),
                    var_unbiased=lambda x: x.expanding(
                        min_periods=min_periods).var(),
                    std_unbiased=lambda x: x.expanding(
                        min_periods=min_periods).std(),
                    cov_unbiased=lambda x, y: x.expanding(
                        min_periods=min_periods).cov(y),
                    var_biased=lambda x: x.expanding(
                        min_periods=min_periods).var(ddof=0),
                    std_biased=lambda x: x.expanding(
                        min_periods=min_periods).std(ddof=0),
                    cov_biased=lambda x, y: x.expanding(
                        min_periods=min_periods).cov(y, ddof=0),
                    var_debiasing_factors=lambda x: (
                        x.expanding().count() /
                        (x.expanding().count() - 1.)
                        .replace(0., np.nan)))
                # test consistency between expanding_xyz() and either (a)
                # expanding_apply of Series.xyz(), or (b) expanding_apply of
                # np.nanxyz()
                for (x, is_constant, no_nans) in self.data:
                    functions = self.base_functions
                    # GH 8269
                    if no_nans:
                        functions = self.base_functions + self.no_nan_functions
                    for (f, require_min_periods, name) in functions:
                        expanding_f = getattr(
                            x.expanding(min_periods=min_periods), name)
                        # skip combinations the stat cannot support
                        if (require_min_periods and
                                (min_periods is not None) and
                                (min_periods < require_min_periods)):
                            continue
                        if name == 'count':
                            expanding_f_result = expanding_f()
                            expanding_apply_f_result = x.expanding(
                                min_periods=0).apply(func=f)
                        else:
                            if name in ['cov', 'corr']:
                                expanding_f_result = expanding_f(
                                    pairwise=False)
                            else:
                                expanding_f_result = expanding_f()
                            expanding_apply_f_result = x.expanding(
                                min_periods=min_periods).apply(func=f)
                        if not tm._incompat_bottleneck_version(name):
                            assert_equal(expanding_f_result,
                                         expanding_apply_f_result)
                        if (name in ['cov', 'corr']) and isinstance(x,
                                                                    DataFrame):
                            # test pairwise=True
                            expanding_f_result = expanding_f(x, pairwise=True)
                            expected = Panel(items=x.index,
                                             major_axis=x.columns,
                                             minor_axis=x.columns)
                            for i, _ in enumerate(x.columns):
                                for j, _ in enumerate(x.columns):
                                    expected.iloc[:, i, j] = getattr(
                                        x.iloc[:, i].expanding(
                                            min_periods=min_periods),
                                        name)(x.iloc[:, j])
                            tm.assert_panel_equal(expanding_f_result, expected)
    @tm.slow
    def test_rolling_consistency(self):
        """Consistency of rolling moments with each other and with
        rolling().apply of the equivalent reduction, across window /
        min_periods / center combinations."""
        # suppress warnings about empty slices, as we are deliberately testing
        # with empty/0-length Series/DataFrames
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    message=".*(empty slice|0 for slice).*",
                                    category=RuntimeWarning)
            def cases():
                # all valid (window, min_periods, center) combinations
                for window in [1, 2, 3, 10, 20]:
                    for min_periods in set([0, 1, 2, 3, 4, window]):
                        if min_periods and (min_periods > window):
                            continue
                        for center in [False, True]:
                            yield window, min_periods, center
            for window, min_periods, center in cases():
                # test consistency between different rolling_* moments
                self._test_moments_consistency(
                    min_periods=min_periods,
                    count=lambda x: (
                        x.rolling(window=window, center=center)
                        .count()),
                    mean=lambda x: (
                        x.rolling(window=window, min_periods=min_periods,
                                  center=center).mean()),
                    mock_mean=lambda x: (
                        x.rolling(window=window,
                                  min_periods=min_periods,
                                  center=center).sum()
                        .divide(x.rolling(window=window,
                                          min_periods=min_periods,
                                          center=center).count())),
                    corr=lambda x, y: (
                        x.rolling(window=window, min_periods=min_periods,
                                  center=center).corr(y)),
                    var_unbiased=lambda x: (
                        x.rolling(window=window, min_periods=min_periods,
                                  center=center).var()),
                    std_unbiased=lambda x: (
                        x.rolling(window=window, min_periods=min_periods,
                                  center=center).std()),
                    cov_unbiased=lambda x, y: (
                        x.rolling(window=window, min_periods=min_periods,
                                  center=center).cov(y)),
                    var_biased=lambda x: (
                        x.rolling(window=window, min_periods=min_periods,
                                  center=center).var(ddof=0)),
                    std_biased=lambda x: (
                        x.rolling(window=window, min_periods=min_periods,
                                  center=center).std(ddof=0)),
                    cov_biased=lambda x, y: (
                        x.rolling(window=window, min_periods=min_periods,
                                  center=center).cov(y, ddof=0)),
                    var_debiasing_factors=lambda x: (
                        x.rolling(window=window, center=center).count()
                        .divide((x.rolling(window=window, center=center)
                                 .count() - 1.)
                                .replace(0., np.nan))))
                # test consistency between rolling_xyz() and either (a)
                # rolling_apply of Series.xyz(), or (b) rolling_apply of
                # np.nanxyz()
                for (x, is_constant, no_nans) in self.data:
                    functions = self.base_functions
                    # GH 8269
                    if no_nans:
                        functions = self.base_functions + self.no_nan_functions
                    for (f, require_min_periods, name) in functions:
                        rolling_f = getattr(
                            x.rolling(window=window, center=center,
                                      min_periods=min_periods), name)
                        # skip combinations the stat cannot support
                        if require_min_periods and (
                                min_periods is not None) and (
                                min_periods < require_min_periods):
                            continue
                        if name == 'count':
                            rolling_f_result = rolling_f()
                            rolling_apply_f_result = x.rolling(
                                window=window, min_periods=0,
                                center=center).apply(func=f)
                        else:
                            if name in ['cov', 'corr']:
                                rolling_f_result = rolling_f(
                                    pairwise=False)
                            else:
                                rolling_f_result = rolling_f()
                            rolling_apply_f_result = x.rolling(
                                window=window, min_periods=min_periods,
                                center=center).apply(func=f)
                        if not tm._incompat_bottleneck_version(name):
                            assert_equal(rolling_f_result,
                                         rolling_apply_f_result)
                        if (name in ['cov', 'corr']) and isinstance(
                                x, DataFrame):
                            # test pairwise=True
                            rolling_f_result = rolling_f(x,
                                                         pairwise=True)
                            expected = Panel(items=x.index,
                                             major_axis=x.columns,
                                             minor_axis=x.columns)
                            for i, _ in enumerate(x.columns):
                                for j, _ in enumerate(x.columns):
                                    expected.iloc[:, i, j] = (
                                        getattr(
                                            x.iloc[:, i]
                                            .rolling(window=window,
                                                     min_periods=min_periods,
                                                     center=center),
                                            name)(x.iloc[:, j]))
                            tm.assert_panel_equal(rolling_f_result, expected)
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
    def test_rolling_cov_pairwise(self):
        """Pairwise rolling cov via the shared pairwise-moment helper."""
        self._check_pairwise_moment('rolling', 'cov', window=10, min_periods=5)
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
    def test_rolling_corr_pairwise(self):
        """Pairwise rolling corr via the shared pairwise-moment helper."""
        self._check_pairwise_moment('rolling', 'corr', window=10,
                                    min_periods=5)
    def _check_pairwise_moment(self, dispatch, name, **kwargs):
        """Check a pairwise moment on self.frame.

        ``dispatch`` is the window accessor ('rolling', 'ewm', ...),
        ``name`` the statistic ('cov'/'corr'); kwargs go to the accessor.
        The pairwise panel slice for columns (1, 5) must equal the direct
        series-vs-series computation.
        """
        def get_result(obj, obj2=None):
            return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2)
        panel = get_result(self.frame)
        # NOTE: .ix is the legacy indexer; slices the (time, col, col) panel
        actual = panel.ix[:, 1, 5]
        expected = get_result(self.frame[1], self.frame[5])
        tm.assert_series_equal(actual, expected, check_names=False)
        self.assertEqual(actual.name, 5)
    def test_flex_binary_moment(self):
        """_flex_binary_moment rejects non-pandas arguments (GH 3155)."""
        # GH3155
        # don't blow the stack
        self.assertRaises(TypeError, rwindow._flex_binary_moment, 5, 6, None)
def test_corr_sanity(self):
# GH 3155
df = DataFrame(np.array(
[[0.87024726, 0.18505595], [0.64355431, 0.3091617],
[0.92372966, 0.50552513], [0.00203756, 0.04520709],
[0.84780328, 0.33394331], [0.78369152, 0.63919667]]))
res = df[0].rolling(5, center=True).corr(df[1])
self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res]))
# and some fuzzing
for i in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res
]))
except:
print(res)
def test_flex_binary_frame(self):
def _check(method):
series = self.frame[1]
res = getattr(series.rolling(window=10), method)(self.frame)
res2 = getattr(self.frame.rolling(window=10), method)(series)
exp = self.frame.apply(lambda x: getattr(
series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(self.frame.rolling(window=10), method)(frame2)
exp = DataFrame(dict((k, getattr(self.frame[k].rolling(
window=10), method)(frame2[k])) for k in self.frame))
tm.assert_frame_equal(res3, exp)
methods = ['corr', 'cov']
for meth in methods:
_check(meth)
    def test_ewmcov(self):
        """Binary ewm covariance NaN handling via the shared helper."""
        self._check_binary_ew('cov')
    def test_ewmcov_pairwise(self):
        """Pairwise ewm covariance via the shared pairwise-moment helper."""
        self._check_pairwise_moment('ewm', 'cov', span=10, min_periods=5)
    def test_ewmcorr(self):
        """Binary ewm correlation NaN handling via the shared helper."""
        self._check_binary_ew('corr')
    def test_ewmcorr_pairwise(self):
        """Pairwise ewm correlation via the shared pairwise-moment helper."""
        self._check_pairwise_moment('ewm', 'corr', span=10, min_periods=5)
    def _check_binary_ew(self, name):
        """NaN-propagation checks for a binary ewm stat ('cov'/'corr')."""
        def func(A, B, com, **kwargs):
            return getattr(A.ewm(com, **kwargs), name)(B)
        A = Series(randn(50), index=np.arange(50))
        B = A[2:] + randn(48)
        A[:10] = np.NaN
        B[-10:] = np.NaN
        result = func(A, B, 20, min_periods=5)
        # first position where both inputs are valid is 10; with
        # min_periods=5 the first non-NaN output lands at 14
        self.assertTrue(np.isnan(result.values[:14]).all())
        self.assertFalse(np.isnan(result.values[14:]).any())
        # GH 7898
        for min_periods in (0, 1, 2):
            result = func(A, B, 20, min_periods=min_periods)
            # binary functions (ewmcov, ewmcorr) with bias=False require at
            # least two values
            self.assertTrue(np.isnan(result.values[:11]).all())
            self.assertFalse(np.isnan(result.values[11:]).any())
            # check series of length 0
            result = func(Series([]), Series([]), 50, min_periods=min_periods)
            tm.assert_series_equal(result, Series([]))
            # check series of length 1
            result = func(
                Series([1.]), Series([1.]), 50, min_periods=min_periods)
            tm.assert_series_equal(result, Series([np.NaN]))
        # mismatched inputs (plain ndarray) must raise
        self.assertRaises(Exception, func, A, randn(50), 20, min_periods=5)
    def test_expanding_apply(self):
        """expanding().apply on empty/NaN input plus legacy expanding_apply."""
        ser = Series([])
        # applying to an empty series returns an (equal) empty series
        tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean()))
        def expanding_mean(x, min_periods=1, freq=None):
            # legacy pandas.stats.moments entry point
            return mom.expanding_apply(x, lambda x: x.mean(),
                                       min_periods=min_periods, freq=freq)
        self._check_expanding(expanding_mean, np.mean)
        # GH 8080
        s = Series([None, None, None])
        result = s.expanding(min_periods=0).apply(lambda x: len(x))
        expected = Series([1., 2., 3.])
        tm.assert_series_equal(result, expected)
def test_expanding_apply_args_kwargs(self):
def mean_w_arg(x, const):
return np.mean(x) + const
df = DataFrame(np.random.rand(20, 3))
expected = df.expanding().apply(np.mean) + 20.
tm.assert_frame_equal(df.expanding().apply(mean_w_arg, args=(20, )),
expected)
tm.assert_frame_equal(df.expanding().apply(mean_w_arg,
kwargs={'const': 20}),
expected)
def test_expanding_corr(self):
A = self.series.dropna()
B = (A + randn(len(A)))[:-5]
result = A.expanding().corr(B)
rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(self):
result = self.series.expanding().count()
tm.assert_almost_equal(result, self.series.rolling(
window=len(self.series)).count())
def test_expanding_quantile(self):
result = self.series.expanding().quantile(0.5)
rolling_result = self.series.rolling(window=len(self.series),
min_periods=1).quantile(0.5)
tm.assert_almost_equal(result, rolling_result)
def test_expanding_cov(self):
A = self.series
B = (A + randn(len(A)))[:-5]
result = A.expanding().cov(B)
rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
tm.assert_almost_equal(rolling_result, result)
    def test_expanding_max(self):
        """Legacy mom.expanding_max agrees with np.max (NaNs not preserved)."""
        self._check_expanding(mom.expanding_max, np.max, preserve_nan=False)
def test_expanding_cov_pairwise(self):
result = self.frame.expanding().corr()
rolling_result = self.frame.rolling(window=len(self.frame),
min_periods=1).corr()
for i in result.items:
tm.assert_almost_equal(result[i], rolling_result[i])
    def test_expanding_corr_pairwise(self):
        """Pairwise expanding corr matches a full-length rolling corr."""
        result = self.frame.expanding().corr()
        rolling_result = self.frame.rolling(window=len(self.frame),
                                            min_periods=1).corr()
        # result is a Panel; compare item by item
        for i in result.items:
            tm.assert_almost_equal(result[i], rolling_result[i])
def test_expanding_cov_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().cov(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().cov(s2)
expected = Series([None, None, None, 4.5])
tm.assert_series_equal(result, expected)
def test_expanding_corr_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().corr(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().corr(s2)
expected = Series([None, None, None, 1.])
tm.assert_series_equal(result, expected)
def test_rolling_cov_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
    def test_rolling_functions_window_non_shrinkage(self):
        """When data is shorter than min_periods the output keeps the
        input's full shape, filled with NaN (GH 7764)."""
        # GH 7764
        s = Series(range(4))
        s_expected = Series(np.nan, index=s.index)
        df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=['A', 'B'])
        df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
        df_expected_panel = Panel(items=df.index, major_axis=df.columns,
                                  minor_axis=df.columns)
        functions = [lambda x: (x.rolling(window=10, min_periods=5)
                                .cov(x, pairwise=False)),
                     lambda x: (x.rolling(window=10, min_periods=5)
                                .corr(x, pairwise=False)),
                     lambda x: x.rolling(window=10, min_periods=5).max(),
                     lambda x: x.rolling(window=10, min_periods=5).min(),
                     lambda x: x.rolling(window=10, min_periods=5).sum(),
                     lambda x: x.rolling(window=10, min_periods=5).mean(),
                     lambda x: x.rolling(window=10, min_periods=5).std(),
                     lambda x: x.rolling(window=10, min_periods=5).var(),
                     lambda x: x.rolling(window=10, min_periods=5).skew(),
                     lambda x: x.rolling(window=10, min_periods=5).kurt(),
                     lambda x: x.rolling(
                         window=10, min_periods=5).quantile(quantile=0.5),
                     lambda x: x.rolling(window=10, min_periods=5).median(),
                     lambda x: x.rolling(window=10, min_periods=5).apply(sum),
                     lambda x: x.rolling(win_type='boxcar',
                                         window=10, min_periods=5).mean()]
        for f in functions:
            try:
                s_result = f(s)
                tm.assert_series_equal(s_result, s_expected)
                df_result = f(df)
                tm.assert_frame_equal(df_result, df_expected)
            except (ImportError):
                # scipy needed for rolling_window
                continue
        # pairwise=True variants return a Panel instead of a frame
        functions = [lambda x: (x.rolling(window=10, min_periods=5)
                                .cov(x, pairwise=True)),
                     lambda x: (x.rolling(window=10, min_periods=5)
                                .corr(x, pairwise=True))]
        for f in functions:
            df_result_panel = f(df)
            tm.assert_panel_equal(df_result_panel, df_expected_panel)
    def test_moment_functions_zero_length(self):
        """Moment functions on zero-length inputs return output of the
        same (empty) shape rather than raising (GH 8056)."""
        # GH 8056
        s = Series()
        s_expected = s
        df1 = DataFrame()
        df1_expected = df1
        df1_expected_panel = Panel(items=df1.index, major_axis=df1.columns,
                                   minor_axis=df1.columns)
        # zero rows but one (float64) column
        df2 = DataFrame(columns=['a'])
        df2['a'] = df2['a'].astype('float64')
        df2_expected = df2
        df2_expected_panel = Panel(items=df2.index, major_axis=df2.columns,
                                   minor_axis=df2.columns)
        functions = [lambda x: x.expanding().count(),
                     lambda x: x.expanding(min_periods=5).cov(
                         x, pairwise=False),
                     lambda x: x.expanding(min_periods=5).corr(
                         x, pairwise=False),
                     lambda x: x.expanding(min_periods=5).max(),
                     lambda x: x.expanding(min_periods=5).min(),
                     lambda x: x.expanding(min_periods=5).sum(),
                     lambda x: x.expanding(min_periods=5).mean(),
                     lambda x: x.expanding(min_periods=5).std(),
                     lambda x: x.expanding(min_periods=5).var(),
                     lambda x: x.expanding(min_periods=5).skew(),
                     lambda x: x.expanding(min_periods=5).kurt(),
                     lambda x: x.expanding(min_periods=5).quantile(0.5),
                     lambda x: x.expanding(min_periods=5).median(),
                     lambda x: x.expanding(min_periods=5).apply(sum),
                     lambda x: x.rolling(window=10).count(),
                     lambda x: x.rolling(window=10, min_periods=5).cov(
                         x, pairwise=False),
                     lambda x: x.rolling(window=10, min_periods=5).corr(
                         x, pairwise=False),
                     lambda x: x.rolling(window=10, min_periods=5).max(),
                     lambda x: x.rolling(window=10, min_periods=5).min(),
                     lambda x: x.rolling(window=10, min_periods=5).sum(),
                     lambda x: x.rolling(window=10, min_periods=5).mean(),
                     lambda x: x.rolling(window=10, min_periods=5).std(),
                     lambda x: x.rolling(window=10, min_periods=5).var(),
                     lambda x: x.rolling(window=10, min_periods=5).skew(),
                     lambda x: x.rolling(window=10, min_periods=5).kurt(),
                     lambda x: x.rolling(
                         window=10, min_periods=5).quantile(0.5),
                     lambda x: x.rolling(window=10, min_periods=5).median(),
                     lambda x: x.rolling(window=10, min_periods=5).apply(sum),
                     lambda x: x.rolling(win_type='boxcar',
                                         window=10, min_periods=5).mean(),
                     ]
        for f in functions:
            try:
                s_result = f(s)
                tm.assert_series_equal(s_result, s_expected)
                df1_result = f(df1)
                tm.assert_frame_equal(df1_result, df1_expected)
                df2_result = f(df2)
                tm.assert_frame_equal(df2_result, df2_expected)
            except (ImportError):
                # scipy needed for rolling_window
                continue
        # pairwise=True variants return a Panel instead of a frame
        functions = [lambda x: (x.expanding(min_periods=5)
                                .cov(x, pairwise=True)),
                     lambda x: (x.expanding(min_periods=5)
                                .corr(x, pairwise=True)),
                     lambda x: (x.rolling(window=10, min_periods=5)
                                .cov(x, pairwise=True)),
                     lambda x: (x.rolling(window=10, min_periods=5)
                                .corr(x, pairwise=True)),
                     ]
        for f in functions:
            df1_result_panel = f(df1)
            tm.assert_panel_equal(df1_result_panel, df1_expected_panel)
            df2_result_panel = f(df2)
            tm.assert_panel_equal(df2_result_panel, df2_expected_panel)
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=['A', 'B'])
df1a = DataFrame([[1, 5], [3, 9]], index=[0, 2], columns=['A', 'B'])
df2 = DataFrame([[5, 6], [None, None], [2, 1]], columns=['X', 'Y'])
df2a = DataFrame([[5, 6], [2, 1]], index=[0, 2], columns=['X', 'Y'])
result1 = df1.expanding().cov(df2a, pairwise=True)[2]
result2 = df1.expanding().cov(df2a, pairwise=True)[2]
result3 = df1a.expanding().cov(df2, pairwise=True)[2]
result4 = df1a.expanding().cov(df2a, pairwise=True)[2]
expected = DataFrame([[-3., -5.], [-6., -10.]], index=['A', 'B'],
columns=['X', 'Y'])
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
    def test_expanding_corr_pairwise_diff_length(self):
        """Pairwise expanding corr aligns frames of different lengths
        (GH 7512); all four full/short input combinations must agree."""
        # GH 7512
        df1 = DataFrame([[1, 2], [3, 2], [3, 4]], columns=['A', 'B'])
        df1a = DataFrame([[1, 2], [3, 4]], index=[0, 2], columns=['A', 'B'])
        df2 = DataFrame([[5, 6], [None, None], [2, 1]], columns=['X', 'Y'])
        df2a = DataFrame([[5, 6], [2, 1]], index=[0, 2], columns=['X', 'Y'])
        result1 = df1.expanding().corr(df2, pairwise=True)[2]
        result2 = df1.expanding().corr(df2a, pairwise=True)[2]
        result3 = df1a.expanding().corr(df2, pairwise=True)[2]
        result4 = df1a.expanding().corr(df2a, pairwise=True)[2]
        expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]], index=['A', 'B'],
                             columns=['X', 'Y'])
        tm.assert_frame_equal(result1, expected)
        tm.assert_frame_equal(result2, expected)
        tm.assert_frame_equal(result3, expected)
        tm.assert_frame_equal(result4, expected)
    def test_pairwise_stats_column_names_order(self):
        """Pairwise cov/corr must preserve column labels, their order, and
        duplicate/mixed-type labels (GH 7738)."""
        # GH 7738
        # frames with identical data but varied column labels (ints, floats,
        # strings, duplicates, mixed types)
        df1s = [DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),
                DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),
                DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),
                DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]],
                          columns=['C', 'C']),
                DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1., 0]),
                DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0., 1]),
                DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=['C', 1]),
                DataFrame([[2., 4.], [1., 2.], [5., 2.], [8., 1.]],
                          columns=[1, 0.]),
                DataFrame([[2, 4.], [1, 2.], [5, 2.], [8, 1.]],
                          columns=[0, 1.]),
                DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.]],
                          columns=[1., 'X']), ]
        df2 = DataFrame([[None, 1, 1], [None, 1, 2],
                         [None, 3, 2], [None, 8, 1]], columns=['Y', 'Z', 'X'])
        s = Series([1, 1, 3, 8])
        # suppress warnings about incomparable objects, as we are deliberately
        # testing with such column labels
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    message=".*incomparable objects.*",
                                    category=RuntimeWarning)
            # DataFrame methods (which do not call _flex_binary_moment())
            for f in [lambda x: x.cov(), lambda x: x.corr(), ]:
                results = [f(df) for df in df1s]
                for (df, result) in zip(df1s, results):
                    tm.assert_index_equal(result.index, df.columns)
                    tm.assert_index_equal(result.columns, df.columns)
                for i, result in enumerate(results):
                    if i > 0:
                        # compare internal values, as columns can be different
                        self.assert_numpy_array_equal(result.values,
                                                      results[0].values)
            # DataFrame with itself, pairwise=True
            for f in [lambda x: x.expanding().cov(pairwise=True),
                      lambda x: x.expanding().corr(pairwise=True),
                      lambda x: x.rolling(window=3).cov(pairwise=True),
                      lambda x: x.rolling(window=3).corr(pairwise=True),
                      lambda x: x.ewm(com=3).cov(pairwise=True),
                      lambda x: x.ewm(com=3).corr(pairwise=True), ]:
                results = [f(df) for df in df1s]
                for (df, result) in zip(df1s, results):
                    tm.assert_index_equal(result.items, df.index)
                    tm.assert_index_equal(result.major_axis, df.columns)
                    tm.assert_index_equal(result.minor_axis, df.columns)
                for i, result in enumerate(results):
                    if i > 0:
                        self.assert_numpy_array_equal(result.values,
                                                      results[0].values)
            # DataFrame with itself, pairwise=False
            for f in [lambda x: x.expanding().cov(pairwise=False),
                      lambda x: x.expanding().corr(pairwise=False),
                      lambda x: x.rolling(window=3).cov(pairwise=False),
                      lambda x: x.rolling(window=3).corr(pairwise=False),
                      lambda x: x.ewm(com=3).cov(pairwise=False),
                      lambda x: x.ewm(com=3).corr(pairwise=False), ]:
                results = [f(df) for df in df1s]
                for (df, result) in zip(df1s, results):
                    tm.assert_index_equal(result.index, df.index)
                    tm.assert_index_equal(result.columns, df.columns)
                for i, result in enumerate(results):
                    if i > 0:
                        self.assert_numpy_array_equal(result.values,
                                                      results[0].values)
            # DataFrame with another DataFrame, pairwise=True
            for f in [lambda x, y: x.expanding().cov(y, pairwise=True),
                      lambda x, y: x.expanding().corr(y, pairwise=True),
                      lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
                      lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
                      lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
                      lambda x, y: x.ewm(com=3).corr(y, pairwise=True), ]:
                results = [f(df, df2) for df in df1s]
                for (df, result) in zip(df1s, results):
                    tm.assert_index_equal(result.items, df.index)
                    tm.assert_index_equal(result.major_axis, df.columns)
                    tm.assert_index_equal(result.minor_axis, df2.columns)
                for i, result in enumerate(results):
                    if i > 0:
                        self.assert_numpy_array_equal(result.values,
                                                      results[0].values)
            # DataFrame with another DataFrame, pairwise=False
            for f in [lambda x, y: x.expanding().cov(y, pairwise=False),
                      lambda x, y: x.expanding().corr(y, pairwise=False),
                      lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
                      lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
                      lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
                      lambda x, y: x.ewm(com=3).corr(y, pairwise=False), ]:
                # frames with duplicate columns must raise instead
                results = [f(df, df2) if df.columns.is_unique else None
                           for df in df1s]
                for (df, result) in zip(df1s, results):
                    if result is not None:
                        expected_index = df.index.union(df2.index)
                        expected_columns = df.columns.union(df2.columns)
                        tm.assert_index_equal(result.index, expected_index)
                        tm.assert_index_equal(result.columns, expected_columns)
                    else:
                        tm.assertRaisesRegexp(
                            ValueError, "'arg1' columns are not unique", f, df,
                            df2)
                        tm.assertRaisesRegexp(
                            ValueError, "'arg2' columns are not unique", f,
                            df2, df)
            # DataFrame with a Series
            for f in [lambda x, y: x.expanding().cov(y),
                      lambda x, y: x.expanding().corr(y),
                      lambda x, y: x.rolling(window=3).cov(y),
                      lambda x, y: x.rolling(window=3).corr(y),
                      lambda x, y: x.ewm(com=3).cov(y),
                      lambda x, y: x.ewm(com=3).corr(y), ]:
                results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s]
                for (df, result) in zip(df1s, results):
                    tm.assert_index_equal(result.index, df.index)
                    tm.assert_index_equal(result.columns, df.columns)
                for i, result in enumerate(results):
                    if i > 0:
                        self.assert_numpy_array_equal(result.values,
                                                      results[0].values)
def test_rolling_skew_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401
])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401
])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
    def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
                                 has_time_rule=True, preserve_nan=True):
        """Check a legacy expanding function on raw ndarrays.

        ``func`` is the expanding moment; ``static_comp`` the whole-array
        equivalent (e.g. np.mean). ``has_time_rule`` is accepted but not
        used in this helper.
        """
        result = func(self.arr)
        # position 10 aggregates exactly the first 11 elements
        tm.assert_almost_equal(result[10], static_comp(self.arr[:11]))
        if preserve_nan:
            assert (np.isnan(result[self._nan_locs]).all())
        arr = randn(50)
        if has_min_periods:
            result = func(arr, min_periods=30)
            assert (np.isnan(result[:29]).all())
            tm.assert_almost_equal(result[-1], static_comp(arr[:50]))
            # min_periods is working correctly
            result = func(arr, min_periods=15)
            self.assertTrue(np.isnan(result[13]))
            self.assertFalse(np.isnan(result[14]))
            arr2 = randn(20)
            result = func(arr2, min_periods=5)
            self.assertTrue(isnull(result[3]))
            self.assertTrue(notnull(result[4]))
            # min_periods=0
            result0 = func(arr, min_periods=0)
            result1 = func(arr, min_periods=1)
            tm.assert_almost_equal(result0, result1)
        else:
            result = func(arr)
            tm.assert_almost_equal(result[-1], static_comp(arr[:50]))
    def _check_expanding_structures(self, func):
        """func must map Series -> Series and DataFrame -> DataFrame."""
        series_result = func(self.series)
        tm.assertIsInstance(series_result, Series)
        frame_result = func(self.frame)
        self.assertEqual(type(frame_result), DataFrame)
    def _check_expanding(self, func, static_comp, has_min_periods=True,
                         has_time_rule=True, preserve_nan=True):
        """Run both the ndarray and the Series/DataFrame checks for a
        legacy expanding function."""
        # record=True captures any warnings the legacy API emits
        # (presumably deprecation warnings — confirm against the API)
        with warnings.catch_warnings(record=True):
            self._check_expanding_ndarray(func, static_comp,
                                          has_min_periods=has_min_periods,
                                          has_time_rule=has_time_rule,
                                          preserve_nan=preserve_nan)
        with warnings.catch_warnings(record=True):
            self._check_expanding_structures(func)
def test_rolling_max_gh6297(self):
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
x = series.rolling(window=1, freq='D').max()
tm.assert_series_equal(expected, x)
    def test_rolling_max_how_resample(self):
        """rolling(..., freq='D') resamples; 'how' selects the intra-day
        aggregator used before the rolling max (legacy, FutureWarning)."""
        indices = [datetime(1975, 1, i) for i in range(1, 6)]
        # So that we can have 3 datapoints on last day (4, 10, and 20)
        indices.append(datetime(1975, 1, 5, 1))
        indices.append(datetime(1975, 1, 5, 2))
        series = Series(list(range(0, 5)) + [10, 20], index=indices)
        # Use floats instead of ints as values
        series = series.map(lambda x: float(x))
        # Sort chronologically
        series = series.sort_index()
        # Default how should be max
        expected = Series([0.0, 1.0, 2.0, 3.0, 20.0],
                          index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            x = series.rolling(window=1, freq='D').max()
        tm.assert_series_equal(expected, x)
        # Now specify median (10.0)
        expected = Series([0.0, 1.0, 2.0, 3.0, 10.0],
                          index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            x = series.rolling(window=1, freq='D').max(how='median')
        tm.assert_series_equal(expected, x)
        # Now specify mean (4+10+20)/3
        v = (4.0 + 10.0 + 20.0) / 3.0
        expected = Series([0.0, 1.0, 2.0, 3.0, v],
                          index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            x = series.rolling(window=1, freq='D').max(how='mean')
        tm.assert_series_equal(expected, x)
def test_rolling_min_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
r = series.rolling(window=1, freq='D')
tm.assert_series_equal(expected, r.min())
def test_rolling_median_how_resample(self):
    # Daily stamps plus two extra intra-day points on the last day,
    # giving three values (4, 10, 20) to resample on 1975-01-05.
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    indices += [datetime(1975, 1, 5, 1), datetime(1975, 1, 5, 2)]
    series = Series(list(range(0, 5)) + [10, 20], index=indices)
    # Use floats instead of ints as values, and sort chronologically.
    series = series.map(lambda x: float(x)).sort_index()

    # Default resample 'how' for .median() should be median: the last
    # day collapses to median(4, 10, 20) == 10.
    expected = Series([0.0, 1.0, 2.0, 3.0, 10],
                      index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        x = series.rolling(window=1, freq='D').median()
    tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error(self):
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
def test_rolling_min_max_numeric_types(self):
    # GH12373
    dtypes = [np.dtype("f{}".format(w)) for w in [4, 8]]
    dtypes += [np.dtype("{}{}".format(s, w))
               for w in [1, 2, 4, 8] for s in "ui"]
    for dtype in dtypes:
        # Just testing that these don't throw exceptions and that the
        # return type is float64. Other tests cover quantitative
        # correctness.
        for op in ('max', 'min'):
            frame = DataFrame(np.arange(20, dtype=dtype))
            result = getattr(frame.rolling(window=5), op)()
            self.assertEqual(result.dtypes[0], np.dtype("f8"))
class TestGrouperGrouping(tm.TestCase):
    # Tests for rolling/expanding operations routed through a groupby.

    def setUp(self):
        # A short series plus a frame with three groups in 'A' (sizes
        # 20/12/8) and a running counter in 'B'.
        self.series = Series(np.arange(10))
        self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
                                'B': np.arange(40)})
def test_mutated(self):
    # groupby must reject unknown keyword arguments ...
    def bad_call():
        self.frame.groupby('A', foo=1)
    self.assertRaises(TypeError, bad_call)

    # ... while the 'mutated' flag defaults to False and is settable
    g = self.frame.groupby('A')
    self.assertFalse(g.mutated)
    g = self.frame.groupby('A', mutated=True)
    self.assertTrue(g.mutated)
def test_getitem(self):
    g = self.frame.groupby('A')
    g_mutated = self.frame.groupby('A', mutated=True)
    expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())

    # every spelling of "rolling mean of B per group" must agree
    spellings = [
        g.rolling(2).mean().B,
        g.rolling(2).B.mean(),
        g.B.rolling(2).mean(),
        self.frame.B.groupby(self.frame.A).rolling(2).mean(),
    ]
    for result in spellings:
        tm.assert_series_equal(result, expected)
def test_getitem_multiple(self):
    # GH 13174: selecting a column from a rolling-groupby must keep
    # working on repeated access.
    g = self.frame.groupby('A')
    r = g.rolling(2)
    g_mutated = self.frame.groupby('A', mutated=True)
    expected = g_mutated.B.apply(lambda x: x.rolling(2).count())

    for _ in range(2):  # second pass exercises the cached selection
        tm.assert_series_equal(r.B.count(), expected)
def test_rolling(self):
    g = self.frame.groupby('A')
    r = g.rolling(window=4)

    # plain aggregations through the groupby must equal the per-group result
    for op in ['sum', 'mean', 'min', 'max', 'count', 'kurt', 'skew']:
        tm.assert_frame_equal(
            getattr(r, op)(),
            g.apply(lambda x: getattr(x.rolling(4), op)()))

    # std/var with an explicit ddof
    for op in ['std', 'var']:
        tm.assert_frame_equal(
            getattr(r, op)(ddof=1),
            g.apply(lambda x: getattr(x.rolling(4), op)(ddof=1)))

    tm.assert_frame_equal(
        r.quantile(0.5),
        g.apply(lambda x: x.rolling(4).quantile(0.5)))
def test_rolling_corr_cov(self):
    g = self.frame.groupby('A')
    r = g.rolling(window=4)

    for op in ['corr', 'cov']:
        # frame-valued "other" argument
        result = getattr(r, op)(self.frame)
        expected = g.apply(
            lambda x: getattr(x.rolling(4), op)(self.frame))
        tm.assert_frame_equal(result, expected)

        # pairwise on a single selected column
        result = getattr(r.B, op)(pairwise=True)
        expected = g.apply(
            lambda x: getattr(x.B.rolling(4), op)(pairwise=True))
        tm.assert_series_equal(result, expected)
def test_rolling_apply(self):
    grouped = self.frame.groupby('A')
    windowed = grouped.rolling(window=4)

    def total(values):
        return values.sum()

    # a rolling reduction through the groupby must equal applying the
    # same rolling reduction inside each group
    result = windowed.apply(total)
    expected = grouped.apply(lambda x: x.rolling(4).apply(total))
    tm.assert_frame_equal(result, expected)
def test_expanding(self):
    g = self.frame.groupby('A')
    r = g.expanding()

    # plain aggregations through the groupby must equal the per-group result
    for op in ['sum', 'mean', 'min', 'max', 'count', 'kurt', 'skew']:
        tm.assert_frame_equal(
            getattr(r, op)(),
            g.apply(lambda x: getattr(x.expanding(), op)()))

    # std/var with an explicit ddof
    for op in ['std', 'var']:
        tm.assert_frame_equal(
            getattr(r, op)(ddof=0),
            g.apply(lambda x: getattr(x.expanding(), op)(ddof=0)))

    tm.assert_frame_equal(
        r.quantile(0.5),
        g.apply(lambda x: x.expanding().quantile(0.5)))
def test_expanding_corr_cov(self):
    g = self.frame.groupby('A')
    r = g.expanding()

    for op in ['corr', 'cov']:
        # frame-valued "other" argument
        result = getattr(r, op)(self.frame)
        expected = g.apply(
            lambda x: getattr(x.expanding(), op)(self.frame))
        tm.assert_frame_equal(result, expected)

        # pairwise on a single selected column
        result = getattr(r.B, op)(pairwise=True)
        expected = g.apply(
            lambda x: getattr(x.B.expanding(), op)(pairwise=True))
        tm.assert_series_equal(result, expected)
def test_expanding_apply(self):
    grouped = self.frame.groupby('A')
    expanded = grouped.expanding()

    def total(values):
        return values.sum()

    # an expanding reduction through the groupby must equal applying
    # the same expanding reduction inside each group
    result = expanded.apply(total)
    expected = grouped.apply(lambda x: x.expanding().apply(total))
    tm.assert_frame_equal(result, expected)
class TestRollingTS(tm.TestCase):

    # rolling time-series friendly
    # xref GH13327

    def setUp(self):
        # 'regular': five rows on an evenly spaced 1-second DatetimeIndex.
        self.regular = DataFrame({'A': pd.date_range('20130101',
                                                     periods=5,
                                                     freq='s'),
                                  'B': range(5)}).set_index('A')

        # 'ragged': the same values on an unevenly spaced index (gaps of
        # 2s, 1s, 2s, 1s) to exercise time-based windows.
        self.ragged = DataFrame({'B': range(5)})
        self.ragged.index = [Timestamp('20130101 09:00:00'),
                             Timestamp('20130101 09:00:02'),
                             Timestamp('20130101 09:00:03'),
                             Timestamp('20130101 09:00:05'),
                             Timestamp('20130101 09:00:06')]
def test_doc_string(self):
    # Mirrors the example shown in the rolling() docstring; only checks
    # that it runs without error.
    stamps = [Timestamp('20130101 09:00:00'),
              Timestamp('20130101 09:00:02'),
              Timestamp('20130101 09:00:03'),
              Timestamp('20130101 09:00:05'),
              Timestamp('20130101 09:00:06')]
    df = DataFrame({'B': [0, 1, 2, np.nan, 4]}, index=stamps)
    df
    df.rolling('2s').sum()
def test_valid(self):
    df = self.regular

    # not a valid freq
    with self.assertRaises(ValueError):
        df.rolling(window='foobar')

    # not a datetimelike index
    with self.assertRaises(ValueError):
        df.reset_index().rolling(window='foobar')

    # non-fixed freqs (month-begin offsets have no fixed duration and
    # therefore cannot define a time-based window)
    for freq in ['2MS', pd.offsets.MonthBegin(2)]:
        with self.assertRaises(ValueError):
            df.rolling(window=freq)

    # fixed freqs are accepted
    for freq in ['1D', pd.offsets.Day(2), '2ms']:
        df.rolling(window=freq)

    # non-integer min_periods
    for minp in [1.0, 'foo', np.array([1, 2, 3])]:
        with self.assertRaises(ValueError):
            df.rolling(window='1D', min_periods=minp)

    # center is not implemented
    with self.assertRaises(NotImplementedError):
        df.rolling(window='1D', center=True)
def test_on(self):
    # Validation of the 'on' argument (rolling over a column instead of
    # the index).
    df = self.regular

    # not a valid column
    with self.assertRaises(ValueError):
        df.rolling(window='2s', on='foobar')

    # column is valid
    df = df.copy()
    df['C'] = pd.date_range('20130101', periods=len(df))
    df.rolling(window='2d', on='C').sum()

    # invalid columns (not datetimelike)
    with self.assertRaises(ValueError):
        df.rolling(window='2d', on='B')

    # ok even though on non-selected
    df.rolling(window='2d', on='C').B.sum()
def test_monotonic_on(self):
    # on/index must be monotonic
    df = DataFrame({'A': pd.date_range('20130101',
                                       periods=5,
                                       freq='s'),
                    'B': range(5)})

    self.assertTrue(df.A.is_monotonic)
    df.rolling('2s', on='A').sum()

    df = df.set_index('A')
    self.assertTrue(df.index.is_monotonic)
    df.rolling('2s').sum()

    # non-monotonic: reverse the index and expect ValueError both via
    # the index and via the 'on' column
    df.index = reversed(df.index.tolist())
    self.assertFalse(df.index.is_monotonic)

    with self.assertRaises(ValueError):
        df.rolling('2s').sum()

    df = df.reset_index()
    with self.assertRaises(ValueError):
        df.rolling('2s', on='A').sum()
def test_frame_on(self):
    # Rolling over an explicit datetime column ('on') must match rolling
    # over the same values installed as the index.
    df = DataFrame({'B': range(5),
                    'C': pd.date_range('20130101 09:00:00',
                                       periods=5,
                                       freq='3s')})

    df['A'] = [Timestamp('20130101 09:00:00'),
               Timestamp('20130101 09:00:02'),
               Timestamp('20130101 09:00:03'),
               Timestamp('20130101 09:00:05'),
               Timestamp('20130101 09:00:06')]

    # we are doing simulating using 'on'
    expected = (df.set_index('A')
                .rolling('2s')
                .B
                .sum()
                .reset_index(drop=True)
                )

    result = (df.rolling('2s', on='A')
              .B
              .sum()
              )
    tm.assert_series_equal(result, expected)

    # test as a frame
    # we should be ignoring the 'on' as an aggregation column
    # note that the expected is setting, computing, and resetting
    # so the columns need to be switched compared
    # to the actual result where they are ordered as in the
    # original
    expected = (df.set_index('A')
                .rolling('2s')[['B']]
                .sum()
                .reset_index()[['B', 'A']]
                )

    result = (df.rolling('2s', on='A')[['B']]
              .sum()
              )
    tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
    # using multiple aggregation columns
    df = DataFrame({'A': [0, 1, 2, 3, 4],
                    'B': [0, 1, 2, np.nan, 4],
                    'C': pd.Index([pd.Timestamp('20130101 09:00:00'),
                                   pd.Timestamp('20130101 09:00:02'),
                                   pd.Timestamp('20130101 09:00:03'),
                                   pd.Timestamp('20130101 09:00:05'),
                                   pd.Timestamp('20130101 09:00:06')])},
                   columns=['A', 'C', 'B'])

    # the 'on' column is carried through unchanged; the others are summed
    expected1 = DataFrame({'A': [0., 1, 3, 3, 7],
                           'B': [0, 1, 3, np.nan, 4],
                           'C': df['C']},
                          columns=['A', 'C', 'B'])

    result = df.rolling('2s', on='C').sum()
    expected = expected1
    tm.assert_frame_equal(result, expected)

    # selecting a single aggregation column yields a Series
    expected = Series([0, 1, 3, np.nan, 4], name='B')
    result = df.rolling('2s', on='C').B.sum()
    tm.assert_series_equal(result, expected)

    # explicit column selection preserves the requested order
    expected = expected1[['A', 'B', 'C']]
    result = df.rolling('2s', on='C')[['A', 'B', 'C']].sum()
    tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
    # Time-based windows on evenly spaced indexes must agree with the
    # equivalent integer windows.
    df = self.regular.copy()

    # daily index: a '1D' window only ever spans one row
    df.index = pd.date_range('20130101', periods=5, freq='D')
    expected = df.rolling(window=1, min_periods=1).sum()
    result = df.rolling(window='1D').sum()
    tm.assert_frame_equal(result, expected)

    # 2-daily index: a '2D' window still only spans one row.
    # NOTE: the original repeated this exact expected/result/assert
    # triple twice in a row; the redundant duplicate was removed.
    df.index = pd.date_range('20130101', periods=5, freq='2D')
    expected = df.rolling(window=1, min_periods=1).sum()
    result = df.rolling(window='2D', min_periods=1).sum()
    tm.assert_frame_equal(result, expected)

    # same comparison with the default min_periods on both sides
    expected = df.rolling(window=1).sum()
    result = df.rolling(window='2D').sum()
    tm.assert_frame_equal(result, expected)
def test_min_periods(self):
    # On a regular 1s index, a '2s' time window should match an integer
    # window of 2 with min_periods=1, whether min_periods is implied or
    # passed explicitly.
    df = self.regular
    for kwargs in [{}, {'min_periods': 1}]:
        expected = df.rolling(2, min_periods=1).sum()
        result = df.rolling('2s', **kwargs).sum()
        tm.assert_frame_equal(result, expected)
def test_ragged_sum(self):
    df = self.ragged
    # (window, min_periods, expected 'B'); min_periods=None means the
    # keyword's default.
    cases = [
        ('1s', 1, [0.0, 1, 2, 3, 4]),
        ('2s', 1, [0.0, 1, 3, 3, 7]),
        ('2s', 2, [np.nan, np.nan, 3, np.nan, 7]),
        ('3s', 1, [0.0, 1, 3, 5, 7]),
        ('3s', None, [0.0, 1, 3, 5, 7]),
        ('4s', 1, [0.0, 1, 3, 6, 9]),
        ('4s', 3, [np.nan, np.nan, 3, 6, 9]),
        ('5s', 1, [0.0, 1, 3, 6, 10]),
    ]
    for window, minp, values in cases:
        result = df.rolling(window=window, min_periods=minp).sum()
        expected = df.copy()
        expected['B'] = values
        tm.assert_frame_equal(result, expected)
def test_ragged_mean(self):
    df = self.ragged
    for window, values in [('1s', [0.0, 1, 2, 3, 4]),
                           ('2s', [0.0, 1, 1.5, 3.0, 3.5])]:
        result = df.rolling(window=window, min_periods=1).mean()
        expected = df.copy()
        expected['B'] = values
        tm.assert_frame_equal(result, expected)
def test_ragged_median(self):
    df = self.ragged
    for window, values in [('1s', [0.0, 1, 2, 3, 4]),
                           ('2s', [0.0, 1, 1.5, 3.0, 3.5])]:
        result = df.rolling(window=window, min_periods=1).median()
        expected = df.copy()
        expected['B'] = values
        tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self):
    df = self.ragged
    for window, values in [('1s', [0.0, 1, 2, 3, 4]),
                           ('2s', [0.0, 1, 1.0, 3.0, 3.0])]:
        result = df.rolling(window=window, min_periods=1).quantile(0.5)
        expected = df.copy()
        expected['B'] = values
        tm.assert_frame_equal(result, expected)
def test_ragged_std(self):
    df = self.ragged
    # (window, ddof, expected 'B'); single-element windows give 0 with
    # ddof=0 and NaN with ddof=1.
    cases = [
        ('1s', 0, [0.0] * 5),
        ('1s', 1, [np.nan] * 5),
        ('3s', 0, [0.0] + [0.5] * 4),
        ('5s', 1, [np.nan, 0.707107, 1.0, 1.0, 1.290994]),
    ]
    for window, ddof, values in cases:
        result = df.rolling(window=window, min_periods=1).std(ddof=ddof)
        expected = df.copy()
        expected['B'] = values
        tm.assert_frame_equal(result, expected)
def test_ragged_var(self):
    df = self.ragged
    # (window, ddof, expected 'B'); single-element windows give 0 with
    # ddof=0 and NaN with ddof=1.
    cases = [
        ('1s', 0, [0.0] * 5),
        ('1s', 1, [np.nan] * 5),
        ('3s', 0, [0.0] + [0.25] * 4),
        ('5s', 1, [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.]),
    ]
    for window, ddof, values in cases:
        result = df.rolling(window=window, min_periods=1).var(ddof=ddof)
        expected = df.copy()
        expected['B'] = values
        tm.assert_frame_equal(result, expected)
def test_ragged_skew(self):
    df = self.ragged
    # skew needs at least 3 observations in the window
    for window, values in [('3s', [np.nan] * 5),
                           ('5s', [np.nan] * 2 + [0.0, 0.0, 0.0])]:
        result = df.rolling(window=window, min_periods=1).skew()
        expected = df.copy()
        expected['B'] = values
        tm.assert_frame_equal(result, expected)
def test_ragged_kurt(self):
    df = self.ragged
    # kurtosis needs at least 4 observations in the window
    for window, values in [('3s', [np.nan] * 5),
                           ('5s', [np.nan] * 4 + [-1.2])]:
        result = df.rolling(window=window, min_periods=1).kurt()
        expected = df.copy()
        expected['B'] = values
        tm.assert_frame_equal(result, expected)
def test_ragged_count(self):
    df = self.ragged

    # window '1s': every stamp is alone in its window
    expected = df.copy()
    expected['B'] = [1.0, 1, 1, 1, 1]
    result = df.rolling(window='1s', min_periods=1).count()
    tm.assert_frame_equal(result, expected)

    # the default min_periods behaves identically for count
    result = df.rolling(window='1s').count()
    tm.assert_frame_equal(result, expected)

    for minp, values in [(1, [1.0, 1, 2, 1, 2]),
                         (2, [np.nan, np.nan, 2, np.nan, 2])]:
        result = df.rolling(window='2s', min_periods=minp).count()
        expected = df.copy()
        expected['B'] = values
        tm.assert_frame_equal(result, expected)
def test_regular_min(self):
    # Increasing values: each '1s' window holds a single row, so the
    # rolling min is the series itself.
    df = DataFrame({'A': pd.date_range('20130101',
                                       periods=5,
                                       freq='s'),
                    'B': [0.0, 1, 2, 3, 4]}).set_index('A')
    result = df.rolling('1s').min()
    expected = df.copy()
    expected['B'] = [0.0, 1, 2, 3, 4]
    tm.assert_frame_equal(result, expected)

    # V-shaped values: the min reflects the trailing window contents.
    # NOTE: the original re-asserted the stale result/expected pair here
    # (a no-op duplicate assertion); it was removed.
    df = DataFrame({'A': pd.date_range('20130101',
                                       periods=5,
                                       freq='s'),
                    'B': [5, 4, 3, 4, 5]}).set_index('A')

    result = df.rolling('2s').min()
    expected = df.copy()
    expected['B'] = [5.0, 4, 3, 3, 4]
    tm.assert_frame_equal(result, expected)

    result = df.rolling('5s').min()
    expected = df.copy()
    expected['B'] = [5.0, 4, 3, 3, 3]
    tm.assert_frame_equal(result, expected)
def test_ragged_min(self):
    df = self.ragged
    for window, values in [('1s', [0.0, 1, 2, 3, 4]),
                           ('2s', [0.0, 1, 1, 3, 3]),
                           ('5s', [0.0, 0, 0, 1, 1])]:
        result = df.rolling(window=window, min_periods=1).min()
        expected = df.copy()
        expected['B'] = values
        tm.assert_frame_equal(result, expected)
def test_perf_min(self):
    # The time-based rolling min must agree with the integer-window min
    # on a regular 1-second index (allowing tiny numerical noise).
    N = 10000
    dfp = DataFrame({'B': np.random.randn(N)},
                    index=pd.date_range('20130101',
                                        periods=N,
                                        freq='s'))
    for size, window in [(2, '2s'), (200, '200s')]:
        expected = dfp.rolling(size, min_periods=1).min()
        result = dfp.rolling(window).min()
        self.assertTrue(((result - expected) < 0.01).all().bool())
def test_ragged_max(self):
    df = self.ragged
    # values are increasing, so the trailing max is always the current
    # value regardless of window size
    for window in ['1s', '2s', '5s']:
        result = df.rolling(window=window, min_periods=1).max()
        expected = df.copy()
        expected['B'] = [0.0, 1, 2, 3, 4]
        tm.assert_frame_equal(result, expected)
def test_ragged_apply(self):
    df = self.ragged

    def always_one(x):
        return 1

    # with min_periods=1 every window is evaluated, so the result is
    # 1.0 everywhere for any window size
    for window in ['1s', '2s', '5s']:
        result = df.rolling(window=window, min_periods=1).apply(always_one)
        expected = df.copy()
        expected['B'] = 1.
        tm.assert_frame_equal(result, expected)
def test_all(self):
    # simple comparison of integer vs time-based windowing: on a regular
    # 1s index, window='1s' and window=1 must agree for every method
    df = self.regular * 2
    er = df.rolling(window=1)
    r = df.rolling(window='1s')

    for op in ['sum', 'mean', 'count', 'median', 'std',
               'var', 'kurt', 'skew', 'min', 'max']:
        tm.assert_frame_equal(getattr(r, op)(), getattr(er, op)())

    tm.assert_frame_equal(r.quantile(0.5), er.quantile(0.5))
    tm.assert_frame_equal(r.apply(lambda x: 1), er.apply(lambda x: 1))
def test_all2(self):
    # more sophisticated comparison of integer vs.
    # time-based windowing
    df = DataFrame({'B': np.arange(50)},
                   index=pd.date_range('20130101',
                                       periods=50, freq='H')
                   )
    # in-range data
    dft = df.between_time("09:00", "16:00")
    r = dft.rolling(window='5H')

    for f in ['sum', 'mean', 'count', 'median', 'std',
              'var', 'kurt', 'skew', 'min', 'max']:

        result = getattr(r, f)()

        # we need to roll the days separately
        # to compare with a time-based roll
        # finally groupby-apply will return a multi-index
        # so we need to drop the day
        # (note: agg_by_day closes over the loop variable f and is
        # applied within the same iteration, so the binding is safe)
        def agg_by_day(x):
            x = x.between_time("09:00", "16:00")
            return getattr(x.rolling(5, min_periods=1), f)()
        expected = df.groupby(df.index.day).apply(
            agg_by_day).reset_index(level=0, drop=True)

        tm.assert_frame_equal(result, expected)
| apache-2.0 |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/offsetbox.py | 4 | 55560 | """
The OffsetBox is a simple container artist. The child artist are meant
to be drawn at a relative position to its parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of their
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import warnings
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.path as mpath
import numpy as np
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch, FancyArrowPatch
from matplotlib import rcParams
from matplotlib import docstring
#from bboximage import BboxImage
from matplotlib.image import BboxImage
from matplotlib.patches import bbox_artist as mbbox_artist
from matplotlib.text import _AnnotationBase
DEBUG = False  # set True to have bbox_artist() draw debug bounding boxes
# for debugging use
def bbox_artist(*args, **kwargs):
    """Debug helper: forward to `.patches.bbox_artist` only when DEBUG is on."""
    if not DEBUG:
        return
    mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
Geiven a list of (width, xdescent) of each boxes, calculate the
total width and the x-offset positions of each items according to
*mode*. xdescent is analagous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = list(zip(*wd_list))
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
if len(w_list) > 1:
sep = (total - sum(w_list)) / (len(w_list) - 1.)
else:
sep = 0.
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh + sep) * len(w_list)
else:
sep = float(total) / (len(w_list)) - maxh
offsets = np.array([(maxh + sep) * i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Given a list of (height, descent) of each boxes, align the boxes
with *align* and calculate the y-offsets of each boxes.
total width and the offset positions of each items according to
*mode*. xdescent is analogous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*hd_list* : list of (width, xdescent) of boxes to be aligned.
*sep* : spacing between boxes
*height* : Intended total length. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h - d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "top"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "bottom"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
class OffsetBox(martist.Artist):
    """
    The OffsetBox is a simple container artist.  Its children are meant
    to be drawn at a relative position to the parent.
    """
    def __init__(self, *args, **kwargs):
        super(OffsetBox, self).__init__(*args, **kwargs)

        # Clipping has not been implemented in the OffsetBox family, so
        # disable the clip flag for consistency. It can always be turned back
        # on to zero effect.
        self.set_clip_on(False)

        self._children = []
        self._offset = (0, 0)

    def __getstate__(self):
        state = martist.Artist.__getstate__(self)

        # pickle cannot save instancemethods, so handle them here
        from .cbook import _InstanceMethodPickler
        import inspect

        offset = state['_offset']
        if inspect.ismethod(offset):
            state['_offset'] = _InstanceMethodPickler(offset)
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        from .cbook import _InstanceMethodPickler
        # undo the pickling wrapper applied in __getstate__
        if isinstance(self._offset, _InstanceMethodPickler):
            self._offset = self._offset.get_instancemethod()
        self.stale = True

    def set_figure(self, fig):
        """
        Set the figure

        accepts a class:`~matplotlib.figure.Figure` instance
        """
        martist.Artist.set_figure(self, fig)
        # propagate the figure to all children
        for c in self.get_children():
            c.set_figure(fig)

    @martist.Artist.axes.setter
    def axes(self, ax):
        # TODO deal with this better
        martist.Artist.axes.fset(self, ax)
        for c in self.get_children():
            if c is not None:
                c.axes = ax

    def contains(self, mouseevent):
        # hit-test the children; the first hit wins
        for c in self.get_children():
            a, b = c.contains(mouseevent)
            if a:
                return a, b
        return False, {}

    def set_offset(self, xy):
        """
        Set the offset

        accepts x, y, tuple, or a callable object.
        """
        self._offset = xy
        self.stale = True

    def get_offset(self, width, height, xdescent, ydescent, renderer):
        """
        Get the offset

        accepts extent of the box
        """
        # a callable offset allows placement to be computed lazily at
        # draw time, when the extent is known
        if six.callable(self._offset):
            return self._offset(width, height, xdescent, ydescent, renderer)
        else:
            return self._offset

    def set_width(self, width):
        """
        Set the width

        accepts float
        """
        self.width = width
        self.stale = True

    def set_height(self, height):
        """
        Set the height

        accepts float
        """
        self.height = height
        self.stale = True

    def get_visible_children(self):
        """
        Return a list of visible artists it contains.
        """
        return [c for c in self._children if c.get_visible()]

    def get_children(self):
        """
        Return a list of artists it contains.
        """
        return self._children

    def get_extent_offsets(self, renderer):
        # Subclasses must override and return
        # (width, height, xdescent, ydescent, list of child offsets).
        raise Exception("")

    def get_extent(self, renderer):
        """
        Return width, height, xdescent, ydescent of box
        """
        w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
        return w, h, xd, yd

    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
        px, py = self.get_offset(w, h, xd, yd, renderer)
        return mtransforms.Bbox.from_bounds(px - xd, py - yd, w, h)

    def draw(self, renderer):
        """
        Update the location of children if necessary and draw them
        to the given *renderer*.
        """
        width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
            renderer)

        px, py = self.get_offset(width, height, xdescent, ydescent, renderer)

        for c, (ox, oy) in zip(self.get_visible_children(), offsets):
            c.set_offset((px + ox, py + oy))
            c.draw(renderer)

        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class PackerBase(OffsetBox):
    # Shared constructor for VPacker/HPacker; subclasses implement
    # get_extent_offsets() to do the actual packing.
    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align=None, mode=None,
                 children=None):
        """
        Parameters
        ----------
        pad : float, optional
            Boundary pad.

        sep : float, optional
            Spacing between items.

        width : float, optional

        height : float, optional
            Width and height of the container box, calculated if
            `None`.

        align : str, optional
            Alignment of boxes. Can be one of ``top``, ``bottom``,
            ``left``, ``right``, ``center`` and ``baseline``

        mode : str, optional
            Packing mode.

        Notes
        -----
        *pad* and *sep* need to be given in points and will be scaled
        with the renderer dpi, while *width* and *height* need to be in
        pixels.
        """
        super(PackerBase, self).__init__()

        self.height = height
        self.width = width
        self.sep = sep
        self.pad = pad
        self.mode = mode
        self.align = align

        self._children = children
class VPacker(PackerBase):
    """
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of children at draw time.
    """
    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align="baseline", mode="fixed",
                 children=None):
        """
        Parameters
        ----------
        pad : float, optional
            Boundary pad.

        sep : float, optional
            Spacing between items.

        width : float, optional

        height : float, optional
            Width and height of the container box, calculated if
            `None`.

        align : str, optional
            Alignment of boxes.

        mode : str, optional
            Packing mode.

        Notes
        -----
        *pad* and *sep* need to be given in points and will be scaled
        with the renderer dpi, while *width* and *height* need to be in
        pixels.
        """
        super(VPacker, self).__init__(pad, sep, width, height,
                                      align, mode,
                                      children)

    def get_extent_offsets(self, renderer):
        """
        update offset of children and return the extents of the box
        """
        # pad/sep are in points; convert to pixels for this renderer
        dpicor = renderer.points_to_pixels(1.)
        pad = self.pad * dpicor
        sep = self.sep * dpicor

        # propagate our width to expandable children before measuring
        if self.width is not None:
            for c in self.get_visible_children():
                if isinstance(c, PackerBase) and c.mode == "expand":
                    c.set_width(self.width)

        whd_list = [c.get_extent(renderer)
                    for c in self.get_visible_children()]
        # replace ydescent by the height above the baseline (h - yd)
        # for the vertical packing below
        whd_list = [(w, h, xd, (h - yd)) for w, h, xd, yd in whd_list]

        # horizontal alignment of the children
        wd_list = [(w, xd) for w, h, xd, yd in whd_list]
        width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
                                                         self.width,
                                                         self.align)

        # vertical packing (top-down)
        pack_list = [(h, yd) for w, h, xd, yd in whd_list]
        height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
                                                sep, self.mode)

        # flip: packing runs top-down but y increases upwards
        yoffsets = yoffsets_ + [yd for w, h, xd, yd in whd_list]
        ydescent = height - yoffsets[0]
        yoffsets = height - yoffsets

        #w, h, xd, h_yd = whd_list[-1]
        yoffsets = yoffsets - ydescent

        return width + 2 * pad, height + 2 * pad, \
            xdescent + pad, ydescent + pad, \
            list(zip(xoffsets, yoffsets))
class HPacker(PackerBase):
    """
    The HPacker has its children packed horizontally. It automatically
    adjusts the relative positions of children at draw time.
    """
    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align="baseline", mode="fixed",
                 children=None):
        """
        Parameters
        ----------
        pad : float, optional
            Boundary pad.

        sep : float, optional
            Spacing between items.

        width : float, optional

        height : float, optional
            Width and height of the container box, calculated if
            `None`.

        align : str
            Alignment of boxes.

        mode : str
            Packing mode.

        Notes
        -----
        *pad* and *sep* need to be given in points and will be scaled
        with the renderer dpi, while *width* and *height* need to be in
        pixels.
        """
        super(HPacker, self).__init__(pad, sep, width, height,
                                      align, mode, children)

    def get_extent_offsets(self, renderer):
        """
        update offset of children and return the extents of the box
        """
        # pad/sep are in points; convert to pixels for this renderer
        dpicor = renderer.points_to_pixels(1.)
        pad = self.pad * dpicor
        sep = self.sep * dpicor

        whd_list = [c.get_extent(renderer)
                    for c in self.get_visible_children()]

        if not whd_list:
            # no visible children: just an empty, padded box
            return 2 * pad, 2 * pad, pad, pad, []

        if self.height is None:
            height_descent = max([h - yd for w, h, xd, yd in whd_list])
            ydescent = max([yd for w, h, xd, yd in whd_list])
            height = height_descent + ydescent
        else:
            height = self.height - 2 * pad  # width w/o pad

        # vertical alignment of the children
        hd_list = [(h, yd) for w, h, xd, yd in whd_list]
        height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
                                                          self.height,
                                                          self.align)

        # horizontal packing (left to right)
        pack_list = [(w, xd) for w, h, xd, yd in whd_list]
        width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
                                               sep, self.mode)

        xoffsets = xoffsets_ + [xd for w, h, xd, yd in whd_list]

        # the box's xdescent is taken from the first child
        xdescent = whd_list[0][2]
        xoffsets = xoffsets - xdescent

        return width + 2 * pad, height + 2 * pad, \
            xdescent + pad, ydescent + pad, \
            list(zip(xoffsets, yoffsets))
class PaddedBox(OffsetBox):
    """
    A container that draws a single child surrounded by *pad* points of
    padding, optionally framed by a FancyBboxPatch.
    """
    def __init__(self, child, pad=None, draw_frame=False, patch_attrs=None):
        """
        *pad* : boundary pad

        .. note::
          *pad* need to given in points and will be
          scale with the renderer dpi, while *width* and *height*
          need to be in pixels.
        """
        super(PaddedBox, self).__init__()

        self.pad = pad
        self._children = [child]

        # frame patch drawn behind the child; its bounds are synced to
        # this box's window extent in update_frame()
        self.patch = FancyBboxPatch(
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=1,  # self.prop.get_size_in_points(),
            snap=True
        )

        self.patch.set_boxstyle("square", pad=0)

        if patch_attrs is not None:
            self.patch.update(patch_attrs)

        self._drawFrame = draw_frame

    def get_extent_offsets(self, renderer):
        """
        update offset of children and return the extents of the box
        """
        # pad is in points; convert to pixels for this renderer
        dpicor = renderer.points_to_pixels(1.)
        pad = self.pad * dpicor

        w, h, xd, yd = self._children[0].get_extent(renderer)

        return w + 2 * pad, h + 2 * pad, \
            xd + pad, yd + pad, \
            [(0, 0)]

    def draw(self, renderer):
        """
        Update the location of children if necessary and draw them
        to the given *renderer*.
        """
        width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
            renderer)

        px, py = self.get_offset(width, height, xdescent, ydescent, renderer)

        for c, (ox, oy) in zip(self.get_visible_children(), offsets):
            c.set_offset((px + ox, py + oy))

        # draw the frame first so the child renders on top of it
        self.draw_frame(renderer)

        for c in self.get_visible_children():
            c.draw(renderer)

        #bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False

    def update_frame(self, bbox, fontsize=None):
        # sync the frame patch to the given bounding box
        self.patch.set_bounds(bbox.x0, bbox.y0,
                              bbox.width, bbox.height)

        if fontsize:
            self.patch.set_mutation_scale(fontsize)
        self.stale = True

    def draw_frame(self, renderer):
        # update the location and size of the legend
        bbox = self.get_window_extent(renderer)
        self.update_frame(bbox)

        if self._drawFrame:
            self.patch.draw(renderer)
class DrawingArea(OffsetBox):
    """
    The DrawingArea can contain any Artist as a child. The DrawingArea
    has a fixed width and height. The position of children relative to
    the parent is fixed. The children can be clipped at the
    boundaries of the parent.
    """
    def __init__(self, width, height, xdescent=0.,
                 ydescent=0., clip=False):
        """
        *width*, *height* : width and height of the container box.
        *xdescent*, *ydescent* : descent of the box in x- and y-direction.
        *clip* : Whether to clip the children to the box boundary.
        """
        super(DrawingArea, self).__init__()
        self.width = width
        self.height = height
        self.xdescent = xdescent
        self.ydescent = ydescent
        self._clip_children = clip
        # offset_transform positions the area in display space;
        # dpi_transform rescales children with the renderer dpi and is
        # refreshed on every draw.
        self.offset_transform = mtransforms.Affine2D()
        self.offset_transform.clear()
        self.offset_transform.translate(0, 0)
        self.dpi_transform = mtransforms.Affine2D()
    @property
    def clip_children(self):
        """
        If the children of this DrawingArea should be clipped
        by DrawingArea bounding box.
        """
        return self._clip_children
    @clip_children.setter
    def clip_children(self, val):
        self._clip_children = bool(val)
        self.stale = True
    def get_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform` applied
        to the children
        """
        return self.dpi_transform + self.offset_transform
    def set_transform(self, t):
        """
        set_transform is ignored.
        """
        pass
    def set_offset(self, xy):
        """
        Set the offset of the container.

        Accepts a tuple of (x, y) coordinates in display units.
        """
        self._offset = xy
        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True
    def get_offset(self):
        """
        return offset of the container.
        """
        return self._offset
    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()  # w, h, xd, yd)
        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
    def get_extent(self, renderer):
        """
        Return width, height, xdescent, ydescent of the box, each scaled
        by the renderer's points-to-pixels factor.
        """
        dpi_cor = renderer.points_to_pixels(1.)
        return self.width * dpi_cor, self.height * dpi_cor, \
               self.xdescent * dpi_cor, self.ydescent * dpi_cor
    def add_artist(self, a):
        'Add any :class:`~matplotlib.artist.Artist` to the container box'
        self._children.append(a)
        # Leave the artist's transform alone if the caller already set one.
        if not a.is_transform_set():
            a.set_transform(self.get_transform())
        if self.axes is not None:
            a.axes = self.axes
        fig = self.figure
        if fig is not None:
            a.set_figure(fig)
    def draw(self, renderer):
        """
        Draw the children
        """
        # Refresh dpi scaling each draw in case the dpi changed.
        dpi_cor = renderer.points_to_pixels(1.)
        self.dpi_transform.clear()
        self.dpi_transform.scale(dpi_cor, dpi_cor)
        # At this point the DrawingArea has a transform
        # to the display space so the path created is
        # good for clipping children
        tpath = mtransforms.TransformedPath(
            mpath.Path([[0, 0], [0, self.height],
                        [self.width, self.height],
                        [self.width, 0]]),
            self.get_transform())
        for c in self._children:
            # Only clip children that have no clip box/path of their own.
            if self._clip_children and not (c.clipbox or c._clippath):
                c.set_clip_path(tpath)
            c.draw(renderer)
        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class TextArea(OffsetBox):
    """
    The TextArea contains a single Text instance. The text is
    placed at (0, 0) with baseline+left alignment. The width and height
    of the TextArea instance is the width and height of its child
    text.
    """
    def __init__(self, s,
                 textprops=None,
                 multilinebaseline=None,
                 minimumdescent=True,
                 ):
        """
        Parameters
        ----------
        s : str
            a string to be displayed.
        textprops : `~matplotlib.font_manager.FontProperties`, optional
        multilinebaseline : bool, optional
            If `True`, baseline for multiline text is adjusted so that
            it is (approximately) center-aligned with singleline
            text.
        minimumdescent : bool, optional
            If `True`, the box has a minimum descent of "p".
        """
        if textprops is None:
            textprops = {}
        # Default to baseline vertical alignment so the text sits on the
        # box's baseline.
        if "va" not in textprops:
            textprops["va"] = "baseline"
        self._text = mtext.Text(0, 0, s, **textprops)
        OffsetBox.__init__(self)
        self._children = [self._text]
        # offset_transform positions the box; _baseline_transform applies
        # the multiline-baseline correction computed in get_extent.
        self.offset_transform = mtransforms.Affine2D()
        self.offset_transform.clear()
        self.offset_transform.translate(0, 0)
        self._baseline_transform = mtransforms.Affine2D()
        self._text.set_transform(self.offset_transform +
                                 self._baseline_transform)
        self._multilinebaseline = multilinebaseline
        self._minimumdescent = minimumdescent
    def set_text(self, s):
        "set text"
        self._text.set_text(s)
        self.stale = True
    def get_text(self):
        "get text"
        return self._text.get_text()
    def set_multilinebaseline(self, t):
        """
        Set multilinebaseline.

        If True, the baseline for multiline text is adjusted so that it
        is (approximately) center-aligned with singleline text.
        """
        self._multilinebaseline = t
        self.stale = True
    def get_multilinebaseline(self):
        """
        get multilinebaseline.
        """
        return self._multilinebaseline
    def set_minimumdescent(self, t):
        """
        Set minimumdescent.

        If True, the extent of the single line text is adjusted so that
        it has a minimum descent of "p".
        """
        self._minimumdescent = t
        self.stale = True
    def get_minimumdescent(self):
        """
        get minimumdescent.
        """
        return self._minimumdescent
    def set_transform(self, t):
        """
        set_transform is ignored.
        """
        pass
    def set_offset(self, xy):
        """
        set offset of the container.

        Accept : tuple of x,y coordinates in display units.
        """
        self._offset = xy
        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True
    def get_offset(self):
        """
        return offset of the container.
        """
        return self._offset
    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()  # w, h, xd, yd)
        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
    def get_extent(self, renderer):
        # Return (width, height, xdescent, ydescent) of the text box and,
        # as a side effect, update the baseline transform.
        clean_line, ismath = self._text.is_math_text(self._text._text)
        # Reference metrics of a single "lp" line: h_ is line height,
        # d_ its descent.
        _, h_, d_ = renderer.get_text_width_height_descent(
            "lp", self._text._fontproperties, ismath=False)
        bbox, info, d = self._text._get_layout(renderer)
        w, h = bbox.width, bbox.height
        line = info[-1][0]  # last line
        self._baseline_transform.clear()
        if len(info) > 1 and self._multilinebaseline:
            # Shift the text so its baseline is (approximately) centered,
            # like a single line of text would be.
            d_new = 0.5 * h - 0.5 * (h_ - d_)
            self._baseline_transform.translate(0, d - d_new)
            d = d_new
        else:  # single line
            h_d = max(h_ - d_, h - d)
            if self.get_minimumdescent():
                ## to have a minimum descent, #i.e., "l" and "p" have same
                ## descents.
                d = max(d, d_)
            #else:
            #    d = d
            h = h_d + d
        return w, h, 0., d
    def draw(self, renderer):
        """
        Draw the children
        """
        self._text.draw(renderer)
        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class AuxTransformBox(OffsetBox):
    """
    Offset Box with the aux_transform. Its children will be
    transformed with the aux_transform first then will be
    offsetted. The absolute coordinate of the aux_transform is
    meaningless as it will be automatically adjusted so that the
    lower-left corner of the bounding box of children will be set to
    (0, 0) before the offset transform.

    It is similar to drawing area, except that the extent of the box
    is not predetermined but calculated from the window extent of its
    children. Furthermore, the extent of the children will be
    calculated in the transformed coordinate.
    """
    def __init__(self, aux_transform):
        self.aux_transform = aux_transform
        OffsetBox.__init__(self)
        self.offset_transform = mtransforms.Affine2D()
        self.offset_transform.clear()
        self.offset_transform.translate(0, 0)
        # ref_offset_transform is used to make the offset_transform is
        # always reference to the lower-left corner of the bbox of its
        # children.
        self.ref_offset_transform = mtransforms.Affine2D()
        self.ref_offset_transform.clear()
    def add_artist(self, a):
        'Add any :class:`~matplotlib.artist.Artist` to the container box'
        self._children.append(a)
        a.set_transform(self.get_transform())
        self.stale = True
    def get_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform` applied
        to the children
        """
        # aux first, then re-reference to the children's lower-left
        # corner, then position the whole box.
        return self.aux_transform + \
               self.ref_offset_transform + \
               self.offset_transform
    def set_transform(self, t):
        """
        set_transform is ignored.
        """
        pass
    def set_offset(self, xy):
        """
        set offset of the container.

        Accept : tuple of x,y coordinates in display units.
        """
        self._offset = xy
        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True
    def get_offset(self):
        """
        return offset of the container.
        """
        return self._offset
    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()  # w, h, xd, yd)
        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
    def get_extent(self, renderer):
        # clear the offset transforms
        _off = self.offset_transform.to_values()  # to be restored later
        self.ref_offset_transform.clear()
        self.offset_transform.clear()
        # calculate the extent of the children in aux coordinates
        bboxes = [c.get_window_extent(renderer) for c in self._children]
        ub = mtransforms.Bbox.union(bboxes)
        # adjust ref_offset_transform so the union bbox's lower-left
        # corner maps to (0, 0)
        self.ref_offset_transform.translate(-ub.x0, -ub.y0)
        # restore offset transform
        mtx = self.offset_transform.matrix_from_values(*_off)
        self.offset_transform.set_matrix(mtx)
        return ub.width, ub.height, 0., 0.
    def draw(self, renderer):
        """
        Draw the children
        """
        for c in self._children:
            c.draw(renderer)
        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class AnchoredOffsetbox(OffsetBox):
    """
    An offset box placed according to the legend location
    loc. AnchoredOffsetbox has a single child. When multiple children
    are needed, use another OffsetBox class to enclose them. By default,
    the offset box is anchored against its parent axes. You may
    explicitly specify the bbox_to_anchor.
    """
    zorder = 5  # zorder of the legend
    def __init__(self, loc,
                 pad=0.4, borderpad=0.5,
                 child=None, prop=None, frameon=True,
                 bbox_to_anchor=None,
                 bbox_transform=None,
                 **kwargs):
        """
        loc is a string or an integer specifying the legend location.
        The valid location codes are::

        'upper right'  : 1,
        'upper left'   : 2,
        'lower left'   : 3,
        'lower right'  : 4,
        'right'        : 5,
        'center left'  : 6,
        'center right' : 7,
        'lower center' : 8,
        'upper center' : 9,
        'center'       : 10,

        pad : pad around the child for drawing a frame. given in
          fraction of fontsize.
        borderpad : pad between offsetbox frame and the bbox_to_anchor,
        child : OffsetBox instance that will be anchored.
        prop : font property. This is only used as a reference for paddings.
        frameon : draw a frame box if True.
        bbox_to_anchor : bbox to anchor. Use self.axes.bbox if None.
        bbox_transform : with which the bbox_to_anchor will be transformed.
        """
        super(AnchoredOffsetbox, self).__init__(**kwargs)
        self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
        self.set_child(child)
        self.loc = loc
        self.borderpad = borderpad
        self.pad = pad
        # prop is only a reference for pad/frame sizes, not for the child.
        if prop is None:
            self.prop = FontProperties(size=rcParams["legend.fontsize"])
        elif isinstance(prop, dict):
            self.prop = FontProperties(**prop)
            if "size" not in prop:
                self.prop.set_size(rcParams["legend.fontsize"])
        else:
            self.prop = prop
        # Frame patch; geometry is updated in update_frame at draw time.
        self.patch = FancyBboxPatch(
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=self.prop.get_size_in_points(),
            snap=True
            )
        self.patch.set_boxstyle("square", pad=0)
        self._drawFrame = frameon
    def set_child(self, child):
        "set the child to be anchored"
        self._child = child
        if child is not None:
            child.axes = self.axes
        self.stale = True
    def get_child(self):
        "return the child"
        return self._child
    def get_children(self):
        "return the list of children"
        return [self._child]
    def get_extent(self, renderer):
        """
        return the extent of the artist. The extent of the child
        added with the pad is returned
        """
        w, h, xd, yd = self.get_child().get_extent(renderer)
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        pad = self.pad * fontsize
        return w + 2 * pad, h + 2 * pad, xd + pad, yd + pad
    def get_bbox_to_anchor(self):
        """
        return the bbox that the legend will be anchored
        """
        # Fall back to the parent axes' bbox when no anchor bbox is set.
        if self._bbox_to_anchor is None:
            return self.axes.bbox
        else:
            transform = self._bbox_to_anchor_transform
            if transform is None:
                return self._bbox_to_anchor
            else:
                return TransformedBbox(self._bbox_to_anchor,
                                       transform)
    def set_bbox_to_anchor(self, bbox, transform=None):
        """
        set the bbox that the child will be anchored.

        *bbox* can be a Bbox instance, a list of [left, bottom, width,
        height], or a list of [left, bottom] where the width and
        height will be assumed to be zero. The bbox will be
        transformed to display coordinate by the given transform.
        """
        if bbox is None or isinstance(bbox, BboxBase):
            self._bbox_to_anchor = bbox
        else:
            try:
                l = len(bbox)
            except TypeError:
                raise ValueError("Invalid argument for bbox : %s" % str(bbox))
            if l == 2:
                # [left, bottom] -> zero-size bbox at that point.
                bbox = [bbox[0], bbox[1], 0, 0]
            self._bbox_to_anchor = Bbox.from_bounds(*bbox)
        self._bbox_to_anchor_transform = transform
        self.stale = True
    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        self._update_offset_func(renderer)
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset(w, h, xd, yd, renderer)
        return Bbox.from_bounds(ox - xd, oy - yd, w, h)
    def _update_offset_func(self, renderer, fontsize=None):
        """
        Update the offset func which depends on the dpi of the
        renderer (because of the padding).
        """
        if fontsize is None:
            fontsize = renderer.points_to_pixels(
                            self.prop.get_size_in_points())
        def _offset(w, h, xd, yd, renderer, fontsize=fontsize, self=self):
            # Anchor a (w, h) box inside bbox_to_anchor with borderpad.
            bbox = Bbox.from_bounds(0, 0, w, h)
            borderpad = self.borderpad * fontsize
            bbox_to_anchor = self.get_bbox_to_anchor()
            x0, y0 = self._get_anchored_bbox(self.loc,
                                             bbox,
                                             bbox_to_anchor,
                                             borderpad)
            return x0 + xd, y0 + yd
        self.set_offset(_offset)
    def update_frame(self, bbox, fontsize=None):
        # Fit the frame patch to the given bounding box.
        self.patch.set_bounds(bbox.x0, bbox.y0,
                              bbox.width, bbox.height)
        if fontsize:
            self.patch.set_mutation_scale(fontsize)
    def draw(self, renderer):
        "draw the artist"
        if not self.get_visible():
            return
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        self._update_offset_func(renderer, fontsize)
        if self._drawFrame:
            # update the location and size of the legend
            bbox = self.get_window_extent(renderer)
            self.update_frame(bbox, fontsize)
            self.patch.draw(renderer)
        width, height, xdescent, ydescent = self.get_extent(renderer)
        px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
        self.get_child().set_offset((px, py))
        self.get_child().draw(renderer)
        self.stale = False
    def _get_anchored_bbox(self, loc, bbox, parentbbox, borderpad):
        """
        return the position of the bbox anchored at the parentbbox
        with the loc code, with the borderpad.
        """
        assert loc in range(1, 11)  # called only internally
        # NOTE(review): `xrange` is presumably imported from six.moves at
        # the top of this file — confirm.
        BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = list(xrange(11))
        # Map the legend loc code to a compass anchor for Bbox.anchored.
        anchor_coefs = {UR: "NE",
                        UL: "NW",
                        LL: "SW",
                        LR: "SE",
                        R: "E",
                        CL: "W",
                        CR: "E",
                        LC: "S",
                        UC: "N",
                        C: "C"}
        c = anchor_coefs[loc]
        # Shrink the parent bbox by borderpad before anchoring.
        container = parentbbox.padded(-borderpad)
        anchored_box = bbox.anchored(c, container=container)
        return anchored_box.x0, anchored_box.y0
class AnchoredText(AnchoredOffsetbox):
    """
    AnchoredOffsetbox with Text.
    """
    def __init__(self, s, loc, pad=0.4, borderpad=0.5, prop=None, **kwargs):
        """
        Parameters
        ----------
        s : string
            Text.

        loc : str
            Location code.

        pad : float, optional
            Pad between the text and the frame as fraction of the font
            size.

        borderpad : float, optional
            Pad between the frame and the axes (or *bbox_to_anchor*).

        prop : `matplotlib.font_manager.FontProperties`
            Font properties.

        Notes
        -----
        Other keyword parameters of `AnchoredOffsetbox` are also
        allowed.
        """
        prop = {} if prop is None else prop
        # Alignment must be controlled via the anchor location, not via
        # text properties; warn if the caller tries to mix them.
        badkwargs = ('ha', 'horizontalalignment', 'va', 'verticalalignment')
        if set(badkwargs) & set(six.iterkeys(prop)):
            warnings.warn("Mixing horizontalalignment or verticalalignment "
                          "with AnchoredText is not supported.")
        self.txt = TextArea(s, textprops=prop, minimumdescent=False)
        fp = self.txt._text.get_fontproperties()
        # The text's own font properties serve as the padding reference.
        super(AnchoredText, self).__init__(
            loc, pad=pad, borderpad=borderpad, child=self.txt, prop=fp,
            **kwargs)
class OffsetImage(OffsetBox):
    """
    An OffsetBox whose single child is a BboxImage displaying *arr*,
    scaled by *zoom*.
    """
    def __init__(self, arr,
                 zoom=1,
                 cmap=None,
                 norm=None,
                 interpolation=None,
                 origin=None,
                 filternorm=1,
                 filterrad=4.0,
                 resample=False,
                 dpi_cor=True,
                 **kwargs
                 ):
        OffsetBox.__init__(self)
        self._dpi_cor = dpi_cor
        # The image's bbox tracks this box's window extent.
        self.image = BboxImage(bbox=self.get_window_extent,
                               cmap=cmap,
                               norm=norm,
                               interpolation=interpolation,
                               origin=origin,
                               filternorm=filternorm,
                               filterrad=filterrad,
                               resample=resample,
                               **kwargs
                               )
        self._children = [self.image]
        self.set_zoom(zoom)
        self.set_data(arr)
    def set_data(self, arr):
        # Store the image data and propagate it to the child BboxImage.
        self._data = np.asarray(arr)
        self.image.set_data(self._data)
        self.stale = True
    def get_data(self):
        return self._data
    def set_zoom(self, zoom):
        self._zoom = zoom
        self.stale = True
    def get_zoom(self):
        return self._zoom
#     def set_axes(self, axes):
#         self.image.set_axes(axes)
#         martist.Artist.set_axes(self, axes)
#     def set_offset(self, xy):
#         """
#         set offset of the container.
#         Accept : tuple of x,y coordinate in disokay units.
#         """
#         self._offset = xy
#         self.offset_transform.clear()
#         self.offset_transform.translate(xy[0], xy[1])
    def get_offset(self):
        """
        return offset of the container.
        """
        return self._offset
    def get_children(self):
        return [self.image]
    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()
        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
    def get_extent(self, renderer):
        # FIXME dpi_cor is computed but never applied below, so the
        # extent ignores the renderer dpi even when self._dpi_cor is True.
        if self._dpi_cor:  # True, do correction
            dpi_cor = renderer.points_to_pixels(1.)
        else:
            dpi_cor = 1.
        zoom = self.get_zoom()
        data = self.get_data()
        ny, nx = data.shape[:2]
        w, h = nx * zoom, ny * zoom
        return w, h, 0, 0
    def draw(self, renderer):
        """
        Draw the children
        """
        self.image.draw(renderer)
        # bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class AnnotationBbox(martist.Artist, _AnnotationBase):
    """
    Annotation-like class, but with offsetbox instead of Text.
    """
    zorder = 3
    def __str__(self):
        return "AnnotationBbox(%g,%g)" % (self.xy[0], self.xy[1])
    @docstring.dedent_interpd
    def __init__(self, offsetbox, xy,
                 xybox=None,
                 xycoords='data',
                 boxcoords=None,
                 frameon=True, pad=0.4,  # BboxPatch
                 annotation_clip=None,
                 box_alignment=(0.5, 0.5),
                 bboxprops=None,
                 arrowprops=None,
                 fontsize=None,
                 **kwargs):
        """
        *offsetbox* : OffsetBox instance

        *xycoords* : same as Annotation but can be a tuple of two
           strings which are interpreted as x and y coordinates.

        *boxcoords* : similar to textcoords as Annotation but can be a
           tuple of two strings which are interpreted as x and y
           coordinates.

        *box_alignment* : a tuple of two floats for a vertical and
           horizontal alignment of the offset box w.r.t. the *boxcoords*.
           The lower-left corner is (0, 0) and upper-right corner is (1, 1).

        other parameters are identical to that of Annotation.
        """
        martist.Artist.__init__(self, **kwargs)
        _AnnotationBase.__init__(self,
                                 xy,
                                 xycoords=xycoords,
                                 annotation_clip=annotation_clip)
        self.offsetbox = offsetbox
        self.arrowprops = arrowprops
        # set_fontsize defines self.prop, which the frame patch below uses.
        self.set_fontsize(fontsize)
        # The box defaults to sitting on the annotated point itself.
        if xybox is None:
            self.xybox = xy
        else:
            self.xybox = xybox
        if boxcoords is None:
            self.boxcoords = xycoords
        else:
            self.boxcoords = boxcoords
        if arrowprops is not None:
            # relpos: arrow start position relative to the box (0-1 coords).
            self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
            self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
                                               **self.arrowprops)
        else:
            self._arrow_relpos = None
            self.arrow_patch = None
        #self._fw, self._fh = 0., 0.  # for alignment
        self._box_alignment = box_alignment
        # frame
        self.patch = FancyBboxPatch(
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=self.prop.get_size_in_points(),
            snap=True
            )
        self.patch.set_boxstyle("square", pad=pad)
        if bboxprops:
            self.patch.set(**bboxprops)
        self._drawFrame = frameon
    @property
    def xyann(self):
        return self.xybox
    @xyann.setter
    def xyann(self, xyann):
        self.xybox = xyann
        self.stale = True
    @property
    def anncoords(self):
        return self.boxcoords
    @anncoords.setter
    def anncoords(self, coords):
        self.boxcoords = coords
        self.stale = True
    def contains(self, event):
        # Hit-test only the offsetbox, not the arrow.
        t, tinfo = self.offsetbox.contains(event)
        #if self.arrow_patch is not None:
        #    a,ainfo=self.arrow_patch.contains(event)
        #    t = t or a
        # self.arrow_patch is currently not checked as this can be a line - JJ
        return t, tinfo
    def get_children(self):
        children = [self.offsetbox, self.patch]
        if self.arrow_patch:
            children.append(self.arrow_patch)
        return children
    def set_figure(self, fig):
        if self.arrow_patch is not None:
            self.arrow_patch.set_figure(fig)
        self.offsetbox.set_figure(fig)
        martist.Artist.set_figure(self, fig)
    def set_fontsize(self, s=None):
        """
        set fontsize in points
        """
        if s is None:
            s = rcParams["legend.fontsize"]
        self.prop = FontProperties(size=s)
        self.stale = True
    def get_fontsize(self, s=None):
        """
        return fontsize in points
        """
        return self.prop.get_size_in_points()
    def update_positions(self, renderer):
        """
        Update the pixel positions of the annotated point and the text.
        """
        xy_pixel = self._get_position_xy(renderer)
        self._update_position_xybox(renderer, xy_pixel)
        mutation_scale = renderer.points_to_pixels(self.get_fontsize())
        self.patch.set_mutation_scale(mutation_scale)
        if self.arrow_patch:
            self.arrow_patch.set_mutation_scale(mutation_scale)
    def _update_position_xybox(self, renderer, xy_pixel):
        """
        Update the pixel positions of the annotation text and the arrow
        patch.
        """
        x, y = self.xybox
        if isinstance(self.boxcoords, tuple):
            # Separate coordinate systems for x and y.
            xcoord, ycoord = self.boxcoords
            x1, y1 = self._get_xy(renderer, x, y, xcoord)
            x2, y2 = self._get_xy(renderer, x, y, ycoord)
            ox0, oy0 = x1, y2
        else:
            ox0, oy0 = self._get_xy(renderer, x, y, self.boxcoords)
        # Place the box according to box_alignment (0-1 fractions).
        w, h, xd, yd = self.offsetbox.get_extent(renderer)
        _fw, _fh = self._box_alignment
        self.offsetbox.set_offset((ox0 - _fw * w + xd, oy0 - _fh * h + yd))
        # update patch position
        bbox = self.offsetbox.get_window_extent(renderer)
        #self.offsetbox.set_offset((ox0-_fw*w, oy0-_fh*h))
        self.patch.set_bounds(bbox.x0, bbox.y0,
                              bbox.width, bbox.height)
        x, y = xy_pixel
        ox1, oy1 = x, y
        if self.arrowprops:
            x0, y0 = x, y
            d = self.arrowprops.copy()
            # Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
            # adjust the starting point of the arrow relative to
            # the textbox.
            # TODO : Rotation needs to be accounted.
            relpos = self._arrow_relpos
            ox0 = bbox.x0 + bbox.width * relpos[0]
            oy0 = bbox.y0 + bbox.height * relpos[1]
            # The arrow will be drawn from (ox0, oy0) to (ox1,
            # oy1). It will be first clipped by patchA and patchB.
            # Then it will be shrinked by shirnkA and shrinkB
            # (in points). If patch A is not set, self.bbox_patch
            # is used.
            self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
            fs = self.prop.get_size_in_points()
            mutation_scale = d.pop("mutation_scale", fs)
            mutation_scale = renderer.points_to_pixels(mutation_scale)
            self.arrow_patch.set_mutation_scale(mutation_scale)
            patchA = d.pop("patchA", self.patch)
            self.arrow_patch.set_patchA(patchA)
    def draw(self, renderer):
        """
        Draw the :class:`Annotation` object to the given *renderer*.
        """
        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible():
            return
        xy_pixel = self._get_position_xy(renderer)
        if not self._check_xy(renderer, xy_pixel):
            # The annotated point is clipped away; draw nothing.
            return
        self.update_positions(renderer)
        if self.arrow_patch is not None:
            if self.arrow_patch.figure is None and self.figure is not None:
                self.arrow_patch.figure = self.figure
            self.arrow_patch.draw(renderer)
        if self._drawFrame:
            self.patch.draw(renderer)
        self.offsetbox.draw(renderer)
        self.stale = False
class DraggableBase(object):
    """
    helper code for a draggable artist (legend, offsetbox)
    The derived class must override following two method.

      def save_offset(self):
          pass

      def update_offset(self, dx, dy):
          pass

    *save_offset* is called when the object is picked for dragging and it is
    meant to save reference position of the artist.

    *update_offset* is called during the dragging. dx and dy is the pixel
     offset from the point where the mouse drag started.

    Optionally you may override following two methods.

      def artist_picker(self, artist, evt):
          return self.ref_artist.contains(evt)

      def finalize_offset(self):
          pass

    *artist_picker* is a picker method that will be
    used. *finalize_offset* is called when the mouse is released. In
    current implementation of DraggableLegend and DraggableAnnotation,
    *update_offset* places the artists simply in display
    coordinates. And *finalize_offset* recalculates their position in
    the normalized axes coordinate and sets a relevant attribute.
    """
    def __init__(self, ref_artist, use_blit=False):
        self.ref_artist = ref_artist
        self.got_artist = False
        self.canvas = self.ref_artist.figure.canvas
        # Only blit if requested and the backend supports it.
        self._use_blit = use_blit and self.canvas.supports_blit
        c2 = self.canvas.mpl_connect('pick_event', self.on_pick)
        c3 = self.canvas.mpl_connect('button_release_event', self.on_release)
        ref_artist.set_picker(self.artist_picker)
        self.cids = [c2, c3]
    def on_motion(self, evt):
        # Full redraw on every motion event (non-blit path).
        if self.got_artist:
            dx = evt.x - self.mouse_x
            dy = evt.y - self.mouse_y
            self.update_offset(dx, dy)
            self.canvas.draw()
    def on_motion_blit(self, evt):
        # Fast path: restore the saved background and redraw only the
        # dragged artist.
        if self.got_artist:
            dx = evt.x - self.mouse_x
            dy = evt.y - self.mouse_y
            self.update_offset(dx, dy)
            self.canvas.restore_region(self.background)
            self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
            self.canvas.blit(self.ref_artist.figure.bbox)
    def on_pick(self, evt):
        if evt.artist == self.ref_artist:
            # Remember where the drag started, in pixels.
            self.mouse_x = evt.mouseevent.x
            self.mouse_y = evt.mouseevent.y
            self.got_artist = True
            if self._use_blit:
                # Capture the background without the dragged artist so
                # it can be restored during motion.
                self.ref_artist.set_animated(True)
                self.canvas.draw()
                self.background = self.canvas.copy_from_bbox(
                                    self.ref_artist.figure.bbox)
                self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
                self.canvas.blit(self.ref_artist.figure.bbox)
                self._c1 = self.canvas.mpl_connect('motion_notify_event',
                                                   self.on_motion_blit)
            else:
                self._c1 = self.canvas.mpl_connect('motion_notify_event',
                                                   self.on_motion)
            self.save_offset()
    def on_release(self, event):
        if self.got_artist:
            self.finalize_offset()
            self.got_artist = False
            # Stop tracking motion once the drag ends.
            self.canvas.mpl_disconnect(self._c1)
            if self._use_blit:
                self.ref_artist.set_animated(False)
    def disconnect(self):
        """disconnect the callbacks"""
        for cid in self.cids:
            self.canvas.mpl_disconnect(cid)
    def artist_picker(self, artist, evt):
        return self.ref_artist.contains(evt)
    def save_offset(self):
        pass
    def update_offset(self, dx, dy):
        pass
    def finalize_offset(self):
        pass
class DraggableOffsetBox(DraggableBase):
    """Make an `OffsetBox` draggable with the mouse."""
    def __init__(self, ref_artist, offsetbox, use_blit=False):
        DraggableBase.__init__(self, ref_artist, use_blit=use_blit)
        self.offsetbox = offsetbox
    def save_offset(self):
        # Record the box's current pixel offset as the drag origin.
        box = self.offsetbox
        renderer = box.figure._cachedRenderer
        w, h, xd, yd = box.get_extent(renderer)
        ox, oy = box.get_offset(w, h, xd, yd, renderer)
        self.offsetbox_x, self.offsetbox_y = ox, oy
        box.set_offset((ox, oy))
    def update_offset(self, dx, dy):
        # Move the box by the pixel delta from where the drag started.
        new_xy = (self.offsetbox_x + dx, self.offsetbox_y + dy)
        self.offsetbox.set_offset(new_xy)
    def get_loc_in_canvas(self):
        # Lower-left corner of the box in canvas (display) coordinates.
        box = self.offsetbox
        renderer = box.figure._cachedRenderer
        w, h, xd, yd = box.get_extent(renderer)
        ox, oy = box._offset
        return (ox - xd, oy - yd)
class DraggableAnnotation(DraggableBase):
    """Make an annotation artist draggable with the mouse."""
    def __init__(self, annotation, use_blit=False):
        DraggableBase.__init__(self, annotation, use_blit=use_blit)
        self.annotation = annotation
    def save_offset(self):
        # Convert the annotation's current position to figure pixels and
        # remember it as the drag origin.
        ann = self.annotation
        x, y = ann.xyann
        if isinstance(ann.anncoords, tuple):
            # Separate coordinate systems for x and y.
            xcoord, ycoord = ann.anncoords
            x1, y1 = ann._get_xy(self.canvas.renderer, x, y, xcoord)
            x2, y2 = ann._get_xy(self.canvas.renderer, x, y, ycoord)
            ox0, oy0 = x1, y2
        else:
            ox0, oy0 = ann._get_xy(self.canvas.renderer, x, y, ann.anncoords)
        self.ox, self.oy = ox0, oy0
        # Drag in pixel space while the mouse button is held.
        self.annotation.anncoords = "figure pixels"
        self.update_offset(0, 0)
    def update_offset(self, dx, dy):
        ann = self.annotation
        ann.xyann = self.ox + dx, self.oy + dy
        x, y = ann.xyann
    def finalize_offset(self):
        # Convert the final pixel position back to axes-fraction
        # coordinates so the annotation stays put on resize.
        loc_in_canvas = self.annotation.xyann
        self.annotation.anncoords = "axes fraction"
        pos_axes_fraction = self.annotation.axes.transAxes.inverted()
        pos_axes_fraction = pos_axes_fraction.transform_point(loc_in_canvas)
        self.annotation.xyann = tuple(pos_axes_fraction)
# Demo: place an OffsetImage directly on the axes, and a second one inside
# an AnnotationBbox with an arrow pointing at a data point.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig = plt.figure(1)
    fig.clf()
    ax = plt.subplot(121)
    #txt = ax.text(0.5, 0.5, "Test", size=30, ha="center", color="w")
    kwargs = dict()
    # A 16x16 grayscale gradient in [0, 1).
    a = np.arange(256).reshape(16, 16) / 256.
    myimage = OffsetImage(a,
                          zoom=2,
                          norm=None,
                          origin=None,
                          **kwargs
                          )
    ax.add_artist(myimage)
    myimage.set_offset((100, 100))
    myimage2 = OffsetImage(a,
                           zoom=2,
                           norm=None,
                           origin=None,
                           **kwargs
                           )
    ann = AnnotationBbox(myimage2, (0.5, 0.5),
                         xybox=(30, 30),
                         xycoords='data',
                         boxcoords="offset points",
                         frameon=True, pad=0.4,  # BboxPatch
                         bboxprops=dict(boxstyle="round", fc="y"),
                         fontsize=None,
                         arrowprops=dict(arrowstyle="->"),
                         )
    ax.add_artist(ann)
    plt.draw()
    plt.show()
| mit |
tmrowco/electricitymap | parsers/occtonet.py | 1 | 8424 | #!/usr/bin/env python3
# coding=utf-8
import logging
import datetime
import pandas as pd
# The arrow library is used to handle datetimes
import arrow
# The request library is used to fetch content through HTTP
import requests
from io import StringIO
# Abbreviations:
# JP-HKD : Hokkaido
# JP-TH : Tohoku (incl. Niigata)
# JP-TK : Tokyo area (Kanto)
# JP-CB : Chubu
# JP-HR : Hokuriku
# JP-KN : Kansai
# JP-CG : Chugoku
# JP-SK : Shikoku
# JP-KY : Kyushu
# JP-ON : Okinawa
# Maps a sorted zone-key pair to the interconnector line id(s) queried on
# occtonet. JP-CB->JP-HR spans two lines (5 and 11), whose flows are
# summed in fetch_exchange.
exchange_mapping = {
    'JP-HKD->JP-TH':[1],
    'JP-TH->JP-TK':[2],
    'JP-CB->JP-TK':[3],
    'JP-CB->JP-KN':[4],
    'JP-CB->JP-HR':[5,11],
    'JP-HR->JP-KN':[6],
    'JP-CG->JP-KN':[7],
    'JP-KN->JP-SK':[8],
    'JP-CG->JP-SK':[9],
    'JP-CG->JP-KY':[10]
}
def fetch_exchange(zone_key1='JP-TH', zone_key2='JP-TK', session=None,
                   target_datetime=None, logger=logging.getLogger(__name__)):
    """
    Requests the last known power exchange (in MW) between two zones
    Arguments:
    ----------
    zone_key: used in case a parser is able to fetch multiple countries
    session: request session passed in order to re-use an existing session
    target_datetime: the datetime for which we want production data. If not
      provided, we should default it to now. If past data is not available,
      raise a NotImplementedError. Beware that the provided target_datetime is
      UTC. To convert to local timezone, you can use
      `target_datetime = arrow.get(target_datetime).to('America/New_York')`.
      Note that `arrow.get(None)` returns UTC now.
    logger: an instance of a `logging.Logger` that will be passed by the
      backend. Information logged will be publicly available so that correct
      execution of the logger can be checked. All Exceptions will automatically
      be logged, so when something's wrong, simply raise an Exception (with an
      explicit text). Use `logger.warning` or `logger.info` for information
      that can useful to check if the parser is working correctly. A default
      logger is used so that logger output can be seen when coding / debugging.
    Returns:
    --------
    If no data can be fetched, any falsy value (None, [], False) will be
      ignored by the backend. If there is no data because the source may have
      changed or is not available, raise an Exception.
    A dictionary in the form:
    {
      'sortedZoneKeys': 'DK->NO',
      'datetime': '2017-01-01T00:00:00Z',
      'netFlow': 0.0,
      'source': 'mysource.com'
    }
    """
    #get target date in time zone Asia/Tokyo
    query_date = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y/%m/%d')
    #get d-1 in tz Asia/Tokyo
    # NOTE(review): here the day is shifted *before* converting to
    # Asia/Tokyo, unlike query_date above — near midnight UTC the two
    # orderings can differ; confirm this is intended.
    query_date_1 = arrow.get(target_datetime).shift(days=-1).to('Asia/Tokyo').strftime('%Y/%m/%d')
    sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))
    exch_id = exchange_mapping[sortedZoneKeys]
    r = session or requests.session()
    # Login to occtonet
    Cookies = get_cookies(r)
    # Get headers for querying exchange (today and yesterday)
    Headers = get_headers(r, exch_id[0], query_date, Cookies)
    Headers_1 = get_headers(r, exch_id[0], query_date_1, Cookies)
    # Add request tokens to headers
    Headers = get_request_token(r, Headers, Cookies)
    Headers_1 = get_request_token(r, Headers_1, Cookies)
    # Query data
    data = get_data(r, Headers, Cookies)
    data_1 = get_data(r, Headers_1, Cookies)
    # Concatenate d-1 and current day
    df = pd.concat([data_1,data])
    # CB-HR -exceptions: this interconnection spans two lines, so query
    # the second line id and sum the flows.
    if sortedZoneKeys == 'JP-CB->JP-HR':
        df = df.set_index(['Date', 'Time'])
        Headers = get_headers(r, exch_id[1], query_date, Cookies)
        Headers_1 = get_headers(r, exch_id[1], query_date_1, Cookies)
        Headers = get_request_token(r, Headers, Cookies)
        Headers_1 = get_request_token(r, Headers_1, Cookies)
        data = get_data(r, Headers, Cookies)
        data_1 = get_data(r, Headers_1, Cookies)
        df2 = pd.concat([data_1,data])
        df2 = df2.set_index(['Date', 'Time'])
        df = df + df2
        df = df.reset_index()
    # fix occurrences of 24:00hrs: roll them over to 00:00 of the next day
    list24 = list(df.index[df['Time']=='24:00'])
    for idx in list24:
        df.loc[idx, 'Date'] = arrow.get(df.loc[idx, 'Date']).shift(days=1).strftime('%Y/%m/%d')
        df.loc[idx, 'Time'] = '00:00'
    # correct flow direction, if needed: occtonet reports these lines in
    # the opposite direction to the sorted zone-key convention
    flows_to_revert = ['JP-CB->JP-TK', 'JP-CG->JP-KN', 'JP-CG->JP-SK']
    if sortedZoneKeys in flows_to_revert:
        df['netFlow'] = -1 * df['netFlow']
    df['source'] = 'occtonet.occto.or.jp'
    df['datetime'] = df.apply(parse_dt, axis=1)
    df['sortedZoneKeys'] = sortedZoneKeys
    df = df[['source', 'datetime', 'netFlow', 'sortedZoneKeys']]
    results = df.to_dict('records')
    # Backend expects native datetime objects, not pandas Timestamps.
    for result in results:
        result['datetime'] = result['datetime'].to_pydatetime()
    return results
def get_cookies(session=None):
    """Log in to occtonet and return the session's cookie jar.

    A fresh requests session is created when none is supplied.
    """
    login_url = 'http://occtonet.occto.or.jp/public/dfw/RP11/OCCTO/SD/LOGIN_login'
    sess = session or requests.session()
    sess.get(login_url)
    return sess.cookies
def get_headers(session, exch_id, date, cookies):
    """Build the CA01S070C form payload for one interconnector and one date.

    Posts a 'headerInput' request so the server fills in the hidden form
    fields (msgArea, searchReqHdn, spcDayHdn, updDaytime) that later requests
    must echo back.

    Arguments:
    ----------
    session: requests session to re-use
    exch_id: numeric occtonet interconnector id (see exchange_mapping)
    date: query date as a '%Y/%m/%d' string in Asia/Tokyo
    cookies: login cookies obtained from get_cookies()

    Returns the enriched payload dict, or None when the server reports an
    error message.
    """
    import json  # local import so this fix does not touch the file header

    payload = {
        'ajaxToken': '',
        'downloadKey': '',
        'fwExtention.actionSubType': 'headerInput',
        'fwExtention.actionType': 'reference',
        'fwExtention.formId': 'CA01S070P',
        'fwExtention.jsonString': '',
        'fwExtention.pagingTargetTable': '',
        'fwExtention.pathInfo': 'CA01S070C',
        'fwExtention.prgbrh': '0',
        'msgArea': '',
        'requestToken': '',
        'requestTokenBk': '',
        'searchReqHdn': '',
        'simFlgHdn': '',
        'sntkTgtRklCdHdn': '',
        'spcDay': date,
        'spcDayHdn': '',
        'tgtRkl': '{:02d}'.format(exch_id),
        'transitionContextKey': 'DEFAULT',
        'updDaytime': ''
    }
    r = session.post('http://occtonet.occto.or.jp/public/dfw/RP11/OCCTO/SD/CA01S070C?fwExtention.pathInfo=CA01S070C&fwExtention.prgbrh=0',
                     cookies=cookies, data=payload)
    # Parse the JSON body directly instead of eval() on server-controlled
    # text (the old false->False / null->None string-replace trick): eval
    # was a code-injection risk and broke on any literal 'true' in the body.
    headers = json.loads(r.text)
    if headers['root']['errMessage']:
        print(headers['root']['errMessage'])
        return None
    else:
        payload['msgArea'] = headers['root']['bizRoot']['header']['msgArea']['value']
        payload['searchReqHdn'] = headers['root']['bizRoot']['header']['searchReqHdn']['value']
        payload['spcDayHdn'] = headers['root']['bizRoot']['header']['spcDayHdn']['value']
        payload['updDaytime'] = headers['root']['bizRoot']['header']['updDaytime']['value']
    return payload
def get_request_token(session, payload, cookies):
    """Exchange a header payload for a download key and request token.

    Mutates `payload` in place (sets the 'ok' action sub-type, then fills in
    'downloadKey' and 'requestToken' from the server response) and returns it.
    Returns None when the server reports field errors.
    """
    import json  # local import so this fix does not touch the file header

    payload['fwExtention.actionSubType'] = 'ok'
    r = session.post('http://occtonet.occto.or.jp/public/dfw/RP11/OCCTO/SD/CA01S070C?'
                     + 'fwExtention.pathInfo=CA01S070C&fwExtention.prgbrh=0',
                     cookies=cookies, data=payload)
    # json.loads replaces the previous eval() of server-controlled text,
    # which was both fragile (broke on a literal 'true') and unsafe.
    headers = json.loads(r.text)
    if headers['root']['errFields']:
        print(headers['root']['errMessage'])
        return None
    else:
        payload['downloadKey'] = headers['root']['bizRoot']['header']['downloadKey']['value']
        payload['requestToken'] = headers['root']['bizRoot']['header']['requestToken']['value']
    return payload
def get_data(session, payload, cookies):
    """Download one day's exchange CSV and return a Date/Time/netFlow frame.

    The payload must already carry a download key and request token from
    get_request_token().  Rows with missing values are dropped.
    """
    payload['fwExtention.actionSubType'] = 'download'
    download_url = ('http://occtonet.occto.or.jp/public/dfw/RP11/OCCTO/SD/CA01S070C?'
                    'fwExtention.pathInfo=CA01S070C&fwExtention.prgbrh=0')
    response = session.post(download_url, cookies=cookies, data=payload)
    # The CSV is served in Shift-JIS with Japanese column headers.
    response.encoding = 'shift-jis'
    table = pd.read_csv(StringIO(response.text), delimiter=',')
    table = table[['対象日付', '対象時刻', '潮流実績']]
    table.columns = ['Date', 'Time', 'netFlow']
    return table.dropna()
def parse_dt(row):
    """Combine a row's 'Date' and 'Time' into a tz-aware Asia/Tokyo datetime."""
    stamp = '{} {}'.format(row['Date'], row['Time']).replace('/', '-')
    return arrow.get(stamp).replace(tzinfo='Asia/Tokyo').datetime
if __name__ == '__main__':
    # Main method, never used by the Electricity Map backend, but handy
    # for testing.
    for zone_a, zone_b in (('JP-CB', 'JP-HR'), ('JP-CG', 'JP-KY')):
        print('fetch_exchange({}, {}) ->'.format(zone_a, zone_b))
        print(fetch_exchange(zone_a, zone_b)[-3:])
| gpl-3.0 |
aewhatley/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)

from time import time
import matplotlib.pyplot as plt

from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier

# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1

# Load the faces dataset
data = fetch_olivetti_faces()
# Flatten each face image into a single row of pixel features.
X = data.images.reshape((len(data.images), -1))
y = data.target

mask = y < 5  # Limit to 5 classes
X = X[mask]
y = y[mask]

# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
                              max_features=128,
                              n_jobs=n_jobs,
                              random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
# Reshape the flat importance vector back to the original image shape
# so it can be displayed as a heat map.
importances = importances.reshape(data.images[0].shape)

# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
rajat1994/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
# Load all ten digit classes; X holds flattened 8x8 images, y the labels.
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape

# Fix the RNG seed so the random image shifts in nudge_images are reproducible.
np.random.seed(0)
def nudge_images(X, y):
    """Double the dataset by appending a randomly shifted copy of each image.

    Having a larger dataset shows more clearly the behavior of the methods,
    but we multiply the size of the dataset only by 2, as the cost of the
    hierarchical clustering methods are strongly super-linear in n_samples.
    """
    def random_shift(flat_image):
        # Small random translation of the 8x8 image, flattened back to 1-D.
        offset = .3 * np.random.normal(size=2)
        return ndimage.shift(flat_image.reshape((8, 8)), offset,
                             mode='constant').ravel()

    shifted = np.apply_along_axis(random_shift, 1, X)
    return np.concatenate([X, shifted]), np.concatenate([y, y], axis=0)
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
    """Plot the 2D embedding, drawing each sample as its digit character.

    Each point is rendered as the text of its true digit (module-level ``y``),
    colored by the cluster assignment in ``labels``.
    NOTE(review): the ``X`` parameter is accepted but never used in the body —
    presumably kept for call-site symmetry; confirm before removing.
    """
    # Rescale the embedding to the unit square so text positions fill the axes.
    x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
    X_red = (X_red - x_min) / (x_max - x_min)
    plt.figure(figsize=(6, 4))
    for i in range(X_red.shape[0]):
        plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
                 color=plt.cm.spectral(labels[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})
    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title, size=17)
    plt.axis('off')
    plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
# Project the digits into 2D once; all linkage strategies cluster this embedding.
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")

from sklearn.cluster import AgglomerativeClustering

# Fit, time, and plot each linkage strategy on the same embedding.
for linkage in ('ward', 'average', 'complete'):
    clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
    t0 = time()
    clustering.fit(X_red)
    print("%s : %.2fs" % (linkage, time() - t0))
    plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)

plt.show()
| bsd-3-clause |
WafaaT/spark-tk | regression-tests/sparktkregtests/testcases/models/logistic_regression_test.py | 10 | 19546 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests Logistic Regression Model against known values, calculated in R"""
import unittest
from sparktkregtests.lib import sparktk_test
from numpy import array
class LogisticRegression(sparktk_test.SparkTKTestCase):
    """Regression tests for the spark-tk logistic regression model.

    Covers the happy paths of train/test/predict (values validated against R,
    see _standard_summary) plus a battery of negative tests asserting the
    error messages raised for invalid argument types and values.
    """

    def setUp(self):
        """Build the frames needed for the tests."""
        super(LogisticRegression, self).setUp()
        binomial_dataset = self.get_file("small_logit_binary.csv")
        # vec0..vec4 are the features; 'res' is the training label, 'count'
        # a frequency column, and 'actual' the expected prediction.
        schema = [("vec0", float),
                  ("vec1", float),
                  ("vec2", float),
                  ("vec3", float),
                  ("vec4", float),
                  ("res", int),
                  ("count", int),
                  ("actual", int)]
        self.binomial_frame = self.context.frame.import_csv(
            binomial_dataset, schema=schema, header=True)

    def test_predict(self):
        """test predict works as expected"""
        log_model = self.context.models.classification.logistic_regression.train(
            self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            'res')
        predict_frame = log_model.predict(
            self.binomial_frame,
            ["vec0", "vec1", "vec2", "vec3", "vec4"])
        # Every predicted label must match the 'actual' column of the fixture.
        frame = predict_frame.copy(["actual", "predicted_label"])
        labels = frame.to_pandas(frame.count())
        for _, row in labels.iterrows():
            self.assertEqual(row["actual"], row["predicted_label"])

    def test_bad_predict_observation(self):
        """test invalid observations on predict"""
        log_model = self.context.models.classification.logistic_regression.train(
            self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            'res', num_classes=2)
        with self.assertRaisesRegexp(
                Exception,
                "Expected str or list of str"):
            log_model.predict(self.binomial_frame, 7)

    def test_bad_predict_observation_value(self):
        """test invalid observation value on predict"""
        log_model = self.context.models.classification.logistic_regression.train(
            self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            'res', num_classes=2)
        with self.assertRaisesRegexp(
                Exception,
                "Number of columns for train and predict should be same"):
            log_model.predict(self.binomial_frame, ["err"])

    def test_bad_feature_column_type(self):
        """test invalid feature column type"""
        with self.assertRaisesRegexp(
                TypeError, "\'int\' object is not iterable"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, 7, 'res', num_classes=2)

    def test_no_such_feature_column_train(self):
        """test invalid feature column name while training"""
        with self.assertRaisesRegexp(
                Exception, "Invalid column name blah provided"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "blah", "vec3", "vec4"],
                'res', num_classes=2)

    def test_no_feature_column_test(self):
        """test invalid feature column name in test"""
        log_model = self.context.models.classification.logistic_regression.train(
            self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            'res', num_classes=2)
        with self.assertRaisesRegexp(
                Exception, "Invalid column name blah provided"):
            test_result = log_model.test(
                self.binomial_frame,
                ["vec0", "vec1", "blah", "vec3", "vec4"],
                'res')

    def test_label_column_type_train(self):
        """test invalid label column type name in train"""
        with self.assertRaisesRegexp(
                Exception, "Method train.* does not exist"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                7, num_classes=2)

    def test_bad_optimizer_type(self):
        """test invalid optimizer type train"""
        with self.assertRaisesRegexp(
                Exception, "Method train.* does not exist"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                'res', num_classes=2, optimizer=7)

    def test_bad_optimizer_value(self):
        """test invalid optmizer value train"""
        with self.assertRaisesRegexp(
                Exception, "optimizer name must be 'LBFGS' or 'SGD'"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                'res', num_classes=2, optimizer="err")

    def test_bad_num_classes_type(self):
        """test bad num_classes data type in train"""
        with self.assertRaisesRegexp(
                Exception, "Method train.* does not exist"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                'res', num_classes="err")

    def test_bad_frame_type(self):
        """test invalid frame type train"""
        with self.assertRaisesRegexp(
                Exception, "'str' object has no attribute '_tc'"):
            log_model = self.context.models.classification.logistic_regression.train(
                "ERR", ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", num_classes=2)

    def test_optimizer_bad_config(self):
        """test invalid sgd configuraton"""
        # SGD only supports binary classification in spark-tk.
        with self.assertRaisesRegexp(
                Exception,
                "multinomial logistic regression not supported for SGD"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "count", num_classes=4, optimizer="SGD")

    def test_covariance_type(self):
        """test invalid covariance type"""
        with self.assertRaisesRegexp(
                ValueError, "compute_covariance must be a bool"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", num_classes=2, compute_covariance=7)

    def test_covariance_false(self):
        """test not computing covariance"""
        log_model = self.context.models.classification.logistic_regression.train(
            self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            "res", compute_covariance=False)
        self.assertIsNone(log_model.training_summary.covariance_matrix)

    def test_intercept_false(self):
        """test not having an intercept"""
        log_model = self.context.models.classification.logistic_regression.train(
            self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            "res", intercept=False)
        self.assertNotIn(
            "intercept", log_model.training_summary.coefficients.keys())

    def test_intercept_type(self):
        """test invalid intercept type"""
        with self.assertRaisesRegexp(
                ValueError, "intercept must be a bool"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", intercept=7)

    def test_feature_scaling_type(self):
        """test feature scaling type"""
        with self.assertRaisesRegexp(
                ValueError, "feature_scaling must be a bool"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", feature_scaling=7)

    def test_num_corrections_type(self):
        """test invalid num corrections type"""
        with self.assertRaisesRegexp(
                Exception, "Method train.* does not exist"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", num_corrections="ERR")

    def test_threshold_type(self):
        """test invalid threshold type"""
        with self.assertRaisesRegexp(
                Exception, "Method train.* does not exist"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", threshold="ERR")

    def test_regularization_invalid(self):
        """test regularization parameter"""
        with self.assertRaisesRegexp(
                Exception, "regularization type must be 'L1' or 'L2'"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", reg_type="ERR")

    def test_num_iterations_type(self):
        """test invalid num iterations type"""
        with self.assertRaisesRegexp(
                Exception, "Method train.* does not exist"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", num_iterations="ERR")

    def test_regularization_value_type(self):
        """test invalid regularization value"""
        with self.assertRaisesRegexp(
                Exception, "Method train.* does not exist"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", reg_param="ERR")

    def test_convergence_type(self):
        """test invalid convergence type"""
        with self.assertRaisesRegexp(
                Exception, "Method train.* does not exist"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", convergence_tolerance="ERR")

    def test_regularization_type(self):
        """test invalid reg type type"""
        with self.assertRaisesRegexp(
                Exception, "Method train.* does not exist"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", reg_type=7)

    def test_step_size_type(self):
        """test invalid step size type"""
        with self.assertRaisesRegexp(
                Exception, "could not convert string to float: ERR"):
            log_model = self.context.models.classification.logistic_regression.train(
                self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
                "res", step_size="ERR")

    def test_logistic_regression_test(self):
        """test the default happy path of logistic regression test"""
        log_model = self.context.models.classification.logistic_regression.train(
            self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            "res")
        values = log_model.test(
            self.binomial_frame,
            ["vec0", "vec1", "vec2", "vec3", "vec4"], "res")
        # Count each confusion-matrix cell by filtering the fixture frame
        # on (res, actual) combinations.
        tp_f = self.binomial_frame.copy()
        tp_f.filter(lambda x: x['res'] == 1 and x['actual'] == 1)
        tp = float(tp_f.count())
        tn_f = self.binomial_frame.copy()
        tn_f.filter(lambda x: x['res'] == 0 and x['actual'] == 0)
        tn = float(tn_f.count())
        fp_f = self.binomial_frame.copy()
        fp_f.filter(lambda x: x['res'] == 0 and x['actual'] == 1)
        fp = float(fp_f.count())
        fn_f = self.binomial_frame.copy()
        fn_f.filter(lambda x: x['res'] == 1 and x['actual'] == 0)
        fn = float(fn_f.count())
        # manually build the confusion matrix and derivative properties
        precision = tp/(tp+fp)
        recall = tp/(tp+fn)
        self.assertAlmostEqual(values.precision, precision)
        self.assertAlmostEqual(values.recall, recall)
        self.assertAlmostEqual(
            values.f_measure, 2*(precision*recall)/(precision+recall))
        self.assertAlmostEqual(values.accuracy, (tp+tn)/(tp+fn+tn+fp))
        self.assertAlmostEqual(
            values.confusion_matrix["Predicted_Pos"]["Actual_Pos"], tp)
        self.assertAlmostEqual(
            values.confusion_matrix["Predicted_Pos"]["Actual_Neg"], fp)
        self.assertAlmostEqual(
            values.confusion_matrix["Predicted_Neg"]["Actual_Neg"], tn)
        self.assertAlmostEqual(
            values.confusion_matrix["Predicted_Neg"]["Actual_Pos"], fn)

    def test_logistic_regression_train(self):
        """test logistic regression train"""
        log_model = self.context.models.classification.logistic_regression.train(
            self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            "res")
        self._standard_summary(log_model.training_summary, False)

    def test_logistic_regression_train_sgd(self):
        """test logistic regression train with sgd"""
        log_model = self.context.models.classification.logistic_regression.train(
            self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            "res", num_classes=2, optimizer="SGD", step_size=30)
        self._standard_summary(log_model.training_summary, False)

    def test_logistic_regression_train_count_column(self):
        """test logistic regression train with count"""
        log_model = self.context.models.classification.logistic_regression.train(
            self.binomial_frame, ["vec0", "vec1", "vec2", "vec3", "vec4"],
            "res", "count")
        # Frequency-weighted training yields the same coefficients but the
        # per-coefficient statistics are not checked (coefficients_only=True).
        self._standard_summary(log_model.training_summary, True)

    def _standard_summary(self, summary, coefficients_only):
        """verfies the summary object against calculated values from R"""
        self.assertEqual(summary.num_features, 5)
        self.assertEqual(summary.num_classes, 2)
        self.assertAlmostEqual(
            summary.coefficients["intercept"],
            0.9, delta=0.02)
        self.assertAlmostEqual(
            summary.coefficients["vec0"], 0.4, delta=0.02)
        self.assertAlmostEqual(
            summary.coefficients["vec1"], 0.7, delta=0.02)
        self.assertAlmostEqual(
            summary.coefficients["vec2"], 0.9, delta=0.02)
        self.assertAlmostEqual(
            summary.coefficients["vec3"], 0.3, delta=0.02)
        self.assertAlmostEqual(
            summary.coefficients["vec4"], 1.4, delta=0.02)
        if not coefficients_only:
            # Per-coefficient inference statistics, also validated against R.
            self.assertAlmostEqual(
                summary.degrees_freedom["intercept"], 1)
            self.assertAlmostEqual(
                summary.standard_errors["intercept"],
                0.012222, delta=0.02)
            self.assertAlmostEqual(
                summary.wald_statistic["intercept"], 73.62,
                delta=0.02)
            self.assertAlmostEqual(
                summary.p_value["intercept"], 0,
                delta=0.02)
            self.assertAlmostEqual(
                summary.degrees_freedom["vec0"], 1)
            self.assertAlmostEqual(
                summary.standard_errors["vec0"], 0.005307,
                delta=0.02)
            self.assertAlmostEqual(
                summary.wald_statistic["vec0"], 75.451032,
                delta=0.02)
            self.assertAlmostEqual(
                summary.p_value["vec0"], 0,
                delta=0.02)
            self.assertAlmostEqual(
                summary.degrees_freedom["vec1"], 1)
            self.assertAlmostEqual(
                summary.standard_errors["vec1"], 0.006273,
                delta=0.02)
            self.assertAlmostEqual(
                summary.wald_statistic["vec1"], 110.938600,
                delta=0.02)
            self.assertAlmostEqual(
                summary.p_value["vec1"], 0,
                delta=0.02)
            self.assertAlmostEqual(
                summary.degrees_freedom["vec2"], 1)
            self.assertAlmostEqual(
                summary.standard_errors["vec2"], 0.007284,
                delta=0.02)
            self.assertAlmostEqual(
                summary.wald_statistic["vec2"], 124.156836,
                delta=0.02)
            self.assertAlmostEqual(
                summary.p_value["vec2"], 0,
                delta=0.02)
            self.assertAlmostEqual(
                summary.degrees_freedom["vec3"], 1)
            self.assertAlmostEqual(
                summary.standard_errors["vec3"], 0.005096,
                delta=0.02)
            self.assertAlmostEqual(
                summary.wald_statistic["vec3"], 58.617307,
                delta=0.02)
            self.assertAlmostEqual(
                summary.p_value["vec3"], 0,
                delta=0.02)
            self.assertAlmostEqual(
                summary.degrees_freedom["vec4"], 1)
            self.assertAlmostEqual(
                summary.standard_errors["vec4"], 0.009784,
                delta=0.02)
            self.assertAlmostEqual(
                summary.wald_statistic["vec4"], 143.976980,
                delta=0.02)
            self.assertAlmostEqual(
                summary.p_value["vec4"], 0,
                delta=0.02)
            #covariance matrix obtained from R
            r_cov = [
                [1.495461e-04, 1.460983e-05, 2.630870e-05,
                 3.369595e-05, 9.938721e-06, 5.580564e-05],
                [1.460983e-05, 2.816412e-05, 1.180398e-05,
                 1.477251e-05, 6.008541e-06, 2.206105e-05],
                [2.630870e-05, 1.180398e-05, 3.935554e-05,
                 2.491004e-05, 8.871815e-06, 4.022959e-05],
                [3.369595e-05, 1.477251e-05, 2.491004e-05,
                 5.305153e-05, 1.069435e-05, 5.243129e-05],
                [9.938721e-06, 6.008541e-06, 8.871815e-06,
                 1.069435e-05, 2.596849e-05, 1.776645e-05],
                [5.580564e-05, 2.206105e-05, 4.022959e-05,
                 5.243129e-05, 1.776645e-05, 9.572358e-05]]
            #covariance matrix as in log_model summary
            summ_cov = summary.covariance_matrix.take(
                summary.covariance_matrix.count())
            #compare all corresponding values in both matrices
            for (r_list, summ_list) in zip(r_cov, summ_cov):
                for (r_val, summ_val) in zip(r_list, summ_list):
                    self.assertAlmostEqual(r_val, summ_val)
if __name__ == '__main__':
    # Run the suite when the module is executed directly.
    unittest.main()
| apache-2.0 |
hydrosquall/tiingo-python | tests/test_tiingo_pandas.py | 1 | 7087 | #!/usr/bin/env python
"""Unit tests for pandas functionality in tiingo"""
import vcr
from unittest import TestCase
from tiingo import TiingoClient
from tiingo.exceptions import APIColumnNameError, InstallPandasException, MissingRequiredArgumentError
try:
import pandas as pd
pandas_is_installed = True
except ImportError:
pandas_is_installed = False
class TestTiingoWithPython(TestCase):
    """Exercise TiingoClient.get_dataframe against recorded (vcr) fixtures.

    All network traffic is replayed from the cassettes under tests/fixtures,
    so the tests are deterministic and offline.  The whole class is skipped
    when pandas is not installed (see TestTiingoWithoutPython for that case).

    Fix: two test methods were defined twice (``test_return_pandas_format_multiple``
    and ``test_return_pandas_daily``); the later definitions shadowed the
    earlier ones, so the earlier tests silently never ran.  The CSV variant
    and the metric-name variant have been renamed so all tests execute.
    """

    def setUp(self):
        if pandas_is_installed:
            self._client = TiingoClient()
        else:
            self.skipTest("test_tiingo_pandas: Pandas not installed.")

    @vcr.use_cassette('tests/fixtures/ticker_price_pandas_weekly.yaml')
    def test_return_pandas_format(self):
        """Test that valid pandas format is returned when specified"""
        prices = self._client.get_dataframe("GOOGL", startDate='2018-01-05',
                                            endDate='2018-01-19', frequency='weekly')
        self.assertTrue(isinstance(prices, pd.DataFrame))
        assert len(prices.index) == 3

    @vcr.use_cassette('tests/fixtures/ticker_price_pandas_weekly_multiple_tickers.yaml')
    def test_return_pandas_format_multiple(self):
        """Test that valid pandas format is returned when specified"""
        tickers = ["GOOGL", "AAPL"]
        prices = self._client.get_dataframe(tickers, startDate='2018-01-05',
                                            endDate='2018-01-19', metric_name='adjClose', frequency='weekly')
        self.assertTrue(isinstance(prices, pd.DataFrame))
        assert len(prices.columns) == 2
        assert len(prices.index) == 3

    @vcr.use_cassette('tests/fixtures/ticker_price_pandas_weekly_multiple_tickers_csv.yaml')
    def test_return_pandas_format_multiple_csv(self):
        """Test that valid pandas format is returned for CSV-fetched data"""
        # Renamed from test_return_pandas_format_multiple: the duplicate name
        # was shadowing the JSON variant above.
        tickers = ["GOOGL", "AAPL"]
        prices = self._client.get_dataframe(tickers, startDate='2018-01-05',
                                            endDate='2018-01-19', metric_name='adjClose', frequency='weekly', fmt='csv')
        self.assertTrue(isinstance(prices, pd.DataFrame))
        assert len(prices.columns) == 2
        assert len(prices.index) == 3

    @vcr.use_cassette('tests/fixtures/ticker_price_pandas_daily.yaml')
    def test_return_pandas_daily(self):
        """Test that valid pandas format is returned when specified"""
        prices = self._client.get_dataframe("GOOGL", startDate='2018-01-05',
                                            endDate='2018-01-19', frequency='daily')
        self.assertTrue(isinstance(prices, pd.DataFrame))
        assert len(prices.columns) == 12

    @vcr.use_cassette('tests/fixtures/ticker_price_pandas_daily_csv.yaml')
    def test_return_pandas_daily_csv(self):
        """Test that valid pandas format is returned when specified and csv data requested"""
        prices = self._client.get_dataframe("GOOGL",
                                            startDate='2018-01-05', endDate='2018-01-19',
                                            frequency='daily', fmt='csv')
        self.assertTrue(isinstance(prices, pd.DataFrame))
        self.assertTrue(isinstance(prices.index, pd.DatetimeIndex))
        assert prices.index.tz.zone == 'UTC'
        assert len(prices) == 10
        assert len(prices.columns) == 12

    @vcr.use_cassette('tests/fixtures/ticker_price_pandas_daily_metric_name.yaml')
    def test_return_pandas_daily_metric_name(self):
        """Test that one column is returned when a metric name is specified"""
        # Renamed from test_return_pandas_daily: the duplicate name was
        # shadowing the full-frame variant above.
        prices = self._client.get_dataframe("GOOGL", startDate='2018-01-05', metric_name='adjClose',
                                            endDate='2018-01-19', frequency='daily')
        self.assertTrue(isinstance(prices, pd.Series))
        assert len(prices.index) == 10

    @vcr.use_cassette('tests/fixtures/ticker_price_pandas_daily_metric_name_csv.yaml')
    def test_return_pandas_daily_metric_name_csv(self):
        """Test that one column is returned when a metric name is specified and csv data requested

        Request unadjusted close column to ensure data remains constant in case GOOGL splits
        or distributes dividends.
        """
        prices = self._client.get_dataframe("GOOGL", startDate='2018-01-05', metric_name='close',
                                            endDate='2018-01-19', frequency='daily', fmt='csv')
        self.assertTrue(isinstance(prices, pd.Series))
        self.assertTrue(isinstance(prices.index, pd.DatetimeIndex))
        assert prices.index.tz.zone == 'UTC'
        assert prices.values.tolist() == [
            1110.29, 1114.21, 1112.79, 1110.14, 1112.05,
            1130.65, 1130.7, 1139.1, 1135.97, 1143.5]
        assert len(prices.index) == 10

    @vcr.use_cassette('tests/fixtures/ticker_price_pandas_daily_equivalent_requesting_json_or_csv.yaml')
    def test_price_pandas_daily_equivalent_requesting_json_or_csv(self):
        """Test that equivalent data is returned when specifying reuqest format in json or csv.
        """
        prices_json = self._client.get_dataframe("GOOGL",
                                                 startDate='2018-01-05', endDate='2018-01-19',
                                                 metric_name='close', frequency='daily')
        prices_csv = self._client.get_dataframe("GOOGL",
                                                startDate='2018-01-05', endDate='2018-01-19',
                                                metric_name='close', frequency='daily', fmt='csv')
        self.assertTrue(prices_json.equals(prices_csv))

    @vcr.use_cassette('tests/fixtures/intraday_price.yaml')
    def test_intraday_ticker_price(self):
        """Test the EOD Prices Endpoint with data param"""
        prices = self._client.get_dataframe("GOOGL",
                                            startDate="2018-01-02",
                                            endDate="2018-01-02",
                                            frequency="30Min")
        self.assertGreater(len(prices), 1)

    def test_metric_name_column_error(self):
        # An unknown metric name must raise rather than return empty data.
        with self.assertRaises(APIColumnNameError):
            self._client.get_dataframe(['GOOGL', 'AAPL'], startDate='2018-01-05',
                                       endDate='2018-01-19', metric_name='xopen', frequency='weekly')

    def test_metric_name_missing_when_multiple_tickers(self):
        # metric_name is mandatory when several tickers are requested.
        with self.assertRaises(MissingRequiredArgumentError):
            self._client.get_dataframe(['GOOGL', 'AAPL'], frequency='weekly')

    @vcr.use_cassette('tests/fixtures/ticker_price_pandas_single.yaml')
    def test_pandas_edge_case(self):
        """Test single price/date being returned as a frame"""
        prices = self._client.get_dataframe("GOOGL")
        assert len(prices) == 1
        assert len(prices.index) == 1
class TestTiingoWithoutPython(TestCase):
    """Verify get_dataframe fails loudly when pandas is not installed."""

    def setUp(self):
        # These tests only make sense when pandas is absent; otherwise skip.
        if pandas_is_installed:
            self.skipTest("test_tiingo_without_pandas: Pandas is installed.")
        else:
            self._client = TiingoClient()

    @vcr.use_cassette('tests/fixtures/ticker_price_pandas_single.yaml')
    def test_get_dataframe_without_pandas(self):
        # get_dataframe depends on pandas, so it must raise a clear error.
        with self.assertRaises(InstallPandasException):
            self._client.get_dataframe("GOOGL")
| mit |
sonnyhu/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 93 | 3243 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function

import re
import sys
from collections import Counter, defaultdict
from time import time

import numpy as np

from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
    """Returns the number of non-zero columns in a CSR matrix X."""
    _, col_indices = X.nonzero()
    return np.unique(col_indices).size
def tokens(doc):
    """Extract tokens from doc.

    This uses a simple regex to break strings into tokens. For a more
    principled approach, see CountVectorizer or TfidfVectorizer.
    """
    for word in re.findall(r"\w+", doc):
        yield word.lower()
def token_freqs(doc):
    """Extract a dict mapping tokens from doc to their frequencies."""
    # Counter replaces the manual defaultdict(int) accumulation loop: it
    # counts in one call and still behaves as the dict the vectorizers expect.
    return Counter(tokens(doc))
# Restrict the 20-newsgroups corpus to a handful of categories to keep the
# benchmark quick.
categories = [
    'alt.atheism',
    'comp.graphics',
    'comp.sys.ibm.pc.hardware',
    'misc.forsale',
    'rec.autos',
    'sci.space',
    'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None

print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()

# Optional CLI argument: the hashing dimensionality.
try:
    n_features = int(sys.argv[1])
except IndexError:
    n_features = 2 ** 18
except ValueError:
    print("not a valid number of features: %r" % sys.argv[1])
    sys.exit(1)

print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()

# Benchmark 1: DictVectorizer on per-document frequency dicts.
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()

# Benchmark 2: FeatureHasher on the same frequency dicts.
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()

# Benchmark 3: FeatureHasher counting raw token streams itself.
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
skudriashev/incubator-airflow | airflow/www/views.py | 2 | 97479 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from past.builtins import basestring, unicode
import ast
import logging
import os
import pkg_resources
import socket
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
import math
import json
import bleach
from collections import defaultdict
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
abort, redirect, url_for, request, Markup, Response, current_app, render_template,
make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask_login import flash
from flask._compat import PY2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2 import escape
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField, validators)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import set_dag_run_state
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.www.validators import GreaterEqualThan
# Safety caps on the number of rows fetched for ad-hoc queries and charts.
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
# Module-level DagBag shared by all view handlers in this module.
dagbag = models.DagBag(settings.DAGS_FOLDER)
# Re-exported auth helpers from the configured login backend.
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
PAGE_SIZE = conf.getint('webserver', 'page_size')
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
    # filter_by_owner if authentication is enabled and filter_by_owner is true
    FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
    """Column formatter: render a row's dag_id as a link to its graph view."""
    sanitized_id = bleach.clean(m.dag_id)
    graph_url = url_for('airflow.graph', dag_id=sanitized_id)
    return Markup('<a href="{}">{}</a>'.format(graph_url, sanitized_id))
def log_url_formatter(v, c, m, p):
    # Render a book icon linking to the row's log page. Note that .format
    # is called on the Markup object, so the interpolated m.log_url is
    # HTML-escaped (Markup.format escapes args, unlike plain str.format).
    return Markup(
        '<a href="{m.log_url}">'
        ' <span class="glyphicon glyphicon-book" aria-hidden="true">'
        '</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
    # Render a task_id as two links: the task-detail view, and the graph
    # view filtered to this task plus its upstream. The ids are run through
    # bleach before being interpolated into markup that Markup marks safe.
    dag_id = bleach.clean(m.dag_id)
    task_id = bleach.clean(m.task_id)
    url = url_for(
        'airflow.task',
        dag_id=dag_id,
        task_id=task_id,
        execution_date=m.execution_date.isoformat())
    url_root = url_for(
        'airflow.graph',
        dag_id=dag_id,
        root=task_id,
        execution_date=m.execution_date.isoformat())
    return Markup(
        """
        <span style="white-space: nowrap;">
        <a href="{url}">{task_id}</a>
        <a href="{url_root}" title="Filter on this task and upstream">
        <span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
        aria-hidden="true"></span>
        </a>
        </span>
        """.format(**locals()))
def state_token(state):
    """Render a task/dag state as a colored bootstrap label."""
    color = State.color(state)
    label_html = (
        '<span class="label" style="background-color:{color};">'
        '{state}</span>'.format(color=color, state=state))
    return Markup(label_html)
def state_f(v, c, m, p):
    """Column formatter delegating to state_token for the row's state."""
    current_state = m.state
    return state_token(current_state)
def duration_f(v, c, m, p):
    """Column formatter: show duration as a timedelta once the task ended.

    Returns None (blank cell) while end_date or duration is unset.
    """
    if not (m.end_date and m.duration):
        return None
    return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
    """Column formatter: compact ISO timestamp wrapped in <nobr>.

    The year prefix is trimmed when it matches the current UTC year.
    """
    value = getattr(m, p)
    dttm = value.isoformat() if value else ''
    current_year = datetime.utcnow().isoformat()[:4]
    if current_year == dttm[:4]:
        dttm = dttm[5:]
    return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
    """Column formatter: wrap the attribute value in <nobr> so it won't wrap."""
    value = getattr(m, p)
    return Markup("<nobr>{}</nobr>".format(value))
def label_link(v, c, m, p):
    """Render a chart row's label as a link to the chart view.

    The chart's default_params (stored as a Python-literal string) are
    decoded with ast.literal_eval and forwarded as query args; any
    malformed value falls back to no extra parameters.
    """
    try:
        default_params = ast.literal_eval(m.default_params)
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only literal-eval failures should be tolerated.
    except Exception:
        default_params = {}
    url = url_for(
        'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
        **default_params)
    return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
    """Render a pool name as a link to its task-instance listing."""
    filter_url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
    return Markup("<a href='{url}'>{m.pool}</a>".format(url=filter_url, m=m))
def pygment_html_render(s, lexer=lexers.TextLexer):
    """Highlight s as HTML (with line numbers) using a pygments lexer class."""
    formatter = HtmlFormatter(linenos=True)
    return highlight(s, lexer(), formatter)
def render(obj, lexer):
    """Render obj -- a string, a list/tuple of strings, or a dict of
    strings -- to syntax-highlighted HTML via pygment_html_render."""
    if isinstance(obj, basestring):
        return pygment_html_render(obj, lexer)
    out = ""
    if isinstance(obj, (tuple, list)):
        for idx, item in enumerate(obj):
            out += "<div>List item #{}</div>".format(idx)
            out += "<div>" + pygment_html_render(item, lexer) + "</div>"
    elif isinstance(obj, dict):
        for key, val in obj.items():
            out += '<div>Dict item "{}"</div>'.format(key)
            out += "<div>" + pygment_html_render(val, lexer) + "</div>"
    return out
def wrapped_markdown(s):
    """Convert markdown text s to HTML wrapped in a rich_doc container div."""
    rendered = markdown.markdown(s)
    return '<div class="rich_doc">' + rendered + "</div>"
# Maps well-known task attribute names to HTML renderers so templated
# fields are shown syntax-highlighted in the UI (used by the task and
# rendered views below).
attr_renderer = {
    'bash_command': lambda x: render(x, lexers.BashLexer),
    'hql': lambda x: render(x, lexers.SqlLexer),
    'sql': lambda x: render(x, lexers.SqlLexer),
    'doc': lambda x: render(x, lexers.TextLexer),
    'doc_json': lambda x: render(x, lexers.JsonLexer),
    'doc_rst': lambda x: render(x, lexers.RstLexer),
    'doc_yaml': lambda x: render(x, lexers.YamlLexer),
    'doc_md': wrapped_markdown,
    'python_callable': lambda x: render(
        inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
    """Decorator for views requiring data profiling access"""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Allow when authentication is disabled outright, or when the
        # logged-in user carries the data-profiling permission.
        if (
                current_app.config['LOGIN_DISABLED'] or
                (not current_user.is_anonymous() and current_user.data_profiling())
        ):
            return f(*args, **kwargs)
        else:
            flash("This page requires data profiling privileges", "error")
            return redirect(url_for('admin.index'))
    return decorated_function
def fused_slots(v, c, m, p):
    """Link a pool's used-slot count to its running task instances."""
    filter_url = (
        '/admin/taskinstance/' +
        '?flt1_pool_equals=' + m.pool +
        '&flt2_state_equals=running')
    return Markup("<a href='{0}'>{1}</a>".format(filter_url, m.used_slots()))
def fqueued_slots(v, c, m, p):
    """Link a pool's queued-slot count to its queued task instances."""
    filter_url = (
        '/admin/taskinstance/' +
        '?flt1_pool_equals=' + m.pool +
        '&flt2_state_equals=queued&sort=10&desc=1')
    return Markup("<a href='{0}'>{1}</a>".format(filter_url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
    """Walk ``tasks`` (a task, a list of tasks, or a SubDagOperator) and
    collect task ids, subdag dag ids, and a task_id -> dag mapping.

    All three collection arguments are mutated in place.
    """
    if isinstance(tasks, list):
        for task in tasks:
            recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
        return
    if isinstance(tasks, SubDagOperator):
        subtasks = tasks.subdag.tasks
        dag_ids.append(tasks.subdag.dag_id)
        for subtask in subtasks:
            if subtask.task_id not in task_ids:
                task_ids.append(subtask.task_id)
                task_id_to_dag[subtask.task_id] = tasks.subdag
        # Recurse into the subdag's tasks to pick up nested subdags too.
        recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
    if isinstance(tasks, BaseOperator):
        task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
    """Heuristic chart height: a 600px base plus 10px per task.

    TODO(aoen): See [AIRFLOW-1263]. nvd3 charts are fixed-height, so we
    scale with the task count to keep charts for large DAGs readable;
    ideally nvd3 would size charts dynamically.
    """
    base_height = 600
    per_task_height = 10
    return base_height + per_task_height * len(dag.tasks)
class Airflow(BaseView):
    def is_visible(self):
        # Hide this view from the flask-admin menu; its endpoints are
        # reached directly by URL.
        return False
    @expose('/')
    @login_required
    def index(self):
        """Home page: the DAGs listing."""
        return self.render('airflow/dags.html')
    @expose('/chart_data')
    @data_profiling_required
    @wwwutils.gzipped
    # @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
    def chart_data(self):
        """Execute a stored chart's templated SQL and return its data.

        Renders chart.sql / chart.label through a sandboxed Jinja
        environment with request args as parameters, runs the query
        through the chart's connection hook, and returns either a CSV
        response or a JSON payload containing nvd3 chart HTML.
        """
        from airflow import macros
        import pandas as pd
        # Charts execute arbitrary SQL; disabled entirely in secure mode.
        if conf.getboolean('core', 'secure_mode'):
            abort(404)
        session = settings.Session()
        chart_id = request.args.get('chart_id')
        csv = request.args.get('csv') == "true"
        chart = session.query(models.Chart).filter_by(id=chart_id).first()
        db = session.query(
            models.Connection).filter_by(conn_id=chart.conn_id).first()
        session.expunge_all()
        session.commit()
        session.close()
        payload = {
            "state": "ERROR",
            "error": ""
        }
        # Processing templated fields
        try:
            args = ast.literal_eval(chart.default_params)
            if type(args) is not type(dict()):
                raise AirflowException('Not a dict')
        except:
            args = {}
            payload['error'] += (
                "Default params is not valid, string has to evaluate as "
                "a Python dictionary. ")
        request_dict = {k: request.args.get(k) for k in request.args}
        args.update(request_dict)
        args['macros'] = macros
        # Sandboxed environment: the SQL/label templates are user-supplied.
        sandbox = ImmutableSandboxedEnvironment()
        sql = sandbox.from_string(chart.sql).render(**args)
        label = sandbox.from_string(chart.label).render(**args)
        payload['sql_html'] = Markup(highlight(
            sql,
            lexers.SqlLexer(),  # Lexer call
            HtmlFormatter(noclasses=True))
        )
        payload['label'] = label
        pd.set_option('display.max_colwidth', 100)
        hook = db.get_hook()
        try:
            df = hook.get_pandas_df(
                wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
            df = df.fillna(0)
        except Exception as e:
            payload['error'] += "SQL execution failed. Details: " + str(e)
            # NOTE(review): when the query fails, df is left unbound but is
            # still dereferenced below (df.to_csv / len(df)) -- latent
            # NameError path; confirm intended behavior.
        if csv:
            return Response(
                response=df.to_csv(index=False),
                status=200,
                mimetype="application/text")
        if not payload['error'] and len(df) == CHART_LIMIT:
            payload['warning'] = (
                "Data has been truncated to {0}"
                " rows. Expect incomplete results.").format(CHART_LIMIT)
        if not payload['error'] and len(df) == 0:
            payload['error'] += "Empty result set. "
        elif (
                not payload['error'] and
                chart.sql_layout == 'series' and
                chart.chart_type != "datatable" and
                len(df.columns) < 3):
            payload['error'] += "SQL needs to return at least 3 columns. "
        elif (
                not payload['error'] and
                chart.sql_layout == 'columns' and
                len(df.columns) < 2):
            payload['error'] += "SQL needs to return at least 2 columns. "
        elif not payload['error']:
            import numpy as np
            chart_type = chart.chart_type
            data = None
            if chart.show_datatable or chart_type == "datatable":
                data = df.to_dict(orient="split")
                data['columns'] = [{'title': c} for c in data['columns']]
                payload['data'] = data
            # Trying to convert time to something Highcharts likes
            x_col = 1 if chart.sql_layout == 'series' else 0
            if chart.x_is_date:
                try:
                    # From string to datetime
                    df[df.columns[x_col]] = pd.to_datetime(
                        df[df.columns[x_col]])
                    # Epoch milliseconds, as nvd3 expects for date axes.
                    df[df.columns[x_col]] = df[df.columns[x_col]].apply(
                        lambda x: int(x.strftime("%s")) * 1000)
                except Exception as e:
                    payload['error'] = "Time conversion failed"
            if chart_type == 'datatable':
                payload['state'] = 'SUCCESS'
                return wwwutils.json_response(payload)
            else:
                if chart.sql_layout == 'series':
                    # User provides columns (series, x, y)
                    xaxis_label = df.columns[1]
                    yaxis_label = df.columns[2]
                    df[df.columns[2]] = df[df.columns[2]].astype(np.float)
                    df = df.pivot_table(
                        index=df.columns[1],
                        columns=df.columns[0],
                        values=df.columns[2], aggfunc=np.sum)
                else:
                    # User provides columns (x, y, metric1, metric2, ...)
                    xaxis_label = df.columns[0]
                    yaxis_label = 'y'
                    df.index = df[df.columns[0]]
                    # NOTE(review): DataFrame.sort was removed in pandas
                    # 0.20+; presumably this targets an older pandas.
                    df = df.sort(df.columns[0])
                    del df[df.columns[0]]
                    for col in df.columns:
                        df[col] = df[col].astype(np.float)
                df = df.fillna(0)
                NVd3ChartClass = chart_mapping.get(chart.chart_type)
                NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
                nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
                for col in df.columns:
                    nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
                try:
                    nvd3_chart.buildcontent()
                    payload['chart_type'] = nvd3_chart.__class__.__name__
                    payload['htmlcontent'] = nvd3_chart.htmlcontent
                except Exception as e:
                    payload['error'] = str(e)
                payload['state'] = 'SUCCESS'
                payload['request_dict'] = request_dict
        return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
if conf.getboolean('core', 'secure_mode'):
abort(404)
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
    @expose('/dag_stats')
    def dag_stats(self):
        """Return per-DAG run-state counts as JSON for the home page tiles."""
        ds = models.DagStat
        session = Session()
        # Refresh the DagStat aggregate table before reading it.
        ds.update()
        qry = (
            session.query(ds.dag_id, ds.state, ds.count)
        )
        data = {}
        for dag_id, state, count in qry:
            if dag_id not in data:
                data[dag_id] = {}
            data[dag_id][state] = count
        # NOTE(review): this session is never committed/closed here, unlike
        # task_stats below -- confirm whether that is intentional.
        payload = {}
        for dag in dagbag.dags.values():
            payload[dag.safe_dag_id] = []
            for state in State.dag_states:
                try:
                    count = data[dag.dag_id][state]
                except Exception:
                    count = 0
                d = {
                    'state': state,
                    'count': count,
                    'dag_id': dag.dag_id,
                    'color': State.color(state)
                }
                payload[dag.safe_dag_id].append(d)
        return wwwutils.json_response(payload)
    @expose('/task_stats')
    def task_stats(self):
        """Return per-DAG task-state counts as JSON, taken from running
        dag runs, or from the most recent completed run when none is
        running."""
        TI = models.TaskInstance
        DagRun = models.DagRun
        Dag = models.DagModel
        session = Session()
        # Most recent non-running run per active DAG.
        LastDagRun = (
            session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
            .join(Dag, Dag.dag_id == DagRun.dag_id)
            .filter(DagRun.state != State.RUNNING)
            .filter(Dag.is_active == True)
            .group_by(DagRun.dag_id)
            .subquery('last_dag_run')
        )
        # All currently running runs per active DAG.
        RunningDagRun = (
            session.query(DagRun.dag_id, DagRun.execution_date)
            .join(Dag, Dag.dag_id == DagRun.dag_id)
            .filter(DagRun.state == State.RUNNING)
            .filter(Dag.is_active == True)
            .subquery('running_dag_run')
        )
        # Select all task_instances from active dag_runs.
        # If no dag_run is active, return task instances from most recent dag_run.
        LastTI = (
            session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
            .join(LastDagRun, and_(
                LastDagRun.c.dag_id == TI.dag_id,
                LastDagRun.c.execution_date == TI.execution_date))
        )
        RunningTI = (
            session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
            .join(RunningDagRun, and_(
                RunningDagRun.c.dag_id == TI.dag_id,
                RunningDagRun.c.execution_date == TI.execution_date))
        )
        UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
        qry = (
            session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
            .group_by(UnionTI.c.dag_id, UnionTI.c.state)
        )
        data = {}
        for dag_id, state, count in qry:
            if dag_id not in data:
                data[dag_id] = {}
            data[dag_id][state] = count
        session.commit()
        session.close()
        payload = {}
        for dag in dagbag.dags.values():
            payload[dag.safe_dag_id] = []
            for state in State.task_states:
                try:
                    count = data[dag.dag_id][state]
                except:
                    count = 0
                d = {
                    'state': state,
                    'count': count,
                    'dag_id': dag.dag_id,
                    'color': State.color(state)
                }
                payload[dag.safe_dag_id].append(d)
        return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
    @expose('/dag_details')
    @login_required
    def dag_details(self):
        """Show DAG metadata with task-instance counts grouped by state."""
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        title = "DAG details"
        session = settings.Session()
        TI = models.TaskInstance
        states = (
            session.query(TI.state, sqla.func.count(TI.dag_id))
            .filter(TI.dag_id == dag_id)
            .group_by(TI.state)
            .all()
        )
        return self.render(
            'airflow/dag_details.html',
            dag=dag, title=title, states=states, State=State)
    @current_app.errorhandler(404)
    def circles(self):
        """Custom 404 page."""
        return render_template(
            'airflow/circles.html', hostname=socket.getfqdn()), 404
    @current_app.errorhandler(500)
    def show_traceback(self):
        """Custom 500 page showing the traceback of the current exception."""
        from airflow.utils import asciiart as ascii_
        return render_template(
            'airflow/traceback.html',
            hostname=socket.getfqdn(),
            nukular=ascii_.nukular,
            info=traceback.format_exc()), 500
    @expose('/noaccess')
    def noaccess(self):
        """Static page shown when the user lacks access to a resource."""
        return self.render('airflow/noaccess.html')
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
    @expose('/login', methods=['GET', 'POST'])
    def login(self):
        """Delegate authentication to the configured login backend."""
        return airflow.login.login(self, request)
    @expose('/logout')
    def logout(self):
        """Log the current user out and return to the admin index."""
        logout_user()
        flash('You have been logged out.')
        return redirect(url_for('admin.index'))
    @expose('/rendered')
    @login_required
    @wwwutils.action_logging
    def rendered(self):
        """Show a task instance's template fields after Jinja rendering."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        execution_date = request.args.get('execution_date')
        dttm = dateutil.parser.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        # Copy so rendering templates does not mutate the shared DAG's task.
        task = copy.copy(dag.get_task(task_id))
        ti = models.TaskInstance(task=task, execution_date=dttm)
        try:
            ti.render_templates()
        except Exception as e:
            flash("Error rendering template: " + str(e), "error")
        title = "Rendered Template"
        html_dict = {}
        for template_field in task.__class__.template_fields:
            content = getattr(task, template_field)
            if template_field in attr_renderer:
                html_dict[template_field] = attr_renderer[template_field](content)
            else:
                # NOTE(review): closing tags are swapped (</pre></code>);
                # display-only markup, but likely unintended.
                html_dict[template_field] = (
                    "<pre><code>" + str(content) + "</pre></code>")
        return self.render(
            'airflow/ti_code.html',
            html_dict=html_dict,
            dag=dag,
            task_id=task_id,
            execution_date=execution_date,
            form=form,
            title=title, )
    @expose('/log')
    @login_required
    @wwwutils.action_logging
    def log(self):
        """Show a task instance's logs, read via the configured log handler."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        execution_date = request.args.get('execution_date')
        dttm = dateutil.parser.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        session = Session()
        ti = session.query(models.TaskInstance).filter(
            models.TaskInstance.dag_id == dag_id,
            models.TaskInstance.task_id == task_id,
            models.TaskInstance.execution_date == dttm).first()
        if ti is None:
            logs = ["*** Task instance did not exist in the DB\n"]
        else:
            # Locate the handler named by task_log_reader on the task
            # logger; it must expose read(ti).
            logger = logging.getLogger('airflow.task')
            task_log_reader = conf.get('core', 'task_log_reader')
            handler = next((handler for handler in logger.handlers
                            if handler.name == task_log_reader), None)
            try:
                ti.task = dag.get_task(ti.task_id)
                logs = handler.read(ti)
            except AttributeError as e:
                logs = ["Task log handler {} does not support read logs.\n{}\n" \
                    .format(task_log_reader, e.message)]
            # On Python 2 log chunks may arrive as bytes; normalize to text.
            for i, log in enumerate(logs):
                if PY2 and not isinstance(log, unicode):
                    logs[i] = log.decode('utf-8')
        return self.render(
            'airflow/ti_log.html',
            logs=logs, dag=dag, title="Log by attempts", task_id=task_id,
            execution_date=execution_date, form=form)
    @expose('/task')
    @login_required
    @wwwutils.action_logging
    def task(self):
        """Show task-instance details: attributes, rendered special fields,
        and the scheduler dependencies that are currently unmet."""
        TI = models.TaskInstance
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        # Carrying execution_date through, even though it's irrelevant for
        # this context
        execution_date = request.args.get('execution_date')
        dttm = dateutil.parser.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        if not dag or task_id not in dag.task_ids:
            flash(
                "Task [{}.{}] doesn't seem to exist"
                " at the moment".format(dag_id, task_id),
                "error")
            return redirect('/admin/')
        task = copy.copy(dag.get_task(task_id))
        task.resolve_template_files()
        ti = TI(task=task, execution_date=dttm)
        ti.refresh_from_db()
        ti_attrs = []
        for attr_name in dir(ti):
            if not attr_name.startswith('_'):
                attr = getattr(ti, attr_name)
                # type(self.task) is a bound-method type, so this comparison
                # filters out methods and keeps data attributes only.
                if type(attr) != type(self.task):
                    ti_attrs.append((attr_name, str(attr)))
        task_attrs = []
        for attr_name in dir(task):
            if not attr_name.startswith('_'):
                attr = getattr(task, attr_name)
                if type(attr) != type(self.task) and \
                        attr_name not in attr_renderer:
                    task_attrs.append((attr_name, str(attr)))
        # Color coding the special attributes that are code
        special_attrs_rendered = {}
        for attr_name in attr_renderer:
            if hasattr(task, attr_name):
                source = getattr(task, attr_name)
                special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
        no_failed_deps_result = [(
            "Unknown",
            dedent("""\
            All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
            - The scheduler is down or under heavy load<br/>
            {}
            <br/>
            If this task instance does not start soon please contact your Airflow administrator for assistance."""
            .format(
                "- This task instance already ran and had it's state changed manually (e.g. cleared in the UI)<br/>"
                if ti.state == State.NONE else "")))]
        # Use the scheduler's context to figure out which dependencies are not met
        dep_context = DepContext(SCHEDULER_DEPS)
        failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
                              ti.get_failed_dep_statuses(
                                  dep_context=dep_context)]
        title = "Task Instance Details"
        return self.render(
            'airflow/task.html',
            task_attrs=task_attrs,
            ti_attrs=ti_attrs,
            failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
            task_id=task_id,
            execution_date=execution_date,
            special_attrs_rendered=special_attrs_rendered,
            form=form,
            dag=dag, title=title)
    @expose('/xcom')
    @login_required
    @wwwutils.action_logging
    def xcom(self):
        """List a task instance's XCom key/value pairs."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        # Carrying execution_date through, even though it's irrelevant for
        # this context
        execution_date = request.args.get('execution_date')
        dttm = dateutil.parser.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        if not dag or task_id not in dag.task_ids:
            flash(
                "Task [{}.{}] doesn't seem to exist"
                " at the moment".format(dag_id, task_id),
                "error")
            return redirect('/admin/')
        session = Session()
        xcomlist = session.query(XCom).filter(
            XCom.dag_id == dag_id, XCom.task_id == task_id,
            XCom.execution_date == dttm).all()
        attributes = []
        for xcom in xcomlist:
            # Keys starting with '_' are treated as internal and hidden.
            if not xcom.key.startswith('_'):
                attributes.append((xcom.key, xcom.value))
        title = "XCom"
        return self.render(
            'airflow/xcom.html',
            attributes=attributes,
            task_id=task_id,
            execution_date=execution_date,
            form=form,
            dag=dag, title=title)
    @expose('/run')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def run(self):
        """Queue a single task instance for immediate execution.

        Only available with the CeleryExecutor; verifies queueing
        dependencies (optionally ignoring them) before submitting.
        """
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        execution_date = request.args.get('execution_date')
        execution_date = dateutil.parser.parse(execution_date)
        ignore_all_deps = request.args.get('ignore_all_deps') == "true"
        ignore_task_deps = request.args.get('ignore_task_deps') == "true"
        ignore_ti_state = request.args.get('ignore_ti_state') == "true"
        try:
            from airflow.executors import GetDefaultExecutor
            from airflow.executors.celery_executor import CeleryExecutor
            executor = GetDefaultExecutor()
            if not isinstance(executor, CeleryExecutor):
                flash("Only works with the CeleryExecutor, sorry", "error")
                return redirect(origin)
        except ImportError:
            # in case CeleryExecutor cannot be imported it is not active either
            flash("Only works with the CeleryExecutor, sorry", "error")
            return redirect(origin)
        ti = models.TaskInstance(task=task, execution_date=execution_date)
        ti.refresh_from_db()
        # Make sure the task instance can be queued
        dep_context = DepContext(
            deps=QUEUE_DEPS,
            ignore_all_deps=ignore_all_deps,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state)
        failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
        if failed_deps:
            failed_deps_str = ", ".join(
                ["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
            flash("Could not queue task instance for execution, dependencies not met: "
                  "{}".format(failed_deps_str),
                  "error")
            return redirect(origin)
        executor.start()
        executor.queue_task_instance(
            ti,
            ignore_all_deps=ignore_all_deps,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state)
        executor.heartbeat()
        flash(
            "Sent {} to the message queue, "
            "it should start any moment now.".format(ti))
        return redirect(origin)
    @expose('/trigger')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def trigger(self):
        """Manually trigger a new DAG run for the current UTC time."""
        dag_id = request.args.get('dag_id')
        origin = request.args.get('origin') or "/admin/"
        dag = dagbag.get_dag(dag_id)
        if not dag:
            flash("Cannot find dag {}".format(dag_id))
            return redirect(origin)
        execution_date = datetime.utcnow()
        run_id = "manual__{0}".format(execution_date.isoformat())
        # Guard against a duplicate manual run for the same timestamp.
        dr = DagRun.find(dag_id=dag_id, run_id=run_id)
        if dr:
            flash("This run_id {} already exists".format(run_id))
            return redirect(origin)
        run_conf = {}
        dag.create_dagrun(
            run_id=run_id,
            execution_date=execution_date,
            state=State.RUNNING,
            conf=run_conf,
            external_trigger=True
        )
        flash(
            "Triggered {}, "
            "it should start any moment now.".format(dag_id))
        return redirect(origin)
    def _clear_dag_tis(self, dag, start_date, end_date, origin,
                       recursive=False, confirmed=False):
        """Clear task instances of ``dag`` within [start_date, end_date].

        With confirmed=True the clear is executed and the user redirected;
        otherwise a dry run lists what would be cleared on a confirmation
        page. ``recursive`` includes subdags.
        """
        if confirmed:
            count = dag.clear(
                start_date=start_date,
                end_date=end_date,
                include_subdags=recursive)
            flash("{0} task instances have been cleared".format(count))
            return redirect(origin)
        tis = dag.clear(
            start_date=start_date,
            end_date=end_date,
            include_subdags=recursive,
            dry_run=True)
        if not tis:
            flash("No task instances to clear", 'error')
            response = redirect(origin)
        else:
            details = "\n".join([str(t) for t in tis])
            response = self.render(
                'airflow/confirm.html',
                message=("Here's the list of task instances you are about "
                         "to clear:"),
                details=details)
        return response
    @expose('/clear')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def clear(self):
        """Clear task instances for one task (and optional relatives)."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        execution_date = request.args.get('execution_date')
        execution_date = dateutil.parser.parse(execution_date)
        confirmed = request.args.get('confirmed') == "true"
        upstream = request.args.get('upstream') == "true"
        downstream = request.args.get('downstream') == "true"
        future = request.args.get('future') == "true"
        past = request.args.get('past') == "true"
        recursive = request.args.get('recursive') == "true"
        # Narrow the DAG to the selected task plus requested relatives.
        dag = dag.sub_dag(
            task_regex=r"^{0}$".format(task_id),
            include_downstream=downstream,
            include_upstream=upstream)
        # Open-ended range when clearing into the future/past.
        end_date = execution_date if not future else None
        start_date = execution_date if not past else None
        return self._clear_dag_tis(dag, start_date, end_date, origin,
                                   recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = dateutil.parser.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
    @expose('/dagrun_success')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def dagrun_success(self):
        """Mark a whole DAG run as success (with confirmation dry-run)."""
        dag_id = request.args.get('dag_id')
        execution_date = request.args.get('execution_date')
        confirmed = request.args.get('confirmed') == 'true'
        origin = request.args.get('origin')
        if not execution_date:
            flash('Invalid execution date', 'error')
            return redirect(origin)
        execution_date = dateutil.parser.parse(execution_date)
        dag = dagbag.get_dag(dag_id)
        if not dag:
            flash('Cannot find DAG: {}'.format(dag_id), 'error')
            return redirect(origin)
        # commit=False makes this a dry run returning the affected instances.
        new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS,
                                          commit=confirmed)
        if confirmed:
            flash('Marked success on {} task instances'.format(len(new_dag_state)))
            return redirect(origin)
        else:
            details = '\n'.join([str(t) for t in new_dag_state])
            response = self.render('airflow/confirm.html',
                                   message=("Here's the list of task instances you are "
                                            "about to mark as successful:"),
                                   details=details)
            return response
    @expose('/success')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def success(self):
        """Mark one task instance (and optional relatives) as success."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        task.dag = dag
        execution_date = request.args.get('execution_date')
        execution_date = dateutil.parser.parse(execution_date)
        confirmed = request.args.get('confirmed') == "true"
        upstream = request.args.get('upstream') == "true"
        downstream = request.args.get('downstream') == "true"
        future = request.args.get('future') == "true"
        past = request.args.get('past') == "true"
        # NOTE(review): these guards run after dag.get_task above, so a
        # missing DAG would already have raised -- confirm ordering.
        if not dag:
            flash("Cannot find DAG: {}".format(dag_id))
            return redirect(origin)
        if not task:
            flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
            return redirect(origin)
        from airflow.api.common.experimental.mark_tasks import set_state
        if confirmed:
            altered = set_state(task=task, execution_date=execution_date,
                                upstream=upstream, downstream=downstream,
                                future=future, past=past, state=State.SUCCESS,
                                commit=True)
            flash("Marked success on {} task instances".format(len(altered)))
            return redirect(origin)
        # Dry run: list what would change and ask for confirmation.
        to_be_altered = set_state(task=task, execution_date=execution_date,
                                  upstream=upstream, downstream=downstream,
                                  future=future, past=past, state=State.SUCCESS,
                                  commit=False)
        details = "\n".join([str(t) for t in to_be_altered])
        response = self.render("airflow/confirm.html",
                               message=("Here's the list of task instances you are "
                                        "about to mark as successful:"),
                               details=details)
        return response
    @expose('/tree')
    @login_required
    @wwwutils.gzipped
    @wwwutils.action_logging
    def tree(self):
        """Render the tree view: the task hierarchy (walked upstream from the
        DAG's roots) crossed with the most recent DAG runs.

        Query args: ``dag_id``, optional ``root`` (subtree filter),
        ``base_date`` and ``num_runs`` (window of runs shown).
        """
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        dag = dagbag.get_dag(dag_id)
        root = request.args.get('root')
        if root:
            # Restrict the view to the chosen task and its upstream ancestors.
            dag = dag.sub_dag(
                task_regex=root,
                include_downstream=False,
                include_upstream=True)
        session = settings.Session()
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = dateutil.parser.parse(base_date)
        else:
            base_date = dag.latest_execution_date or datetime.utcnow()
        # Negative num walks the schedule backwards from base_date.
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        DR = models.DagRun
        dag_runs = (
            session.query(DR)
            .filter(
                DR.dag_id == dag.dag_id,
                DR.execution_date <= base_date,
                DR.execution_date >= min_date)
            .all()
        )
        # Key runs by execution_date for O(1) lookup when annotating TIs below.
        dag_runs = {
            dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
        dates = sorted(list(dag_runs.keys()))
        max_date = max(dates) if dates else None
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        task_instances = {}
        for ti in tis:
            tid = alchemy_to_dict(ti)
            dr = dag_runs.get(ti.execution_date)
            tid['external_trigger'] = dr['external_trigger'] if dr else False
            task_instances[(ti.task_id, ti.execution_date)] = tid
        expanded = []
        # The default recursion traces every path so that tree view has full
        # expand/collapse functionality. After 5,000 nodes we stop and fall
        # back on a quick DFS search for performance. See PR #320.
        node_count = [0]
        node_limit = 5000 / max(1, len(dag.roots))
        def recurse_nodes(task, visited):
            # Build the d3 node dict for `task`, recursing up its upstreams.
            visited.add(task)
            node_count[0] += 1
            children = [
                recurse_nodes(t, visited) for t in task.upstream_list
                if node_count[0] < node_limit or t not in visited]
            # D3 tree uses children vs _children to define what is
            # expanded or not. The following block makes it such that
            # repeated nodes are collapsed by default.
            children_key = 'children'
            if task.task_id not in expanded:
                expanded.append(task.task_id)
            elif children:
                children_key = "_children"
            def set_duration(tid):
                # Give still-running instances a live duration for display.
                if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
                        tid["start_date"] is not None):
                    d = datetime.utcnow() - dateutil.parser.parse(tid["start_date"])
                    tid["duration"] = d.total_seconds()
                return tid
            return {
                'name': task.task_id,
                'instances': [
                    set_duration(task_instances.get((task.task_id, d))) or {
                        'execution_date': d.isoformat(),
                        'task_id': task.task_id
                    }
                    for d in dates],
                children_key: children,
                'num_dep': len(task.upstream_list),
                'operator': task.task_type,
                'retries': task.retries,
                'owner': task.owner,
                'start_date': task.start_date,
                'end_date': task.end_date,
                'depends_on_past': task.depends_on_past,
                'ui_color': task.ui_color,
            }
        data = {
            'name': '[DAG]',
            'children': [recurse_nodes(t, set()) for t in dag.roots],
            'instances': [
                dag_runs.get(d) or {'execution_date': d.isoformat()}
                for d in dates],
        }
        data = json.dumps(data, indent=4, default=json_ser)
        session.commit()
        session.close()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        return self.render(
            'airflow/tree.html',
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            root=root,
            form=form,
            dag=dag, data=data, blur=blur)
    @expose('/graph')
    @login_required
    @wwwutils.gzipped
    @wwwutils.action_logging
    def graph(self):
        """Render the graph view: dagre-d3 nodes/edges for one DAG run,
        with per-task instance state overlaid client-side.
        """
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        dag = dagbag.get_dag(dag_id)
        if dag_id not in dagbag.dags:
            flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
            return redirect('/admin/')
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        # Layout direction; defaults to the DAG's declared orientation.
        arrange = request.args.get('arrange', dag.orientation)
        nodes = []
        edges = []
        for task in dag.tasks:
            nodes.append({
                'id': task.task_id,
                'value': {
                    'label': task.task_id,
                    'labelStyle': "fill:{0};".format(task.ui_fgcolor),
                    'style': "fill:{0};".format(task.ui_color),
                }
            })
        def get_upstream(task):
            # Walk upstream recursively, collecting deduplicated edges.
            # NOTE(review): recursion happens for every upstream even when the
            # edge already exists, so diamond-shaped DAGs re-traverse shared
            # ancestors — potentially slow on dense graphs; confirm acceptable.
            for t in task.upstream_list:
                edge = {
                    'u': t.task_id,
                    'v': task.task_id,
                }
                if edge not in edges:
                    edges.append(edge)
                get_upstream(t)
        for t in dag.roots:
            get_upstream(t)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = dateutil.parser.parse(dttm)
        else:
            # NOTE(review): falls back to a date (not datetime) here — the
            # dr comparison below then never matches; confirm intended.
            dttm = dag.latest_execution_date or datetime.utcnow().date()
        DR = models.DagRun
        drs = (
            session.query(DR)
            .filter_by(dag_id=dag_id)
            .order_by(desc(DR.execution_date)).all()
        )
        dr_choices = []
        dr_state = None
        for dr in drs:
            dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
            if dttm == dr.execution_date:
                dr_state = dr.state
        class GraphForm(Form):
            # Choices are rebuilt per-request from the run list above.
            execution_date = SelectField("DAG run", choices=dr_choices)
            arrange = SelectField("Layout", choices=(
                ('LR', "Left->Right"),
                ('RL', "Right->Left"),
                ('TB', "Top->Bottom"),
                ('BT', "Bottom->Top"),
            ))
        form = GraphForm(
            data={'execution_date': dttm.isoformat(), 'arrange': arrange})
        task_instances = {
            ti.task_id: alchemy_to_dict(ti)
            for ti in dag.get_task_instances(session, dttm, dttm)}
        tasks = {
            t.task_id: {
                'dag_id': t.dag_id,
                'task_type': t.task_type,
            }
            for t in dag.tasks}
        if not tasks:
            flash("No tasks found", "error")
        session.commit()
        session.close()
        doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
        return self.render(
            'airflow/graph.html',
            dag=dag,
            form=form,
            width=request.args.get('width', "100%"),
            height=request.args.get('height', "800"),
            execution_date=dttm.isoformat(),
            state_token=state_token(dr_state),
            doc_md=doc_md,
            arrange=arrange,
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            blur=blur,
            root=root or '',
            task_instances=json.dumps(task_instances, indent=2),
            tasks=json.dumps(tasks, indent=2),
            nodes=json.dumps(nodes, indent=2),
            edges=json.dumps(edges, indent=2), )
    @expose('/duration')
    @login_required
    @wwwutils.action_logging
    def duration(self):
        """Plot per-task run durations over time, plus a cumulative chart that
        folds in time spent on failed attempts (TaskFail records).
        """
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = dateutil.parser.parse(base_date)
        else:
            base_date = dag.latest_execution_date or datetime.utcnow()
        # Negative num walks the schedule backwards from base_date.
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, height=chart_height, width="1200")
        cum_chart = nvd3.lineChart(
            name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
        y = defaultdict(list)
        x = defaultdict(list)
        cum_y = defaultdict(list)
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        TF = models.TaskFail
        ti_fails = (
            session
            .query(TF)
            .filter(
                TF.dag_id == dag.dag_id,
                TF.execution_date >= min_date,
                TF.execution_date <= base_date,
                TF.task_id.in_([t.task_id for t in dag.tasks]))
            .all()
        )
        # Sum failed-attempt durations per (dag, task, execution_date) so the
        # cumulative chart reflects total time including failed tries.
        fails_totals = defaultdict(int)
        for tf in ti_fails:
            dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
            fails_totals[dict_key] += tf.duration
        for ti in tis:
            if ti.duration:
                dttm = wwwutils.epoch(ti.execution_date)
                x[ti.task_id].append(dttm)
                y[ti.task_id].append(float(ti.duration))
                fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
                fails_total = fails_totals[fails_dict_key]
                cum_y[ti.task_id].append(float(ti.duration + fails_total))
        # determine the most relevant time unit for the set of task instance
        # durations for the DAG
        y_unit = infer_time_unit([d for t in y.values() for d in t])
        cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
        # update the y Axis on both charts to have the correct time units
        chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                            label='Duration ({})'.format(y_unit))
        chart.axislist['yAxis']['axisLabelDistance'] = '40'
        cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                                label='Duration ({})'.format(cum_y_unit))
        cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'
        for task in dag.tasks:
            if x[task.task_id]:
                chart.add_serie(name=task.task_id, x=x[task.task_id],
                                y=scale_time_units(y[task.task_id], y_unit))
                cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
                                    y=scale_time_units(cum_y[task.task_id],
                                                       cum_y_unit))
        dates = sorted(list({ti.execution_date for ti in tis}))
        max_date = max([ti.execution_date for ti in tis]) if dates else None
        session.commit()
        session.close()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        cum_chart.buildcontent()
        # Splice a 'chartload' trigger just before the cumulative chart's
        # closing script so the page can react once rendering finishes.
        s_index = cum_chart.htmlcontent.rfind('});')
        cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
                                 "$(function() {$( document ).trigger('chartload') })" +
                                 cum_chart.htmlcontent[s_index:])
        return self.render(
            'airflow/duration_chart.html',
            dag=dag,
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
            chart=chart.htmlcontent,
            cum_chart=cum_chart.htmlcontent
        )
    @expose('/tries')
    @login_required
    @wwwutils.action_logging
    def tries(self):
        """Plot try_number over time for each task in the DAG."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = dateutil.parser.parse(base_date)
        else:
            base_date = dag.latest_execution_date or datetime.utcnow()
        # Negative num walks the schedule backwards from base_date.
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
            width="1200")
        for task in dag.tasks:
            y = []
            x = []
            for ti in task.get_task_instances(session, start_date=min_date,
                                              end_date=base_date):
                dttm = wwwutils.epoch(ti.execution_date)
                x.append(dttm)
                y.append(ti.try_number)
            if x:
                chart.add_serie(name=task.task_id, x=x, y=y)
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        tries = sorted(list({ti.try_number for ti in tis}))
        # `tries` is non-empty exactly when `tis` is, so it safely guards max().
        max_date = max([ti.execution_date for ti in tis]) if tries else None
        session.commit()
        session.close()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
            chart=chart.htmlcontent
        )
    @expose('/landing_times')
    @login_required
    @wwwutils.action_logging
    def landing_times(self):
        """Plot "landing time" per task: how long after the scheduled period
        closed the task actually finished.
        """
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = dateutil.parser.parse(base_date)
        else:
            base_date = dag.latest_execution_date or datetime.utcnow()
        # Negative num walks the schedule backwards from base_date.
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, height=chart_height, width="1200")
        y = {}
        x = {}
        for task in dag.tasks:
            y[task.task_id] = []
            x[task.task_id] = []
            for ti in task.get_task_instances(session, start_date=min_date,
                                              end_date=base_date):
                # Landing time is measured from the *following* schedule tick,
                # i.e. when the data interval the run covers actually closed.
                ts = ti.execution_date
                if dag.schedule_interval and dag.following_schedule(ts):
                    ts = dag.following_schedule(ts)
                if ti.end_date:
                    # NOTE(review): x uses execution_date while secs is
                    # measured from the schedule-adjusted ts — confirm the
                    # x-axis anchor is intentional.
                    dttm = wwwutils.epoch(ti.execution_date)
                    secs = (ti.end_date - ts).total_seconds()
                    x[ti.task_id].append(dttm)
                    y[ti.task_id].append(secs)
        # determine the most relevant time unit for the set of landing times
        # for the DAG
        y_unit = infer_time_unit([d for t in y.values() for d in t])
        # update the y Axis to have the correct time units
        chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                            label='Landing Time ({})'.format(y_unit))
        chart.axislist['yAxis']['axisLabelDistance'] = '40'
        for task in dag.tasks:
            if x[task.task_id]:
                chart.add_serie(name=task.task_id, x=x[task.task_id],
                                y=scale_time_units(y[task.task_id], y_unit))
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        dates = sorted(list({ti.execution_date for ti in tis}))
        max_date = max([ti.execution_date for ti in tis]) if dates else None
        session.commit()
        session.close()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            chart=chart.htmlcontent,
            height=str(chart_height + 100) + "px",
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
        )
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.utcnow()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
    @expose('/refresh_all')
    @login_required
    @wwwutils.action_logging
    def refresh_all(self):
        """Re-collect every DAG definition file, even unchanged ones."""
        dagbag.collect_dags(only_if_updated=False)
        flash("All DAGs are now up to date")
        return redirect('/')
    @expose('/gantt')
    @login_required
    @wwwutils.action_logging
    def gantt(self):
        """Render a Gantt chart of task instances for one DAG run."""
        session = settings.Session()
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        demo_mode = conf.getboolean('webserver', 'demo_mode')
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = dateutil.parser.parse(dttm)
        else:
            dttm = dag.latest_execution_date or datetime.utcnow().date()
        form = DateTimeForm(data={'execution_date': dttm})
        # Only instances that actually started can be placed on the timeline.
        tis = [
            ti for ti in dag.get_task_instances(session, dttm, dttm)
            if ti.start_date]
        tis = sorted(tis, key=lambda ti: ti.start_date)
        tasks = []
        for ti in tis:
            # Still-running tasks extend to "now".
            end_date = ti.end_date if ti.end_date else datetime.utcnow()
            tasks.append({
                'startDate': wwwutils.epoch(ti.start_date),
                'endDate': wwwutils.epoch(end_date),
                'isoStart': ti.start_date.isoformat()[:-4],
                'isoEnd': end_date.isoformat()[:-4],
                'taskName': ti.task_id,
                'duration': "{}".format(end_date - ti.start_date)[:-4],
                'status': ti.state,
                'executionDate': ti.execution_date.isoformat(),
            })
        states = {ti.state: ti.state for ti in tis}
        data = {
            'taskNames': [ti.task_id for ti in tis],
            'tasks': tasks,
            'taskStatus': states,
            'height': len(tis) * 25 + 25,
        }
        session.commit()
        session.close()
        return self.render(
            'airflow/gantt.html',
            dag=dag,
            execution_date=dttm.isoformat(),
            form=form,
            data=json.dumps(data, indent=2),
            base_date='',
            demo_mode=demo_mode,
            root=root,
        )
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
def task_instances(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
# prevent XSS
form = escape(form)
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
out = str(request.files['file'].read())
d = json.loads(out)
except Exception:
flash("Missing file or syntax error.")
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
    """Landing page: paginated, searchable list of DAGs visible to the user,
    merging DB-registered DAGs (orm_dags) with parsed files (webserver_dags).
    """
    @expose("/")
    @login_required
    def index(self):
        session = Session()
        DM = models.DagModel

        # restrict the dags shown if filter_by_owner and current user is not superuser
        do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
        owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()

        hide_paused_dags_by_default = conf.getboolean('webserver',
                                                      'hide_paused_dags_by_default')
        show_paused_arg = request.args.get('showPaused', 'None')

        def get_int_arg(value, default=0):
            # Tolerate non-numeric query args by falling back to `default`.
            try:
                return int(value)
            except ValueError:
                return default

        arg_current_page = request.args.get('page', '0')
        arg_search_query = request.args.get('search', None)

        dags_per_page = PAGE_SIZE
        current_page = get_int_arg(arg_current_page, default=0)

        # Explicit showPaused beats the configured default.
        if show_paused_arg.strip().lower() == 'false':
            hide_paused = True
        elif show_paused_arg.strip().lower() == 'true':
            hide_paused = False
        else:
            hide_paused = hide_paused_dags_by_default

        # read orm_dags from the db
        sql_query = session.query(DM)

        if do_filter and owner_mode == 'ldapgroup':
            sql_query = sql_query.filter(
                ~DM.is_subdag,
                DM.is_active,
                DM.owners.in_(current_user.ldap_groups)
            )
        elif do_filter and owner_mode == 'user':
            sql_query = sql_query.filter(
                ~DM.is_subdag, DM.is_active,
                DM.owners == current_user.user.username
            )
        else:
            sql_query = sql_query.filter(
                ~DM.is_subdag, DM.is_active
            )

        # optionally filter out "paused" dags
        if hide_paused:
            sql_query = sql_query.filter(~DM.is_paused)

        orm_dags = {dag.dag_id: dag for dag
                    in sql_query
                    .all()}

        # Surface parse failures prominently on the home page.
        import_errors = session.query(models.ImportError).all()
        for ie in import_errors:
            flash(
                "Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
                "error")

        session.expunge_all()
        session.commit()
        session.close()

        # get a list of all non-subdag dags visible to everyone
        # optionally filter out "paused" dags
        if hide_paused:
            unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
                                         not dag.parent_dag and not dag.is_paused]

        else:
            unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
                                         not dag.parent_dag]

        # optionally filter to get only dags that the user should see
        if do_filter and owner_mode == 'ldapgroup':
            # only show dags owned by someone in @current_user.ldap_groups
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
                if dag.owner in current_user.ldap_groups
            }
        elif do_filter and owner_mode == 'user':
            # only show dags owned by @current_user.user.username
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
                if dag.owner == current_user.user.username
            }
        else:
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
            }

        if arg_search_query:
            lower_search_query = arg_search_query.lower()
            # filter by dag_id
            webserver_dags_filtered = {
                dag_id: dag
                for dag_id, dag in webserver_dags.items()
                if (lower_search_query in dag_id.lower() or
                    lower_search_query in dag.owner.lower())
            }

            all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
                           if lower_search_query in dag.dag_id.lower() or
                              lower_search_query in dag.owners.lower()]) |
                           set(webserver_dags_filtered.keys()))

            sorted_dag_ids = sorted(all_dag_ids)
        else:
            webserver_dags_filtered = webserver_dags
            sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))

        start = current_page * dags_per_page
        end = start + dags_per_page

        num_of_all_dags = len(sorted_dag_ids)
        page_dag_ids = sorted_dag_ids[start:end]
        num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))

        # DAG ids and owners feed the search box's autocomplete.
        auto_complete_data = set()
        for dag in webserver_dags_filtered.values():
            auto_complete_data.add(dag.dag_id)
            auto_complete_data.add(dag.owner)
        for dag in orm_dags.values():
            auto_complete_data.add(dag.dag_id)
            auto_complete_data.add(dag.owners)

        return self.render(
            'airflow/dags.html',
            webserver_dags=webserver_dags_filtered,
            orm_dags=orm_dags,
            hide_paused=hide_paused,
            current_page=current_page,
            search_query=arg_search_query if arg_search_query else '',
            page_size=dags_per_page,
            num_of_pages=num_of_pages,
            num_dag_from=start + 1,
            num_dag_to=min(end, num_of_all_dags),
            num_of_all_dags=num_of_all_dags,
            paging=wwwutils.generate_pages(current_page, num_of_pages,
                                           search=arg_search_query,
                                           showPaused=not hide_paused),
            dag_ids_in_page=page_dag_ids,
            auto_complete_data=auto_complete_data)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
    """Ad-hoc SQL page: run a statement against a configured connection and
    render the result as an HTML table or CSV download.
    """
    @expose('/', methods=['POST', 'GET'])
    @wwwutils.gzipped
    def query(self):
        session = settings.Session()
        dbs = session.query(models.Connection).order_by(
            models.Connection.conn_id).all()
        session.expunge_all()
        # Only connections with a usable hook are offered.
        db_choices = list(
            ((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
        conn_id_str = request.form.get('conn_id')
        csv = request.form.get('csv') == "true"
        sql = request.form.get('sql')

        class QueryForm(Form):
            # Choices are rebuilt per-request from the live connection list.
            conn_id = SelectField("Layout", choices=db_choices)
            sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
        data = {
            'conn_id': conn_id_str,
            'sql': sql,
        }
        results = None
        has_data = False
        error = False
        if conn_id_str:
            db = [db for db in dbs if db.conn_id == conn_id_str][0]
            hook = db.get_hook()
            try:
                # Query is capped at QUERY_LIMIT rows before execution.
                df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
                # df = hook.get_pandas_df(sql)
                has_data = len(df) > 0
                df = df.fillna('')
                results = df.to_html(
                    classes=[
                        'table', 'table-bordered', 'table-striped', 'no-wrap'],
                    index=False,
                    na_rep='',
                ) if has_data else ''
            except Exception as e:
                flash(str(e), 'error')
                error = True

        if has_data and len(df) == QUERY_LIMIT:
            flash(
                "Query output truncated at " + str(QUERY_LIMIT) +
                " rows", 'info')

        if not has_data and error:
            flash('No data', 'error')

        if csv:
            # NOTE(review): if the query raised above, `df` is unbound here
            # and this would raise NameError — confirm the UI only posts
            # csv=true after a successful run.
            return Response(
                response=df.to_csv(index=False),
                status=200,
                mimetype="application/text")
        form = QueryForm(request.form, data=data)
        session.commit()
        session.close()
        return self.render(
            'airflow/query.html', form=form,
            title="Ad Hoc Query",
            results=results or '',
            has_data=has_data)
class AirflowModelView(ModelView):
    """Base flask-admin ModelView wired to Airflow's templates and page size."""
    list_template = 'airflow/model_list.html'
    edit_template = 'airflow/model_edit.html'
    create_template = 'airflow/model_create.html'
    column_display_actions = True
    page_size = PAGE_SIZE
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
    """
    Modifying the base ModelView class for non edit, browse only operations
    """
    named_filter_urls = True
    # Read-only: disable all mutation entry points.
    can_create = False
    can_edit = False
    can_delete = False
    column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """Superuser CRUD view over Pools, linking into used/queued slot lists."""
    column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
    column_formatters = dict(
        pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
    named_filter_urls = True
    form_args = {
        'pool': {
            'validators': [
                validators.DataRequired(),
            ]
        }
    }
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
    """Browse-only view over recorded SLA misses."""
    verbose_name_plural = "SLA misses"
    verbose_name = "SLA miss"
    column_list = (
        'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
    column_formatters = dict(
        task_id=task_instance_link,
        execution_date=datetime_f,
        timestamp=datetime_f,
        dag_id=dag_link)
    named_filter_urls = True
    column_searchable_list = ('dag_id', 'task_id',)
    column_filters = (
        'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
    # These fields are system-maintained; show but never edit them.
    form_widget_args = {
        'email_sent': {'disabled': True},
        'timestamp': {'disabled': True},
    }
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view for user-defined SQL charts rendered with nvd3."""
    verbose_name = "chart"
    verbose_name_plural = "charts"
    form_columns = (
        'label',
        'owner',
        'conn_id',
        'chart_type',
        'show_datatable',
        'x_is_date',
        'y_log_scale',
        'show_sql',
        'height',
        'sql_layout',
        'sql',
        'default_params',
    )
    column_list = (
        'label',
        'conn_id',
        'chart_type',
        'owner',
        'last_modified',
    )
    column_sortable_list = (
        'label',
        'conn_id',
        'chart_type',
        ('owner', 'owner.username'),
        'last_modified',
    )
    column_formatters = dict(label=label_link, last_modified=datetime_f)
    column_default_sort = ('last_modified', True)
    create_template = 'airflow/chart/create.html'
    edit_template = 'airflow/chart/edit.html'
    column_filters = ('label', 'owner.username', 'conn_id')
    column_searchable_list = ('owner.username', 'label', 'sql')
    column_descriptions = {
        'label': "Can include {{ templated_fields }} and {{ macros }}",
        'chart_type': "The type of chart to be displayed",
        'sql': "Can include {{ templated_fields }} and {{ macros }}.",
        'height': "Height of the chart, in pixels.",
        'conn_id': "Source database to run the query against",
        'x_is_date': (
            "Whether the X axis should be casted as a date field. Expect most "
            "intelligible date formats to get casted properly."
        ),
        'owner': (
            "The chart's owner, mostly used for reference and filtering in "
            "the list view."
        ),
        'show_datatable':
            "Whether to display an interactive data table under the chart.",
        'default_params': (
            'A dictionary of {"key": "values",} that define what the '
            'templated fields (parameters) values should be by default. '
            'To be valid, it needs to "eval" as a Python dict. '
            'The key values will show up in the url\'s querystring '
            'and can be altered there.'
        ),
        'show_sql': "Whether to display the SQL statement as a collapsible "
                    "section in the chart page.",
        'y_log_scale': "Whether to use a log scale for the Y axis.",
        'sql_layout': (
            "Defines the layout of the SQL that the application should "
            "expect. Depending on the tables you are sourcing from, it may "
            "make more sense to pivot / unpivot the metrics."
        ),
    }
    column_labels = {
        'sql': "SQL",
        'height': "Chart Height",
        'sql_layout': "SQL Layout",
        'show_sql': "Display the SQL Statement",
        'default_params': "Default Parameters",
    }
    form_choices = {
        'chart_type': [
            ('line', 'Line Chart'),
            ('spline', 'Spline Chart'),
            ('bar', 'Bar Chart'),
            ('column', 'Column Chart'),
            ('area', 'Overlapping Area Chart'),
            ('stacked_area', 'Stacked Area Chart'),
            ('percent_area', 'Percent Area Chart'),
            ('datatable', 'No chart, data table only'),
        ],
        'sql_layout': [
            ('series', 'SELECT series, x, y FROM ...'),
            ('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
        ],
        # Connection choices are evaluated once at class-definition time,
        # not per request.
        'conn_id': [
            (c.conn_id, c.conn_id)
            for c in (
                Session().query(models.Connection.conn_id)
                .group_by(models.Connection.conn_id)
            )
        ]
    }
    def on_model_change(self, form, model, is_created=True):
        """Maintain bookkeeping fields (iteration count, owner, mtime) on save."""
        if model.iteration_no is None:
            model.iteration_no = 0
        else:
            model.iteration_no += 1
        if not model.user_id and current_user and hasattr(current_user, 'id'):
            model.user_id = current_user.id
        model.last_modified = datetime.utcnow()
# Translate a ChartModelView chart_type value into the nvd3 chart class
# name used by the rendering templates.
chart_mapping = dict([
    ('line', 'lineChart'),
    ('spline', 'lineChart'),
    ('bar', 'multiBarChart'),
    ('column', 'multiBarChart'),
    ('area', 'stackedAreaChart'),
    ('stacked_area', 'stackedAreaChart'),
    ('percent_area', 'stackedAreaChart'),
    ('datatable', 'datatable'),
])
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view over KnownEvent records (outages, holidays, etc.)."""
    verbose_name = "known event"
    verbose_name_plural = "known events"
    form_columns = (
        'label',
        'event_type',
        'start_date',
        'end_date',
        'reported_by',
        'description',
    )
    form_args = {
        'label': {
            'validators': [
                validators.DataRequired(),
            ],
        },
        'event_type': {
            'validators': [
                validators.DataRequired(),
            ],
        },
        'start_date': {
            'validators': [
                validators.DataRequired(),
            ],
        },
        'end_date': {
            'validators': [
                validators.DataRequired(),
                # Enforce a non-negative event duration.
                GreaterEqualThan(fieldname='start_date'),
            ],
        },
        'reported_by': {
            'validators': [
                validators.DataRequired(),
            ],
        }
    }
    column_list = (
        'label',
        'event_type',
        'start_date',
        'end_date',
        'reported_by',
    )
    column_default_sort = ("start_date", True)
    column_sortable_list = (
        'label',
        ('event_type', 'event_type.know_event_type'),
        'start_date',
        'end_date',
        ('reported_by', 'reported_by.username'),
    )
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
    """Default CRUD view over KnownEventType; no customization needed."""
    pass
# NOTE: For debugging / troubleshooting
# mv = KnownEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view for Airflow Variables, masking values of sensitive keys."""
    verbose_name = "Variable"
    verbose_name_plural = "Variables"
    list_template = 'airflow/variable_list.html'

    def hidden_field_formatter(view, context, model, name):
        # Mask values whose key matches the configured sensitive patterns.
        if wwwutils.should_hide_value_for_key(model.key):
            return Markup('*' * 8)
        try:
            return getattr(model, name)
        except AirflowException:
            # Value could not be read (e.g. decryption failed): show a badge.
            return Markup('<span class="label label-danger">Invalid</span>')

    form_columns = (
        'key',
        'val',
    )
    column_list = ('key', 'val', 'is_encrypted',)
    column_filters = ('key', 'val')
    column_searchable_list = ('key', 'val')
    column_default_sort = ('key', False)
    form_widget_args = {
        'is_encrypted': {'disabled': True},
        'val': {
            'rows': 20,
        }
    }
    form_args = {
        'key': {
            # Use a list (not a set literal as before) for validators, for
            # consistency with every other view's form_args in this module.
            'validators': [
                validators.DataRequired(),
            ],
        },
    }
    column_sortable_list = (
        'key',
        'val',
        'is_encrypted',
    )
    column_formatters = {
        'val': hidden_field_formatter,
    }

    # Default flask-admin export functionality doesn't handle serialized json
    @action('varexport', 'Export', None)
    def action_varexport(self, ids):
        """Export the selected Variables as a downloadable JSON document."""
        V = models.Variable
        session = settings.Session()
        qry = session.query(V).filter(V.id.in_(ids)).all()
        session.close()

        var_dict = {}
        d = json.JSONDecoder()
        for var in qry:
            val = None
            try:
                val = d.decode(var.val)
            except Exception:
                # Narrowed from a bare `except:`; non-JSON values are
                # exported as their raw string.
                val = var.val
            var_dict[var.key] = val

        response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
        response.headers["Content-Disposition"] = "attachment; filename=variables.json"
        return response

    def on_form_prefill(self, form, id):
        # Never echo sensitive values back into the edit form.
        if wwwutils.should_hide_value_for_key(form.key.data):
            form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
    """Superuser view over XCom entries; value is edited as a plain string."""
    verbose_name = "XCom"
    verbose_name_plural = "XComs"
    form_columns = (
        'key',
        'value',
        'execution_date',
        'task_id',
        'dag_id',
    )
    # Replace the default (binary) value widget with a text field.
    form_extra_fields = {
        'value': StringField('Value'),
    }
    column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
    column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
class JobModelView(ModelViewOnly):
    """Browse-only view over scheduler/worker Job records."""
    verbose_name_plural = "jobs"
    verbose_name = "job"
    column_display_actions = False
    column_default_sort = ('start_date', True)
    column_filters = (
        'job_type', 'dag_id', 'state',
        'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
    column_formatters = dict(
        start_date=datetime_f,
        end_date=datetime_f,
        hostname=nobr_f,
        state=state_f,
        latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
    """DagRun admin view: browsing plus bulk state changes and deletion."""
    verbose_name_plural = "DAG Runs"
    can_edit = True
    can_create = True
    column_editable_list = ('state',)
    verbose_name = "dag run"
    column_default_sort = ('execution_date', True)
    form_choices = {
        'state': [
            ('success', 'success'),
            ('running', 'running'),
            ('failed', 'failed'),
        ],
    }
    form_args = dict(
        dag_id=dict(validators=[validators.DataRequired()])
    )
    column_list = (
        'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
    column_filters = column_list
    column_searchable_list = ('dag_id', 'state', 'run_id')
    column_formatters = dict(
        execution_date=datetime_f,
        state=state_f,
        start_date=datetime_f,
        dag_id=dag_link)

    @action('new_delete', "Delete", "Are you sure you want to delete selected records?")
    def action_new_delete(self, ids):
        """Delete the selected runs and refresh DagStat for affected DAGs."""
        session = settings.Session()
        deleted = set(session.query(models.DagRun)
                      .filter(models.DagRun.id.in_(ids))
                      .all())
        session.query(models.DagRun) \
            .filter(models.DagRun.id.in_(ids)) \
            .delete(synchronize_session='fetch')
        session.commit()
        dirty_ids = []
        for row in deleted:
            dirty_ids.append(row.dag_id)
        models.DagStat.update(dirty_ids, dirty_only=False, session=session)
        session.close()

    @action('set_running', "Set state to 'running'", None)
    def action_set_running(self, ids):
        self.set_dagrun_state(ids, State.RUNNING)

    @action('set_failed', "Set state to 'failed'", None)
    def action_set_failed(self, ids):
        self.set_dagrun_state(ids, State.FAILED)

    @action('set_success', "Set state to 'success'", None)
    def action_set_success(self, ids):
        self.set_dagrun_state(ids, State.SUCCESS)

    @provide_session
    def set_dagrun_state(self, ids, target_state, session=None):
        """Set the given DagRuns to ``target_state`` and refresh DagStat.

        RUNNING resets start_date; any other target stamps end_date.
        """
        try:
            DR = models.DagRun
            count = 0
            dirty_ids = []
            for dr in session.query(DR).filter(DR.id.in_(ids)).all():
                dirty_ids.append(dr.dag_id)
                count += 1
                dr.state = target_state
                if target_state == State.RUNNING:
                    dr.start_date = datetime.utcnow()
                else:
                    dr.end_date = datetime.utcnow()
            session.commit()
            models.DagStat.update(dirty_ids, session=session)
            flash(
                "{count} dag runs were set to '{target_state}'".format(**locals()))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
    """Read-only listing of audit-log entries, most recent first."""

    verbose_name = "log"
    verbose_name_plural = "logs"
    column_display_actions = False
    column_default_sort = ('dttm', True)
    column_filters = ('dag_id', 'task_id', 'execution_date')
    column_formatters = dict(
        dttm=datetime_f,
        execution_date=datetime_f,
        dag_id=dag_link,
    )
class TaskInstanceModelView(ModelViewOnly):
    """Admin view over TaskInstance rows with bulk state-change and clear actions."""
    verbose_name_plural = "task instances"
    verbose_name = "task instance"
    column_filters = (
        'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
        'queue', 'pool', 'operator', 'start_date', 'end_date')
    named_filter_urls = True
    column_formatters = dict(
        log_url=log_url_formatter,
        task_id=task_instance_link,
        hostname=nobr_f,
        state=state_f,
        execution_date=datetime_f,
        start_date=datetime_f,
        end_date=datetime_f,
        queued_dttm=datetime_f,
        dag_id=dag_link, duration=duration_f)
    column_searchable_list = ('dag_id', 'task_id', 'state')
    # Most recent job first.
    column_default_sort = ('job_id', True)
    form_choices = {
        'state': [
            ('success', 'success'),
            ('running', 'running'),
            ('failed', 'failed'),
        ],
    }
    column_list = (
        'state', 'dag_id', 'task_id', 'execution_date', 'operator',
        'start_date', 'end_date', 'duration', 'job_id', 'hostname',
        'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
        'pool', 'log_url')
    page_size = PAGE_SIZE

    @action('set_running', "Set state to 'running'", None)
    def action_set_running(self, ids):
        self.set_task_instance_state(ids, State.RUNNING)

    @action('set_failed', "Set state to 'failed'", None)
    def action_set_failed(self, ids):
        self.set_task_instance_state(ids, State.FAILED)

    @action('set_success', "Set state to 'success'", None)
    def action_set_success(self, ids):
        self.set_task_instance_state(ids, State.SUCCESS)

    @action('set_retry', "Set state to 'up_for_retry'", None)
    def action_set_retry(self, ids):
        self.set_task_instance_state(ids, State.UP_FOR_RETRY)

    @provide_session
    @action('clear',
            lazy_gettext('Clear'),
            lazy_gettext(
                'Are you sure you want to clear the state of the selected task instance(s)'
                ' and set their dagruns to the running state?'))
    def action_clear(self, ids, session=None):
        """Clear the selected task instances, grouped per DAG.

        Each id is a composite key encoded as 'task_id,dag_id,execution_date'.
        """
        try:
            TI = models.TaskInstance
            dag_to_tis = {}
            for id in ids:
                task_id, dag_id, execution_date = id.split(',')
                ti = session.query(TI).filter(TI.task_id == task_id,
                                              TI.dag_id == dag_id,
                                              TI.execution_date == execution_date).one()
                dag = dagbag.get_dag(dag_id)
                tis = dag_to_tis.setdefault(dag, [])
                tis.append(ti)
            for dag, tis in dag_to_tis.items():
                models.clear_task_instances(tis, session, dag=dag)
            session.commit()
            flash("{0} task instances have been cleared".format(len(ids)))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to clear task instances', 'error')

    @provide_session
    def set_task_instance_state(self, ids, target_state, session=None):
        """Set each selected task instance to target_state.

        Each id encodes 'task_id,dag_id,execution_date', with the date in
        '%Y-%m-%d %H:%M:%S' format.
        """
        try:
            TI = models.TaskInstance
            count = len(ids)
            for id in ids:
                task_id, dag_id, execution_date = id.split(',')
                execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
                ti = session.query(TI).filter(TI.task_id == task_id,
                                              TI.dag_id == dag_id,
                                              TI.execution_date == execution_date).one()
                ti.state = target_state
            session.commit()
            flash(
                "{count} task instances were set to '{target_state}'".format(**locals()))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to set state', 'error')

    def get_one(self, id):
        """
        As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().

        TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
        https://github.com/flask-admin/flask-admin/issues/1226
        """
        task_id, dag_id, execution_date = iterdecode(id)
        execution_date = dateutil.parser.parse(execution_date)
        return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """Admin CRUD view for Connection rows.

    Per-conn-type extra fields are rendered/hidden by connection_form.js and
    serialized into the JSON ``extra`` column on save.
    """
    create_template = 'airflow/conn_create.html'
    edit_template = 'airflow/conn_edit.html'
    list_template = 'airflow/conn_list.html'
    form_columns = (
        'conn_id',
        'conn_type',
        'host',
        'schema',
        'login',
        'password',
        'port',
        'extra',
        'extra__jdbc__drv_path',
        'extra__jdbc__drv_clsname',
        'extra__google_cloud_platform__project',
        'extra__google_cloud_platform__key_path',
        'extra__google_cloud_platform__keyfile_dict',
        'extra__google_cloud_platform__scope',
    )
    verbose_name = "Connection"
    verbose_name_plural = "Connections"
    column_default_sort = ('conn_id', False)
    column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
    form_overrides = dict(_password=PasswordField, _extra=TextAreaField)
    form_widget_args = {
        'is_extra_encrypted': {'disabled': True},
        'is_encrypted': {'disabled': True},
    }
    # Used to customize the form: these elements get rendered and the results
    # are stored in the extra field as json. All of these need to be prefixed
    # with extra__ and then the conn_type ___ as in extra__{conn_type}__name.
    # You can also hide form elements and rename others from the
    # connection_form.js file.
    form_extra_fields = {
        'extra__jdbc__drv_path': StringField('Driver Path'),
        'extra__jdbc__drv_clsname': StringField('Driver Class'),
        'extra__google_cloud_platform__project': StringField('Project Id'),
        'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
        'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),
        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
    }
    form_choices = {
        'conn_type': models.Connection._types
    }

    def on_model_change(self, form, model, is_created):
        """Before save: fold the extra__* form fields into the JSON ``extra`` column."""
        formdata = form.data
        if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
            extra = {
                key: formdata[key]
                for key in self.form_extra_fields.keys() if key in formdata}
            model.extra = json.dumps(extra)

    @classmethod
    def alert_fernet_key(cls):
        """Return True when no fernet key is configured (credentials stored
        unencrypted), so the list view can show a warning."""
        fk = None
        try:
            fk = conf.get('core', 'fernet_key')
        except Exception:
            # Fix: was a bare ``except:``, which also swallowed SystemExit
            # and KeyboardInterrupt.
            pass
        return fk is None

    @classmethod
    def is_secure(cls):
        """
        Used to display a message in the Connection list view making it clear
        that the passwords and `extra` field can't be encrypted.
        """
        is_secure = False
        try:
            import cryptography  # noqa: F401 -- only probing availability
            conf.get('core', 'fernet_key')
            is_secure = True
        except Exception:
            # Fix: was a bare ``except:`` (see alert_fernet_key).
            pass
        return is_secure

    def on_form_prefill(self, form, id):
        """Populate the extra__* form fields from the stored JSON ``extra`` blob."""
        try:
            d = json.loads(form.data.get('extra', '{}'))
        except Exception:
            d = {}
        for field_name in list(self.form_extra_fields.keys()):
            value = d.get(field_name, '')
            if value:
                # Fix: the original rebound the loop variable to the form
                # field object, which obscured the iteration variable.
                getattr(form, field_name).data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """Superuser-only CRUD view over User rows, sorted by username."""

    verbose_name_plural = "Users"
    verbose_name = "User"
    column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, BaseView):
    """Render the installed Airflow version and, when available, the git hash."""

    @expose('/')
    def version(self):
        # Package version as reported by setuptools metadata.
        try:
            airflow_version = pkg_resources.require("apache-airflow")[0].version
        except Exception as e:
            logging.error(e)
            airflow_version = None

        # Git hash written into the package at build time (absent in
        # plain source checkouts).
        git_version = None
        try:
            with open(os.path.join(settings.AIRFLOW_HOME, 'airflow', 'git_version')) as f:
                git_version = f.readline()
        except Exception as e:
            logging.error(e)

        return self.render('airflow/version.html',
                           title="Version Info",
                           airflow_version=airflow_version,
                           git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
    """Display airflow.cfg, either highlighted with per-option sources or raw."""
    @expose('/')
    def conf(self):
        # ?raw=true returns the configuration text without the HTML chrome.
        raw = request.args.get('raw') == "true"
        title = "Airflow Configuration"
        subtitle = conf.AIRFLOW_CONFIG
        if conf.getboolean("webserver", "expose_config"):
            with open(conf.AIRFLOW_CONFIG, 'r') as f:
                config = f.read()
            # (section, key, value, source-of-value) rows for the table view.
            table = [(section, key, value, source)
                     for section, parameters in conf.as_dict(True, True).items()
                     for key, (value, source) in parameters.items()]
        else:
            config = (
                "# Your Airflow administrator chose not to expose the "
                "configuration, most likely for security reasons.")
            table = None
        if raw:
            return Response(
                response=config,
                status=200,
                mimetype="application/text")
        else:
            # Syntax-highlight the INI text for the HTML page.
            code_html = Markup(highlight(
                config,
                lexers.IniLexer(),  # Lexer call
                HtmlFormatter(noclasses=True))
            )
            return self.render(
                'airflow/config.html',
                pre_subtitle=settings.HEADER + " v" + airflow.__version__,
                code_html=code_html, title=title, subtitle=subtitle,
                table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
    """Admin listing of DagModel rows (the table behind the DAGs page)."""
    column_list = ('dag_id', 'owners')
    column_editable_list = ('is_paused',)
    form_excluded_columns = ('is_subdag', 'is_active')
    column_searchable_list = ('dag_id',)
    column_filters = (
        'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
        'last_scheduler_run', 'last_expired')
    # All scheduler-maintained columns are read-only in the form.
    form_widget_args = {
        'last_scheduler_run': {'disabled': True},
        'fileloc': {'disabled': True},
        'is_paused': {'disabled': True},
        'last_pickled': {'disabled': True},
        'pickle_id': {'disabled': True},
        'last_loaded': {'disabled': True},
        'last_expired': {'disabled': True},
        'pickle_size': {'disabled': True},
        'scheduler_lock': {'disabled': True},
        'owners': {'disabled': True},
    }
    column_formatters = dict(
        dag_id=dag_link,
    )
    can_delete = False
    can_create = False
    page_size = PAGE_SIZE
    list_template = 'airflow/list_dags.html'
    named_filter_urls = True

    def get_query(self):
        """
        Default filters for model
        """
        return (
            super(DagModelView, self)
            .get_query()
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
            .filter(~models.DagModel.is_subdag)
        )

    def get_count_query(self):
        """
        Default filters for model

        Bug fix: this previously filtered on ``is_active`` only, while
        get_query() also includes paused DAGs, so Flask-Admin's reported row
        count could disagree with the listed rows. Both queries now filter
        identically.
        """
        return (
            super(DagModelView, self)
            .get_count_query()
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
            .filter(~models.DagModel.is_subdag)
        )
| apache-2.0 |
m0re4u/LeRoT-SCLP | visual_eval/create_histogram.py | 1 | 3474 | from time import sleep
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import argparse
import yaml
import sys
SEAGREEN = (0, 128 / 255., 102 / 255.)  # bar color as an RGB triple in [0, 1]
INTERVAL = 500  # animation frame interval in milliseconds
MINIMUM = 0.03  # floor applied to each value so every bar stays visible
def create_histogram(filename, iterations, stepsize, x_label, y_label, max_x,
                     min_y, max_y):
    """Animate a bar-chart histogram from per-iteration data stored as YAML.

    Every ``stepsize``-th iteration is shown; each iteration's values are
    sorted descending and floored at MINIMUM so bars stay visible.
    ``max_x`` is accepted for CLI compatibility but the x-range is derived
    from the data inside animate().

    Fixes: the data file handle was leaked (never closed); the FuncAnimation
    object was discarded, so it could be garbage-collected and the animation
    stop; ``yaml.load`` without a Loader is rejected by PyYAML >= 6, so the
    plain-data file is read with ``yaml.safe_load``.
    """
    fig = plt.figure(facecolor='white')
    with open(filename, "r") as datafile:
        raw_iterations = yaml.safe_load(datafile)
    y_data = [sorted(data, reverse=True) for data in raw_iterations]
    y_data = [y_data[i * stepsize] for i in range(0, len(y_data) // stepsize)]
    y_data = [[max(MINIMUM, d) for d in data] for data in y_data]
    x_data = list(range(0, len(y_data[0])))
    fargs = [
        x_data,
        y_data,
        fig,
        min_y,
        max_y,
        x_label,
        y_label,
        iterations
    ]
    # Keep a reference: FuncAnimation stops if it is garbage-collected.
    anim = animation.FuncAnimation(
        fig, animate,
        fargs=fargs,
        interval=INTERVAL
    )
    plt.show()
    return anim
def animate(i, x_data, y_data, fig, min_y, max_y, x_label, y_label,
            iterations):
    """Per-frame callback for FuncAnimation: redraw frame i of the bar chart.

    Pauses briefly on the first frame so viewers can orient themselves, and
    exits the whole process once the requested number of iterations has been
    shown.
    """
    if i > iterations:
        sleep(3)
        sys.exit()
    if i == 1:
        sleep(5)
    # Reset figure
    fig.clear()
    # Set text for figure and labels
    plt.xlabel(x_label, size=26)
    plt.ylabel(y_label, size=26)
    plt.title("Iteration" + " " + str(i+1), size=26)
    # Set axes sizes
    max_x = len(x_data)
    plt.ylim(min_y, max_y)
    plt.xlim(0, max_x)
    plt.bar(x_data, y_data[i], color=SEAGREEN)
    # Set visibility of plot frame lines
    ax = plt.axes()
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(True)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(True)
    # Add major grid lines
    ax.grid(
        which='major',
        axis='y',
        linestyle='--',
        linewidth=0.5,
        color='black',
        alpha=0.5
    )
    # Remove the tick marks at top and right
    # NOTE(review): string "on"/"off" values for tick_params are rejected by
    # modern matplotlib (booleans expected) -- confirm the pinned version.
    plt.tick_params(axis="both", which="both", bottom="on", top="off",
                    labelbottom="on", left="on", right="off",
                    labelleft="on")
    return plt
if __name__ == "__main__":
    # CLI entry point: animate a saved per-iteration histogram file.
    parser = argparse.ArgumentParser(description="""
    Construct and run a set of learning experiments. Provide the
    name of the config file, and which parameter you want to be shifted
    between what range, with what steps""")
    parser.add_argument("-f", "--filename", help="name of file with data")
    parser.add_argument("-s", "--stepsize", help="stepsize for the animation",
                        type=int)
    parser.add_argument("-i", "--iterations", type=int,
                        help="number of iterations shown")
    parser.add_argument("-x", "--x_label", help="label for x-axis")
    parser.add_argument("-y", "--y_label", help="label for y-axis"
                        "(HAS TO BE EVALUATION MEASURE LIKE IN CONFIG)")
    parser.add_argument("-max_x", "--max_bound_x",
                        help="maximum number for x-axis", type=int)
    parser.add_argument("-max_y", "--max_bound_y",
                        help="maximum number for y-axis", type=float)
    parser.add_argument("-min_y", "--min_bound_y",
                        help="minimum number for y-axis", type=float)
    args = parser.parse_args()
    create_histogram(args.filename, args.iterations, args.stepsize,
                     args.x_label, args.y_label, args.max_bound_x,
                     args.min_bound_y, args.max_bound_y)
JosephKJ/SDD-RFCN-python | tools/demo_faster_rcnn.py | 1 | 6898 | #!/usr/bin/env python
# --------------------------------------------------------
# R-FCN
# Copyright (c) 2016 Yuwen Xiong
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuwen Xiong
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
# Original PASCAL VOC class list, kept for reference:
# CLASSES = ('__background__',
#            'aeroplane', 'bicycle', 'bird', 'boat',
#            'bottle', 'bus', 'car', 'cat', 'chair',
#            'cow', 'diningtable', 'dog', 'horse',
#            'motorbike', 'person', 'pottedplant',
#            'sheep', 'sofa', 'train', 'tvmonitor')

# Stanford Drone Dataset classes used by this demo.
CLASSES = ('__background__',  # always index 0
           'pedestrian', 'biker', 'skater', 'car',
           'bus', 'cart')

# Network nickname -> (model directory name, caffemodel filename).
NETS = {'ResNet-101': ('ResNet-101',
                       'resnet101_rfcn_final.caffemodel'),
        'ResNet-50': ('ResNet-50',
                      'resnet50_rfcn_final.caffemodel'),
        'SDD': ('ResNet-101',
                'resnet101_rfcn_ohem_iter_110000.caffemodel')}
def vis_detections(im, class_name, dets, image_name, thresh=0.5):
    """Draw detected bounding boxes."""
    # Keep only detections whose score (last column of dets) clears thresh.
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return
    # BGR (OpenCV) -> RGB (matplotlib) channel order.
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]  # currently unused; the text label is disabled below
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
        )
        # Score label and title rendering kept disabled for clean figures:
        # ax.text(bbox[0], bbox[1] - 2,
        #         '{:s} {:.3f}'.format(class_name, score),
        #         bbox=dict(facecolor='blue', alpha=0.5),
        #         fontsize=14, color='white')
    # ax.set_title(('{} detections with '
    #               'p({} | box) >= {:.1f}').format(class_name, class_name,
    #                                               thresh),
    #              fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
    # Optional output paths kept for reference:
    # plt.savefig('./img_output/ddo_80/out_' + class_name + '_' + image_name, bbox_inches='tight')
    # plt.savefig('./img_output/ddo_yt/out_' + class_name + '_' + image_name, bbox_inches='tight')
    # plt.savefig('./img_output/ddo_80_ee_without_enhancement/out_' + class_name + '_' + image_name, bbox_inches='tight')
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""
    # Load the demo image
    im_file = os.path.join('/home/joseph/Dataset/iith_drone_data/convocation', image_name)
    # Alternative input folders kept for reference:
    # im_file = os.path.join('/home/joseph/Dataset/iith_drone_data/orijinal_resolution', image_name)
    # im_file = os.path.join('/home/joseph/Dataset/youtube', image_name)
    # im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        # NOTE(review): columns 4:8 are taken for every class regardless of
        # cls_ind -- presumably class-agnostic box regression; confirm
        # against the network's output layout.
        cls_boxes = boxes[:, 4:8]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        # Only pedestrian detections are visualized.
        if cls == 'pedestrian':
            vis_detections(im, cls, dets, image_name, thresh=CONF_THRESH)
def parse_args():
    """Parse command-line arguments for the demo."""
    cli = argparse.ArgumentParser(description='Faster R-CNN demo')
    cli.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                     default=0, type=int)
    cli.add_argument('--cpu', dest='cpu_mode',
                     help='Use CPU mode (overrides --gpu)',
                     action='store_true')
    cli.add_argument('--net', dest='demo_net', help='Network to use [ResNet-101]',
                     choices=NETS.keys(), default='SDD')
    return cli.parse_args()
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    args = parse_args()
    # NOTE(review): a VGG16 faster_rcnn_end2end prototxt is paired with a
    # caffemodel chosen below, while NETS above describes ResNet models --
    # confirm the prototxt/weights pairing is intentional.
    prototxt = os.path.join(cfg.MODELS_DIR, 'VGG16',
                            'faster_rcnn_end2end', 'test.prototxt')
    caffemodel = os.path.join('/home/joseph/workspace/SDD-RFCN-python'
                              '/output/faster_rcnn_end2end_sdd/sdd_trainval','Enhanced_annotation_all_from_one_vgg16_faster_rcnn_sdd_iter_70000.caffemodel')
    # Alternative weight files kept for reference:
    # '/output/faster_rcnn_end2end_sdd/sdd_trainval','rpn_base_size_8_all_from_one_vgg16_faster_rcnn_sdd_iter_70000.caffemodel')
    # caffemodel = os.path.join('/home/joseph/workspace/SDD-RFCN-python'
    # '/output/faster_rcnn_end2end_sdd/sdd_trainval','vgg16_faster_rcnn_sdd_iter_30000.caffemodel')
    # '/output/faster_rcnn_end2end_sdd/sdd_trainval','all_from_one_vgg16_faster_rcnn_sdd_iter_70000.caffemodel')
    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\n').format(caffemodel))
    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    print '\n\nLoaded network {:s}'.format(caffemodel)
    # Warmup on a dummy image
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _= im_detect(net, im)
    # Image list to run; alternative ranges kept for reference.
    im_names = ['iith_convo_883.jpg']
    # im_names = ['iith_06_100.jpg', 'iith_06_250.jpg', 'iith_06_500.jpg', 'iith_06_750.jpg', 'iith_06_1000.jpg',
    #             'iith_06_1250.jpg', 'iith_06_1500.jpg', 'iith_06_1750.jpg', 'iith_06_2000.jpg', 'iith_06_2500.jpg']
    # im_names = ['bookstore_video0_12345.jpg']
    # im_names = ['iith_convo_' + str(i) + '.jpg' for i in range(506, 884)]
    # im_names = ['iith_06_' + str(i) + '.jpg' for i in range(856, 1200)]
    # im_names = ['youtube' + str(i) + '.jpg' for i in range(1, 342)]
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    for im_name in im_names:
        print 'Demo for {}'.format(im_name)
        demo(net, im_name)
    print 'With enhanced dataset.'
    plt.show()
joernhees/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)

# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause

# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor

# Create the dataset
# NOTE: the draw order from this seeded RNG determines the exact data and
# the fitted models; do not reorder the sampling calls.
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])

# Fit regression model: one plain tree vs 300 boosted trees (AdaBoost.R2).
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
                           n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)

# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)

# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
ColdMatter/EDMSuite | MoleculeMOTScripts/analysis3.py | 1 | 60116 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 15 06:30:01 2019
@author: arijit
"""
from __future__ import print_function
import numpy as np
import os,zipfile
from PIL import Image
from scipy.optimize import curve_fit
from scipy import optimize
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import scipy.constants as cn
import re
import seaborn as sns
sns.set()
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
# Model functions passed to scipy.optimize.curve_fit below.
# Fix: these were lambdas assigned to names (PEP 8 E731); named defs give
# useful tracebacks and allow docstrings.
def linear(x, m, c):
    """Straight line m*x + c."""
    return m * x + c


def exponential(x, s):
    """Unit-amplitude exponential decay with lifetime s."""
    return np.exp(-(x) / s)


def exponentialOffset(x, a, c, s):
    """a * exp(-(x-c)/s); despite the name, c is a delay, not an offset."""
    return a * np.exp(-(x - c) / s)


def exponentialAmp(x, a, s):
    """a * exp(-x/s)."""
    return a * np.exp(-x / s)


def exponentialAmpDelay(x, a, c, s):
    """a * exp(-(x-c)/s)."""
    return a * np.exp(-(x - c) / s)


def exponentialAmpDelayOffset(x, a, c, s, o):
    """a * exp(-(x-c)/s) + o."""
    return a * np.exp(-(x - c) / s) + o


def gaussian(x, a, c, s):
    """Gaussian with amplitude a, centre c, width s and zero baseline."""
    return a * np.exp(-(x - c) ** 2 / (2 * s ** 2))


def gaussianOffset(x, a, c, s, o):
    """Gaussian on baseline o; the amplitude is forced positive via abs(a)."""
    return np.abs(a) * np.exp(-(x - c) ** 2 / (2 * s ** 2)) + o


def invSinc(x, a, b, c, d):
    """Dip model a - |b| * sinc((x-c)*d)."""
    return a - np.abs(b) * np.sinc((x - c) * d)


def sinc(x, a, b, c, d):
    """Peak model a + |b| * sinc((x-c)*d)."""
    return a + np.abs(b) * np.sinc((x - c) * d)
def atoi(text):
    """Return int(text) when text is purely digits, otherwise text unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    """Sort key that orders embedded integers numerically ('img2' < 'img10')."""
    chunks = re.split(r'(\d+)', text)
    return [atoi(chunk) for chunk in chunks]
def linearFit(x, y, sigma=None):
    """Fit y = m*x + c with scipy.optimize.curve_fit.

    sigma is forwarded as per-point uncertainties. Returns
    (popt, variances, isFit): best-fit [m, c], the diagonal of the
    covariance matrix, and a success flag; on failure popt is the initial
    guess and the variances are zero.
    """
    # Slope guess from the endpoints; intercept guess from whichever extreme
    # of y the line starts at, given the slope sign.
    m_trial = (y[-1] - y[0]) / (x[-1] - x[0])
    c_trial = np.max(y) if m_trial < 0 else np.min(y)
    p0 = [m_trial, c_trial]
    try:
        popt, pcov = curve_fit(linear, x, y, sigma=sigma, p0=p0)
        isFit = True
    except Exception:
        # Fix: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt.
        popt = np.array(p0)
        pcov = np.zeros((2, 2))
        isFit = False
    return popt, np.diag(pcov), isFit
def expFit(x, y, sigma=None):
    """Fit y = exp(-x/s) (single free lifetime s) with curve_fit.

    Returns (popt, variances, isFit) as in linearFit.
    """
    # Lifetime guess from the ratio of the endpoint values.
    # (The original also computed unused amplitude/centre trials; removed.)
    s_trial = np.abs((x[-1] - x[0]) / np.log(np.abs(y[-1] / y[0])))
    p0 = [s_trial]
    try:
        popt, pcov = curve_fit(exponential, x, y, sigma=sigma, p0=p0)
        isFit = True
    except Exception:  # fix: was a bare except
        popt = np.array(p0)
        # Fix: the failure path used np.zeros((3, 3)) although the model has
        # a single parameter, so the returned variance vector changed length
        # depending on whether the fit converged.
        pcov = np.zeros((1, 1))
        isFit = False
    return popt, np.diag(pcov), isFit
def expFitOffset(x, y, sigma=None):
    """Fit y = a*exp(-(x-c)/s) with curve_fit (absolute_sigma=True).

    Despite the name there is no additive offset; c is a delay.
    Returns (popt, variances, isFit) as in linearFit.
    """
    a_trial = np.max(y)
    c_trial = x[np.argmax(y)]
    s_trial = np.abs((x[-1] - x[0]) / np.log(np.abs(y[-1] / y[0])))
    # (The original also computed an unused offset trial; removed.)
    p0 = [a_trial, c_trial, s_trial]
    try:
        popt, pcov = curve_fit(exponentialOffset, x, y, sigma=sigma, p0=p0,
                               absolute_sigma=True)
        isFit = True
    except Exception:  # fix: was a bare except
        popt = np.array(p0)
        # Fix: was np.zeros((4, 4)) for a 3-parameter model.
        pcov = np.zeros((3, 3))
        isFit = False
    return popt, np.diag(pcov), isFit
def expAmpFit(x, y, sigma=None):
    """Fit y = a*exp(-x/s) with curve_fit (absolute_sigma=True).

    Returns (popt, variances, isFit) as in linearFit.
    """
    a_trial = np.max(y)
    # Fixed lifetime guess; the data-driven estimate was abandoned upstream:
    # np.abs((x[-1]-x[0]) / np.log(np.abs(y[-1]/y[0])))
    s_trial = 100000
    p0 = [a_trial, s_trial]
    try:
        popt, pcov = curve_fit(exponentialAmp, x, y, sigma=sigma, p0=p0,
                               absolute_sigma=True)
        isFit = True
    except Exception:  # fix: was a bare except
        popt = np.array(p0)
        # Fix: was np.zeros((4, 4)) for a 2-parameter model.
        pcov = np.zeros((2, 2))
        isFit = False
    return popt, np.diag(pcov), isFit
def gaussianFit(x, y, sigma=None):
    """Fit a zero-baseline Gaussian a*exp(-(x-c)^2/(2 s^2)) with curve_fit.

    Returns (popt, variances, isFit) as in linearFit.
    """
    loc_trial = np.argmax(y)
    halfmax_y = np.max(y) / 2.0
    a_trial = y[loc_trial]
    c_trial = x[loc_trial]
    # Width guess from the number of samples above half maximum (FWHM-like).
    s_trial = np.abs(x[0] - x[1]) * len(y[y > halfmax_y]) / 2.0
    p0 = [a_trial, c_trial, s_trial]
    try:
        popt, pcov = curve_fit(gaussian, x, y, sigma=sigma, p0=p0)
        isFit = True
    except Exception:
        # Fix: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt.
        popt = np.array(p0)
        pcov = np.zeros((3, 3))
        isFit = False
    return popt, np.diag(pcov), isFit
def gaussianFitOffset(x, y, sigma=None):
    """Fit a Gaussian on a flat baseline with curve_fit.

    The model forces the amplitude positive (abs(a) inside gaussianOffset).
    Returns (popt, variances, isFit) as in linearFit.
    """
    loc_trial = np.argmax(y)
    halfmax_y = np.max(y) / 2.0
    o_trial = np.min(y)
    a_trial = y[loc_trial]
    c_trial = x[loc_trial]
    # Width guess from the number of samples above half maximum (FWHM-like).
    s_trial = np.abs(x[0] - x[1]) * len(y[y > halfmax_y]) / 2.0
    p0 = [a_trial, c_trial, s_trial, o_trial]
    try:
        popt, pcov = curve_fit(gaussianOffset, x, y, sigma=sigma, p0=p0)
        isFit = True
    except Exception:  # fix: was a bare except
        popt = np.array(p0)
        pcov = np.zeros((4, 4))
        isFit = False
    return popt, np.diag(pcov), isFit
def invSincFit(x, y, sigma=None):
    """Fit the dip model a - |b|*sinc((x-c)*d) with curve_fit.

    The frequency-like parameter d starts from a fixed guess of 200.
    Returns (popt, variances, isFit) as in linearFit.
    """
    p0 = [np.max(y), np.min(y), x[np.argmin(y)], 200]
    try:
        popt, pcov = curve_fit(invSinc, x, y, sigma=sigma, p0=p0)
        isFit = True
    except Exception:  # fix: was a bare except
        popt = np.array(p0)
        pcov = np.zeros((4, 4))
        isFit = False
    return popt, np.diag(pcov), isFit
def sincFit(x, y, sigma=None):
    """Fit the peak model a + |b|*sinc((x-c)*d) with curve_fit.

    The frequency-like parameter d starts from a fixed guess of 200.
    Returns (popt, variances, isFit) as in linearFit.
    """
    p0 = [np.min(y), np.max(y), x[np.argmax(y)], 200]
    try:
        popt, pcov = curve_fit(sinc, x, y, sigma=sigma, p0=p0)
        isFit = True
    except Exception:  # fix: was a bare except
        popt = np.array(p0)
        pcov = np.zeros((4, 4))
        isFit = False
    return popt, np.diag(pcov), isFit
def injector(fileNoStart, fileNoStop, NoImages,
             fileNameString,
             fileSkip=1,
             remotePath="//PH-TEW105/Users/rfmot/Desktop/AbsImages/",
             dirPath="C:/Users/cafmot/Box Sync/CaF MOT/MOTData/MOTMasterData/"):
    """Move camera frames from remotePath into the matching MOTMaster zips.

    Expects exactly NoImages frames per non-skipped shot for file numbers
    fileNoStart..fileNoStop inclusive; frames are appended to each zip in
    natural-sort order and then deleted from remotePath.
    """
    imgs = os.listdir(remotePath)
    imgs.sort(key=natural_keys)
    # Fix: the original mismatch branches compared against
    # (fileNoStart - fileNoStop + 1) * NoImages -- negative for any
    # multi-file range -- and omitted fileSkip, so the "too few" branch
    # could never trigger correctly. Compute the expected count once.
    expected = (fileNoStop - fileNoStart + 1) * NoImages / fileSkip
    if len(imgs) == expected:
        print('Inserting images to the zip files...')
        l = 0
        for fileNo in range(fileNoStart, fileNoStop + 1, fileSkip):
            filepath = os.path.join(dirPath, fileNameString + '_' + str(fileNo).zfill(3) + '.zip')
            with zipfile.ZipFile(filepath, 'a') as archive:
                files = archive.namelist()
                for _ in range(NoImages):
                    # Skip frames already present (e.g. from a retried run).
                    if imgs[l] not in files:
                        archive.write(os.path.join(remotePath, imgs[l]), imgs[l])
                    l += 1
        # Only delete the source frames after every zip was updated.
        for img in imgs:
            os.remove(os.path.join(remotePath, img))
    elif len(imgs) == 0:
        print('No Image to insert')
    elif len(imgs) < expected:
        print('There seems to be less number of images than required!')
    else:
        print('There are more images than expected!')
class Analysis():
    def __init__(self, args={}):
        # NOTE(review): mutable default argument; harmless here because the
        # dict is only read, but pass a dict explicitly to be safe.
        # Copy every configuration entry onto the instance as an attribute.
        for key in args:
            self.__dict__[key] = args[key]
        # Image-name prefix filter used by readFromZip ('' accepts all frames).
        self.diffStr = ''
    def __setattr__(self, name, value):
        # Mirrors the default object behaviour (plain __dict__ assignment);
        # kept as an explicit hook point for attribute writes.
        self.__dict__[name] = value
def getFilepath(self,fileNo):
"""This method create the full filepath from the fileNo input
"""
return os.path.join(self.dirPath,
self.fileNameString+'_'+str(fileNo).zfill(3)+'.zip')
def convertRawToCount(self,raw):
return (2**self.bitDepth-1)*raw
def convertCountsToPhotons(self,counts):
return counts*(np.float(self.fullWellCapacity)/\
(2**self.bitsPerChannel-1))/self.etaQ
def convertPhotonsToNumber(self,photonCount):
return photonCount/(self.exposureTime*self.gamma*self.collectionSolidAngle)
def readFromZip(self,fileNo):
archive=zipfile.ZipFile(self.getFilepath(fileNo),'r')
imgs=[]
files=archive.namelist()
files.sort(key=natural_keys)
for f in files:
if f[-3:]=='tif':
if self.diffStr=='':
with archive.open(f) as filename:
imgs.append(np.array(Image.open(filename),dtype=float))
elif f[0]==self.diffStr:
with archive.open(f) as filename:
imgs.append(np.array(Image.open(filename),dtype=float))
if f[-14:]=='parameters.txt':
with archive.open(f) as filename:
scriptParams=filename.readlines()
if f[-18:]=='hardwareReport.txt':
with archive.open(f) as filename:
hardwareParams=filename.readlines()
tempDict={}
for param in scriptParams:
paramSplit=param.split(b'\t')
tempDict[paramSplit[0]]=np.float(paramSplit[1])
for param in hardwareParams:
paramSplit=param.split(b'\t')
tempDict[paramSplit[0]]=np.float(paramSplit[1]) if \
paramSplit[1].isdigit() else paramSplit[1]
paramDict={}
for key in tempDict:
paramDict[key.decode("utf-8")]=tempDict[key]
return np.array(imgs),paramDict
def readFromZipCaFRb(self,fileNo, prefix):
archive=zipfile.ZipFile(self.getFilepath(fileNo),'r')
imgs=[]
files=archive.namelist()
files.sort(key=natural_keys)
for f in files:
if ((f[-3:]=='tif') and (f[0]==prefix)):
if self.diffStr=='':
with archive.open(f) as filename:
imgs.append(np.array(Image.open(filename),dtype=float))
elif f[0]==self.diffStr:
with archive.open(f) as filename:
imgs.append(np.array(Image.open(filename),dtype=float))
if f[-14:]=='parameters.txt':
with archive.open(f) as filename:
scriptParams=filename.readlines()
if f[-18:]=='hardwareReport.txt':
with archive.open(f) as filename:
hardwareParams=filename.readlines()
tempDict={}
for param in scriptParams:
paramSplit=param.split(b'\t')
tempDict[paramSplit[0]]=np.float(paramSplit[1])
for param in hardwareParams:
paramSplit=param.split(b'\t')
tempDict[paramSplit[0]]=np.float(paramSplit[1]) if \
paramSplit[1].isdigit() else paramSplit[1]
paramDict={}
for key in tempDict:
paramDict[key.decode("utf-8")]=tempDict[key]
return np.array(imgs),paramDict
def getImagesFromOneTriggerData(self,fileNo):
imgs,paramsDict=self.readFromZip(fileNo)
return imgs[1:],paramsDict
def getImagesFromTwoTriggerData(self,fileNo):
imgs,paramsDict=self.readFromZip(fileNo)
return imgs[2::2,:,:],imgs[3::2,:,:],paramsDict
def getImagesFromThreeTriggerData(self,fileNo):
imgs,paramsDict=self.readFromZip(fileNo)
return imgs[0::3,:,:],imgs[1::3,:,:],imgs[2::3,:,:],paramsDict
def getImagesFromFourTriggerData(self,fileNo):
imgs,paramsDict=self.readFromZip(fileNo)
return imgs[0::4,:,:],imgs[1::4,:,:],imgs[2::4,:,:],imgs[3::4,:,:],paramsDict
def getAvgImageFromOneTriggerData(self,fileNo):
imgs,_=self.getImagesFromOneTriggerData(fileNo)
return np.mean(imgs,axis=0)
def getAvgImageFromTwoTriggerData(self,fileNo):
firstImages,secondImages,_=self.getImagesFromTwoTriggerData(fileNo)
return np.mean(firstImages,axis=0),\
np.mean(secondImages,axis=0)
def getAvgImageFromThreeTriggerData(self,fileNo):
firstImages,secondImages,thirdImages=self.getImagesFromThreeTriggerData(fileNo)
return np.mean(firstImages,axis=0),\
np.mean(secondImages,axis=0),\
np.mean(thirdImages,axis=0)
def cropImages(self,imageArray):
if self.crop:
h_top=int(self.cropCentre[0]-self.cropHeight/2)
h_bottom=int(self.cropCentre[0]+self.cropHeight/2)
w_left=int(self.cropCentre[1]-self.cropWidth/2)
w_right=int(self.cropCentre[1]+self.cropWidth/2)
return imageArray[:,h_top:h_bottom,w_left:w_right]
else:
return imageArray
def cropSingleImage(self,imageArray):
if self.crop:
h_top=self.cropCentre[1]-self.cropHeight/2
h_bottom=self.cropCentre[1]+self.cropHeight/2
w_left=self.cropCentre[0]-self.cropWidth/2
w_right=self.cropCentre[0]+self.cropWidth/2
return imageArray[h_top:h_bottom,w_left:w_right]
else:
return imageArray
def getImageNumber(self,imageArray):
totalCount=np.sum(imageArray,axis=(1,2))
totalMolecules=self.convertPhotonsToNumber(
self.convertCountsToPhotons(totalCount))
return totalMolecules
    def singleImageCloudSize(self, imageArray):
        """Fit offset Gaussians to the row/column sums of one frame.

        Pixel indices are converted to physical units with
        pixelSize * binSize / magFactor. Returns
        (radialX, radialY, radial popt, radial fit flag,
         axialX, axialY, axial popt, axial fit flag).
        """
        # 1-D profiles: sum over rows -> radial, sum over columns -> axial.
        radialY = np.sum(imageArray, axis=0)
        axialY = np.sum(imageArray, axis=1)
        radialYLength = len(radialY)
        axialYLength = len(axialY)
        # Physical coordinate per pixel.
        radialX = self.pixelSize * (self.binSize / self.magFactor) * np.arange(0, radialYLength)
        axialX = self.pixelSize * (self.binSize / self.magFactor) * np.arange(0, axialYLength)
        # Savitzky-Golay smoothing is currently disabled; the raw profiles
        # are passed straight to the fit.
        smoothRadialY = radialY  # savgol_filter(radialY,self.smoothingWindow,3)
        smoothAxialY = axialY  # savgol_filter(axialY,self.smoothingWindow,3)
        radialpopt, radialpcov, radialIsFit = gaussianFitOffset(radialX, radialY)
        axialpopt, axialpcov, axialIsFit = gaussianFitOffset(axialX, axialY)
        return radialX, radialY, \
            radialpopt, radialIsFit, \
            axialX, axialY, \
            axialpopt, axialIsFit
    def getImageSizes(self, imageArray):
        """Run singleImageCloudSize on every frame of a stack.

        Returns the per-frame profiles, Gaussian fit parameters and
        fit-success flags, radial quantities first, each as a numpy array
        over frames.
        """
        n = np.shape(imageArray)[0]
        radialX = []
        radialY = []
        axialX = []
        axialY = []
        radialpopts = []
        radialisfits = []
        axialpopts = []
        axialisfits = []
        for i in range(n):
            radialXI, radialYI, radialpoptsI, radialisfitsI, axialXI, axialYI, \
                axialpoptsI, axialisfitsI = self.singleImageCloudSize(imageArray[i, :, :])
            radialX.append(radialXI)
            radialY.append(radialYI)
            radialpopts.append(radialpoptsI)
            radialisfits.append(radialisfitsI)
            axialX.append(axialXI)
            axialY.append(axialYI)
            axialpopts.append(axialpoptsI)
            axialisfits.append(axialisfitsI)
        return np.array(radialX), np.array(radialY), \
            np.array(radialpopts), \
            np.array(radialisfits), \
            np.array(axialX), np.array(axialY), \
            np.array(axialpopts), \
            np.array(axialisfits)
    def singleImageProcessing(self, fileNo, fileNoBG, param):
        """Load a single-trigger shot, subtract the averaged background shot
        (when fileNoBG is not None), optionally crop, and return the frames
        together with the value of the requested parameter."""
        images, paramsDict = self.getImagesFromOneTriggerData(fileNo)
        if fileNoBG is not None:
            imagesBG = self.getAvgImageFromOneTriggerData(fileNoBG)
            images = images - imagesBG
        if self.crop:
            images = self.cropImages(images)
        return images, paramsDict[param]
def doubleImageProcessing(self,fileNo,fileNoBG,param):
firstImages,secondImages,paramsDict=self.getImagesFromTwoTriggerData(fileNo)
if fileNoBG is not None:
firstImageAvgBG,secondImageAvgBG=self.getAvgImageFromTwoTriggerData(fileNoBG)
firstImages=firstImages-firstImageAvgBG
secondImages=secondImages-secondImageAvgBG
if self.crop:
firstImages=self.cropImages(firstImages)
secondImages=self.cropImages(secondImages)
return firstImages, secondImages, paramsDict[param]
    def trippleImageProcessing(self,fileNo,param):
        """Build an optical-depth (OD) image from a cloud/probe/background triple.

        Loads the three-trigger data set, background-subtracts the probe
        and cloud frames, and computes OD = ln(probe/cloud) pixel-wise.
        Returns (od, value of `param` from the shot's parameter dict).
        """
        clouds,probes,bgs,paramsDict=self.getImagesFromThreeTriggerData(fileNo)
        probes=probes-bgs
        clouds=clouds-bgs
        # Guard the log: clamp non-positive denominator pixels to 1 count.
        clouds[clouds<=0]=1.0
        od=np.log(probes/clouds)
        # Zero out any remaining NaN/inf pixels (e.g. probe <= 0 after BG subtraction).
        od[np.isnan(od)] = 0.0
        od[od == -np.inf] = 0.0
        od[od == np.inf] = 0.0
        if self.crop:
            od=self.cropImages(od)
        if self.od_correction:
            # High-OD saturation correction; od_s is the assumed saturation
            # optical depth of the imaging system -- TODO confirm value.
            od_s = 8
            od = od+np.log((1-np.exp(-od_s))/(1-np.exp(od-od_s)))
        return od,paramsDict[param]
def singleImageNumberRange(self,fileNoStart,fileNoStop,fileNoBG,param):
paramsValList=[]
numbersList=[]
images=[]
for fileNo in range(fileNoStart,fileNoStop+1):
if fileNo not in self.fileNoExclude:
imageSubBG,paramsVal=self.singleImageProcessing(fileNo,fileNoBG,param)
images.append(imageSubBG)
numbers=self.getImageNumber(imageSubBG)
numbersList.append(numbers)
paramsValList.append(paramsVal)
self.firstImage=np.array(images)
self.firstImageNumbers=np.array(numbersList)
self.paramVals=np.array(paramsValList,dtype=float)
def singleImageSizeRange(self,fileNoStart,fileNoStop,fileNoBG,param):
images=[]
radialXList=[]
radialYList=[]
axialXList=[]
axialYList=[]
radialpoptsList=[]
radialisfitsList=[]
axialpoptsList=[]
axialisfitsList=[]
paramsValList=[]
for fileNo in range(fileNoStart,fileNoStop+1):
if fileNo not in self.fileNoExclude:
imageSubBG,paramsVal=self.singleImageProcessing(fileNo,fileNoBG,param)
radialX,radialY,radialpopts,radialisfits,\
axialX,axialY,axialpopts,axialisfits=self.getImageSizes(imageSubBG)
images.append(imageSubBG)
radialXList.append(radialX)
radialYList.append(radialY)
radialpoptsList.append(radialpopts)
radialisfitsList.append(radialisfits)
axialXList.append(axialX)
axialYList.append(axialY)
axialpoptsList.append(axialpopts)
radialisfitsList.append(radialisfits)
paramsValList.append(paramsVal)
self.firstImage=np.array(images)
self.secondImage=np.zeros_like(self.firstImage)
self.firstImageRadialX=np.array(radialXList)
self.firstImageRadialY=np.array(radialYList)
self.firstImageAxialX=np.array(axialXList)
self.firstImageAxialY=np.array(axialYList)
self.firstImageRadialFitParams=np.array(radialpoptsList)
self.firstImageAxialFitParams=np.array(axialpoptsList)
self.firstImageRadialIsFit=np.array(radialisfitsList)
self.firstImageAxialIsFit=np.array(axialisfitsList)
self.paramVals=np.array(paramsValList,dtype=float)
def doubleImageNumberRange(self,fileNoStart,fileNoStop,fileNoBG,param):
paramsValList=[]
numbersFirst=[]
numbersSecond=[]
firstImages=[]
secondImages=[]
for fileNo in range(fileNoStart,fileNoStop+1):
if fileNo not in self.fileNoExclude:
firstImagesSubBG,secondImagesSubBG,paramsVal=\
self.doubleImageProcessing(fileNo,fileNoBG,param)
firstImageNumbers=self.getImageNumber(firstImagesSubBG)
secondImageNumbers=self.getImageNumber(secondImagesSubBG)
firstImages.append(firstImagesSubBG)
secondImages.append(secondImagesSubBG)
numbersFirst.append(firstImageNumbers)
numbersSecond.append(secondImageNumbers)
paramsValList.append(paramsVal)
self.firstImage=np.array(firstImages)
self.secondImage=np.array(secondImages)
self.firstImageNumbers=np.array(numbersFirst)
self.secondImageNumbers=np.array(numbersSecond)
self.paramVals=np.array(paramsValList,dtype=float)
def doubleImageSizeRange(self,fileNoStart,fileNoStop,fileNoBG,param):
radialpoptsFirstList=[]
radialisfitsFirstList=[]
axialpoptsFirstList=[]
axialisfitsFirstList=[]
radialpoptsSecondList=[]
radialisfitsSecondList=[]
axialpoptsSecondList=[]
axialisfitsSecondList=[]
paramsValList=[]
firstImages=[]
secondImages=[]
radialXFirstList=[]
radialYFirstList=[]
axialXFirstList=[]
axialYFirstList=[]
radialXSecondList=[]
radialYSecondList=[]
axialXSecondList=[]
axialYSecondList=[]
for fileNo in range(fileNoStart,fileNoStop+1):
if fileNo not in self.fileNoExclude:
firstImagesSubBG,secondImagesSubBG,paramsVal=\
self.doubleImageProcessing(fileNo,fileNoBG,param)
radialXFirst,radialYFirst,radialpoptsFirst,radialisfitsFirst,\
axialXFirst,axialYFirst,axialpoptsFirst,axialisfitsFirst=\
self.getImageSizes(firstImagesSubBG)
radialXSecond,radialYSecond,radialpoptsSecond,radialisfitsSecond,\
axialXSecond,axialYSecond,axialpoptsSecond,axialisfitsSecond=\
self.getImageSizes(secondImagesSubBG)
firstImages.append(firstImagesSubBG)
secondImages.append(secondImagesSubBG)
radialXFirstList.append(radialXFirst)
radialYFirstList.append(radialYFirst)
axialXFirstList.append(axialXFirst)
axialYFirstList.append(axialYFirst)
radialXSecondList.append(radialXSecond)
radialYSecondList.append(radialYSecond)
axialXSecondList.append(axialXSecond)
axialYSecondList.append(axialYSecond)
radialpoptsFirstList.append(radialpoptsFirst)
radialisfitsFirstList.append(radialisfitsFirst)
axialpoptsFirstList.append(axialpoptsFirst)
axialisfitsFirstList.append(axialisfitsFirst)
radialpoptsSecondList.append(radialpoptsSecond)
radialisfitsSecondList.append(radialisfitsSecond)
axialpoptsSecondList.append(axialpoptsSecond)
axialisfitsSecondList.append(axialisfitsSecond)
paramsValList.append(paramsVal)
self.firstImage=np.array(firstImages)
self.secondImage=np.array(secondImages)
self.firstImageRadialX=np.array(radialXFirstList)
self.firstImageRadialY=np.array(radialYFirstList)
self.firstImageRadialFitParams=np.array(radialpoptsFirstList)
self.firstImageRadialIsFit=np.array(radialisfitsFirstList)
self.firstImageAxialX=np.array(axialXFirstList)
self.firstImageAxialY=np.array(axialYFirstList)
self.firstImageAxialFitParams=np.array(axialpoptsFirstList)
self.firstImageAxialIsFit=np.array(axialisfitsFirstList)
self.secondImageRadialX=np.array(radialXSecondList)
self.secondImageRadialY=np.array(radialYSecondList)
self.secondImageAxialX=np.array(axialXSecondList)
self.secondImageAxialY=np.array(axialYSecondList)
self.secondImageRadialFitParams=np.array(radialpoptsSecondList)
self.secondImageRadialIsFit=np.array(radialisfitsSecondList)
self.secondImageAxialFitParams=np.array(axialpoptsSecondList)
self.secondImageAxialIsFit=np.array(axialisfitsSecondList)
self.paramVals=np.array(paramsValList,dtype=float)
def trippleImageNumberRange(self,fileNoStart,fileNoStop,fileNoBG,param):
numbersList=[]
paramsValList=[]
images=[]
for fileNo in range(fileNoStart,fileNoStop+1):
if fileNo not in self.fileNoExclude:
od,paramsVal=self.trippleImageProcessing(fileNo,param)
numbers=(self.pixelSize*(self.binSize/self.magFactor))**2*\
np.sum(od,axis=(1,2))*self.s0
images.append(od)
numbersList.append(numbers)
paramsValList.append(paramsVal)
self.firstImage=np.array(images)
self.firstImageNumbers=np.array(numbersList)
self.paramVals=np.array(paramsValList,dtype=float)
def trippleImageSizeRange(self,fileNoStart,fileNoStop,fileNoBG,param):
radialpoptsList=[]
radialisfitsList=[]
axialpoptsList=[]
axialisfitsList=[]
paramsValList=[]
images=[]
radialXList=[]
radialYList=[]
axialXList=[]
axialYList=[]
for fileNo in range(fileNoStart,fileNoStop+1):
if fileNo not in self.fileNoExclude:
od,paramsVal=self.trippleImageProcessing(fileNo,param)
radialX,radialY,radialpopts,radialisfits,\
axialX,axialY,axialpopts,axialisfits=\
self.getImageSizes(od)
images.append(od)
radialpoptsList.append(radialpopts)
radialisfitsList.append(radialisfits)
axialpoptsList.append(axialpopts)
radialisfitsList.append(radialisfits)
axialisfitsList.append(axialisfits)
radialXList.append(radialX)
radialYList.append(radialY)
axialXList.append(axialX)
axialYList.append(axialY)
paramsValList.append(paramsVal)
self.firstImage=np.array(images)
self.firstImageRadialX=np.array(radialXList)
self.firstImageRadialY=np.array(radialYList)
self.firstImageAxialX=np.array(axialXList)
self.firstImageAxialY=np.array(axialYList)
self.firstImageRadialFitParams=np.array(radialpoptsList)
self.firstImageAxialFitParams=np.array(axialpoptsList)
self.firstImageRadialIsFit=np.array(radialisfitsList)
self.firstImageAxialIsFit=np.array(axialisfitsList)
self.paramVals=np.array(paramsValList,dtype=float)
def getNumber(self,fileNoStart,fileNoStop,fileNoBG,param):
if self.imagingType=='Fluoresence':
if self.trigType=='single':
self.singleImageNumberRange(fileNoStart,fileNoStop,
fileNoBG,param)
self.firstImageMeanNumbers=np.mean(self.firstImageNumbers,axis=1)
self.firstImageStdErrorNumbers=np.std(self.firstImageNumbers,axis=1)\
/np.sqrt(np.shape(self.firstImageNumbers)[1])
if self.fit:
self.fitVariations(self.paramVals,
self.firstImageMeanNumbers,
self.firstImageStdErrorNumbers,
self.fitType)
elif self.isLifetime:
self.lifetime(self.paramVals,
self.firstImageMeanNumbers,
self.firstImageStdErrorNumbers)
else:
self.displayImageNumbersVariation(self.paramVals,
self.firstImageMeanNumbers,
self.firstImageStdErrorNumbers)
if self.showFirstImage:
self.displayImage('First Image',self.firstImage)
elif self.trigType=='double':
self.doubleImageNumberRange(fileNoStart,fileNoStop,
fileNoBG,param)
ratio=self.secondImageNumbers/self.firstImageNumbers
self.ratioImageMeanNumbers=np.mean(ratio,axis=1)
self.ratioImageStdErrorNumbers=np.std(ratio,axis=1)\
/np.sqrt(np.shape(ratio)[1])
if self.fit:
self.fitVariations(self.paramVals,
self.ratioImageMeanNumbers,
self.ratioImageStdErrorNumbers,
self.fitType)
elif self.isLifetime:
self.lifetime(self.paramVals,
self.ratioImageMeanNumbers,
self.ratioImageStdErrorNumbers)
else:
self.displayImageNumbersVariation(self.paramVals,
self.ratioImageMeanNumbers,
self.ratioImageStdErrorNumbers)
if self.showFirstImage:
self.displayImage('First Image',self.firstImage)
if self.showSecondImage:
self.displayImage('Second Image',self.secondImage)
elif self.imagingType=='Absorption':
self.trippleImageNumberRange(fileNoStart,fileNoStop,
fileNoBG,param)
self.firstImageMeanNumbers=np.mean(self.firstImageNumbers,axis=1)
self.firstImageStdErrorNumbers=np.std(self.firstImageNumbers,axis=1)\
/np.sqrt(np.shape(self.firstImageNumbers)[1])
if self.fit:
self.fitVariations(self.paramVals,
self.firstImageMeanNumbers,
self.firstImageStdErrorNumbers,
self.fitType)
elif self.isLifetime:
self.lifetime(self.paramVals,
self.firstImageMeanNumbers,
self.firstImageStdErrorNumbers)
else:
self.displayImageNumbersVariation(self.paramVals,
self.firstImageMeanNumbers,
self.firstImageStdErrorNumbers)
if self.showFirstImage:
self.displayImage('First Image',self.firstImage)
def getSize(self,fileNoStart,fileNoStop,fileNoBG,param):
if self.imagingType=='Fluoresence':
if self.trigType=='single':
self.singleImageSizeRange(fileNoStart,fileNoStop,
fileNoBG,param)
radialSizes=np.abs(self.firstImageRadialFitParams[:,:,2])
axialSizes=np.abs(self.firstImageAxialFitParams[:,:,2])
self.firstImageMeanRadialSizes=np.mean(radialSizes,axis=1)
self.firstImageMeanAxialSizes=np.mean(axialSizes,axis=1)
self.firstImageStdErrorRadialSizes=np.std(radialSizes,axis=1)\
/np.sqrt(np.shape(radialSizes)[1])
self.firstImageStdErrorAxialSizes=np.std(axialSizes,axis=1)\
/np.sqrt(np.shape(axialSizes)[1])
if self.fit:
self.fitVariations(self.paramVals,
self.firstImageMeanRadialSizes,
self.firstImageStdErrorRadialSizes,
self.fitType)
self.fitVariations(self.paramVals,
self.firstImageMeanAxialSizes,
self.firstImageStdErrorAxialSizes,
self.fitType)
elif self.isTemperature:
self.temperature(self.paramVals,
self.firstImageMeanRadialSizes,
self.firstImageStdErrorRadialSizes,
self.firstImageMeanAxialSizes,
self.firstImageStdErrorAxialSizes)
else:
self.displaySingleImageSizeVariation()
if self.showFirstImage:
self.displayImage('First Image',self.firstImage)
if self.showSizeFitsFirstImage:
self.displaySizeFits('First Image',
self.firstImageRadialX,
self.firstImageRadialY,
self.firstImageAxialX,
self.firstImageAxialY,
self.firstImageRadialFitParams,
self.firstImageAxialFitParams)
elif self.trigType=='double':
self.doubleImageSizeRange(fileNoStart,fileNoStop,
fileNoBG,param)
radialSizes=np.abs(self.firstImageRadialFitParams[:,:,2])
axialSizes=np.abs(self.firstImageAxialFitParams[:,:,2])
self.firstImageMeanRadialSizes=np.mean(radialSizes,axis=1)
self.firstImageMeanAxialSizes=np.mean(axialSizes,axis=1)
self.firstImageStdErrorRadialSizes=np.std(radialSizes,axis=1)\
/np.sqrt(np.shape(radialSizes)[1])
self.firstImageStdErrorAxialSizes=np.std(axialSizes,axis=1)\
/np.sqrt(np.shape(axialSizes)[1])
radialSizes=np.abs(self.secondImageRadialFitParams[:,:,2])
axialSizes=np.abs(self.secondImageAxialFitParams[:,:,2])
self.secondImageMeanRadialSizes=np.mean(radialSizes,axis=1)
self.secondImageMeanAxialSizes=np.mean(axialSizes,axis=1)
self.secondImageStdErrorRadialSizes=np.std(radialSizes,axis=1)\
/np.sqrt(np.shape(radialSizes)[1])
self.secondImageStdErrorAxialSizes=np.std(axialSizes,axis=1)\
/np.sqrt(np.shape(axialSizes)[1])
if self.fit:
self.fitVariations(self.paramVals,
self.secondImageMeanRadialSizes,
self.secondImageStdErrorRadialSizes,
self.fitType)
self.fitVariations(self.paramVals,
self.secondImageMeanAxialSizes,
self.secondImageStdErrorAxialSizes,
self.fitType)
elif self.isTemperature:
self.temperature(self.paramVals,
self.secondImageMeanRadialSizes,
self.secondImageStdErrorRadialSizes,
self.secondImageMeanAxialSizes,
self.secondImageStdErrorAxialSizes)
else:
self.displayDoubleImageSizeVariation()
if self.showFirstImage:
self.displayImage('First Image',self.firstImage)
if self.showSecondImage:
self.displayImage('Second Image',self.secondImage)
if self.showSizeFitsFirstImage:
self.displaySizeFits('First Image',
self.firstImageRadialX,
self.firstImageRadialY,
self.firstImageAxialX,
self.firstImageAxialY,
self.firstImageRadialFitParams,
self.firstImageAxialFitParams)
if self.showSizeFitsSecondImage:
self.displaySizeFits('Second Image',
self.secondImageRadialX,
self.secondImageRadialY,
self.secondImageAxialX,
self.secondImageAxialY,
self.secondImageRadialFitParams,
self.secondImageAxialFitParams)
elif self.imagingType=='Absorption':
self.trippleImageSizeRange(fileNoStart,fileNoStop,
fileNoBG,param)
radialSizes=np.abs(self.firstImageRadialFitParams[:,:,2])
axialSizes=np.abs(self.firstImageAxialFitParams[:,:,2])
self.firstImageMeanRadialSizes=np.mean(radialSizes,axis=1)
self.firstImageMeanAxialSizes=np.mean(axialSizes,axis=1)
self.firstImageStdErrorRadialSizes=np.std(radialSizes,axis=1)\
/np.sqrt(np.shape(radialSizes)[1])
self.firstImageStdErrorAxialSizes=np.std(axialSizes,axis=1)\
/np.sqrt(np.shape(axialSizes)[1])
if self.fit:
self.fitVariations(self.paramVals,
self.firstImageMeanRadialSizes,
self.firstImageStdErrorRadialSizes,
self.fitType)
self.fitVariations(self.paramVals,
self.firstImageMeanAxialSizes,
self.firstImageStdErrorAxialSizes,
self.fitType)
elif self.isTemperature:
self.temperature(self.paramVals,
self.firstImageMeanRadialSizes,
self.firstImageStdErrorRadialSizes,
self.firstImageMeanAxialSizes,
self.firstImageStdErrorAxialSizes)
else:
self.displaySingleImageSizeVariation()
if self.showFirstImage:
self.displayImage('First Image',self.firstImage)
if self.showSizeFitsFirstImage:
self.displaySizeFits('First Image',
self.firstImageRadialX,
self.firstImageRadialY,
self.firstImageAxialX,
self.firstImageAxialY,
self.firstImageRadialFitParams,
self.firstImageAxialFitParams)
def fitVariations(self,paramVals,numbers,stdErrorNumbers,fitType):
paramValsFine=np.linspace(np.min(paramVals),np.max(paramVals),100)
fig,ax=plt.subplots()
if fitType=='lin':
popt,diagpcov,isFit=linearFit(paramVals,numbers,stdErrorNumbers)
yFit=linear(paramValsFine,*popt)
self.fitParams=popt
funcText='Fit Func:\ny(x,m,c)=m*x+c\n'
m='m: {0:.3f}\n'.format(popt[0]/(self.yScale/self.xScale))
c='c: {0:.3f}\n'.format(popt[1]/self.yScale)
ax.text(1.05,0.3,funcText+m+c,transform=ax.transAxes,wrap=True)
if fitType=='exp':
popt,diagpcov,isFit=expFit(paramVals,numbers,stdErrorNumbers)
yFit=exponential(paramValsFine,*popt)
self.fitParams=popt
funcText='Fit Func:\ny(x,a,c,s)=a*exp(-(x-c)/s)\n'
a='a: {0:.3f}\n'.format(popt[0]/self.yScale)
c='c: {0:.3f}\n'.format(popt[1]/self.xScale)
s='s: {0:.3f}\n'.format(popt[2]/self.xScale)
ax.text(1.05,0.3,funcText+a+c+s,transform=ax.transAxes,wrap=True)
if fitType=='expOffset':
popt,diagpcov,isFit=expFitOffset(paramVals,numbers,stdErrorNumbers)
yFit=exponentialOffset(paramValsFine,*popt)
self.fitParams=popt
funcText='Fit Func:\ny(x,a,c,s,o)=a*exp(-(x-c)/s)+o\n'
a='a: {0:.3f}\n'.format(popt[0]/self.yScale)
c='c: {0:.3f}\n'.format(popt[1]/self.xScale)
s='s: {0:.3f}\n'.format(popt[2]/self.xScale)
ax.text(1.05,0.3,funcText+a+c+s,transform=ax.transAxes,wrap=True)
if fitType=='gaussian':
popt,diagpcov,isFit=gaussianFit(paramVals,numbers,stdErrorNumbers)
yFit=gaussian(paramValsFine,*popt)
self.fitParams=popt
funcText='Fit Func:\ny(x,a,c,s)=a*exp(-(x-c)**2/(2*s**2))\n'
a='a: {0:.3f}\n'.format(popt[0]/self.yScale)
c='c: {0:.3f}\n'.format(popt[1]/self.xScale)
s='s: {0:.3f}\n'.format(popt[2]/self.xScale)
ax.text(1.05,0.3,funcText+a+c+s,transform=ax.transAxes,wrap=True)
if fitType=='gaussianOffset':
popt,diagpcov,isFit=gaussianFitOffset(paramVals,numbers,stdErrorNumbers)
yFit=gaussianOffset(paramValsFine,*popt)
self.fitParams=popt
funcText='Fit Func:\ny(x,a,c,s,o)=a*exp(-(x-c)**2/(2*s**2))+o\n'
a='a: {0:.3f}\n'.format(popt[0]/self.yScale)
c='c: {0:.3f}\n'.format(popt[1]/self.xScale)
s='s: {0:.3f}\n'.format(popt[2]/self.xScale)
o='o: {0:.3f}\n'.format(popt[3]/self.yScale)
ax.text(1.05,0.3,funcText+a+c+s+o,transform=ax.transAxes,wrap=True)
if fitType=='invSinc':
popt,diagpcov,isFit=invSincFit(paramVals,numbers,stdErrorNumbers)
yFit=invSinc(paramValsFine,*popt)
self.fitParams=popt
funcText='Fit Func:\ny(x,a,b,c,d)=a-b*sinc((x-c)*d)\n'
a='a: {0:.3f}\n'.format(popt[0]/self.yScale)
b='b: {0:.3f}\n'.format(popt[1]/self.yScale)
c='c: {0:.3f}\n'.format(popt[2]/self.xScale)
d='d: {0:.3f}\n'.format(popt[3]/self.xScale)
ax.text(1.05,0.3,funcText+a+b+c+d,transform=ax.transAxes,wrap=True)
if fitType=='sinc':
popt,diagpcov,isFit=sincFit(paramVals,numbers,stdErrorNumbers)
yFit=sinc(paramValsFine,*popt)
self.fitParams=popt
funcText='Fit Func:\ny(x,b,c,d)=b*sinc((x-c)*d)\n'
a='a: {0:.3f}\n'.format(popt[0]/self.yScale)
b='b: {0:.3f}\n'.format(popt[1]/self.yScale)
c='c: {0:.3f}\n'.format(popt[2]/self.xScale)
d='d: {0:.3f}\n'.format(popt[3]/self.xScale)
ax.text(1.05,0.3,funcText+a+b+c+d,transform=ax.transAxes,wrap=True)
if fitType=='expAmp':
popt,diagpcov,isFit=expAmpFit(paramVals,numbers,stdErrorNumbers)
yFit=exponentialAmp(paramValsFine,*popt)
self.fitParams=popt
funcText='Fit Func:\ny(x,a,s)=a*exp(-x/s)\n'
a='a: {0:.3f}+-{1:.3f}\n'.format(popt[0]/self.yScale,np.sqrt(diagpcov[0])/self.yScale)
s='s: {0:.3f}+-{1:.3f}\n'.format(popt[1]/self.xScale,np.sqrt(diagpcov[1])/self.xScale)
ax.text(1.05,0.3,funcText+a+s,transform=ax.transAxes,wrap=True)
# TODO: add lorengian and inverted gaussian
ax.errorbar(paramVals/self.xScale,
numbers/self.yScale,
yerr=stdErrorNumbers/self.yScale,
fmt=self.fmtP)
ax.plot(paramValsFine/self.xScale,yFit/self.yScale,'-g')
ax.legend(['Fit','Experimental'])
ax.set_xlabel(self.xLabel)
ax.set_ylabel(self.yLabel)
def temperature(self,paramVals,meanRadialSizes,stdErrorRadialSizes,
meanAxialSizes,stdErrorAxialSizes):
tSq=(paramVals*1e-5)**2
radialSizeSq=meanRadialSizes**2
radialErrorSq=stdErrorRadialSizes**2
axialSizeSq=meanAxialSizes**2
axialErrorSq=stdErrorAxialSizes**2
poptR,diagpcovR,isFitR=linearFit(tSq,radialSizeSq,stdErrorRadialSizes)
poptA,diagpcovA,isFitA=linearFit(tSq,axialSizeSq,stdErrorAxialSizes)
tSqFine=np.linspace(np.min(tSq),np.max(tSq),100)
radialSizeSqFine=linear(tSqFine,*poptR)
axialSizeSqFine=linear(tSqFine,*poptA)
self.radialT=poptR[0]*(self.massInAMU*cn.u/cn.k)
self.axialT=poptA[0]*(self.massInAMU*cn.u/cn.k)
self.radialTConfIntv=np.sqrt(diagpcovR[0])*(self.massInAMU*cn.u/cn.k)
self.axialTConfIntv=np.sqrt(diagpcovA[0])*(self.massInAMU*cn.u/cn.k)
bound_upperR = linear(tSqFine, *(poptR + np.sqrt(diagpcovR)))
bound_lowerR = linear(tSqFine, *(poptR - np.sqrt(diagpcovR)))
bound_upperA = linear(tSqFine, *(poptA + np.sqrt(diagpcovA)))
bound_lowerA = linear(tSqFine, *(poptA - np.sqrt(diagpcovA)))
fig,ax=plt.subplots(1,2,sharex=True,figsize=self.figSizePlot)
fig.subplots_adjust(hspace=0.01,wspace=0.01)
ax[1].yaxis.tick_right()
ax[1].yaxis.set_label_position("right")
ax[0].errorbar(tSq*1e6,radialSizeSq*1e6,yerr=radialErrorSq*1e6,fmt='ok')
ax[0].plot(tSqFine*1e6,radialSizeSqFine*1e6,'-r')
ax[1].errorbar(tSq*1e6,axialSizeSq*1e6,yerr=axialErrorSq*1e6,fmt='ok')
ax[1].plot(tSqFine*1e6,axialSizeSqFine*1e6,'-r')
ax[0].fill_between(tSqFine*1e6,bound_lowerR*1e6,
bound_upperR*1e6,color='r',alpha=0.15)
ax[1].fill_between(tSqFine*1e6,bound_lowerA*1e6,
bound_upperA*1e6,color='r',alpha=0.15)
ax[0].set_xlabel('time^2 [ms^2]')
ax[0].set_ylabel('size^2 [mm^2]')
ax[1].set_xlabel('time^2 [ms^2]')
ax[1].set_ylabel('size^2 [mm^2]')
tr = "Tr:{0:.3f}".format(self.radialT*1e6)+\
u"\u00B1"+"{0:.3f} uK".format(self.radialTConfIntv*1e6)
ta = "Ta:{0:.3f}".format(self.axialT*1e6)+\
u"\u00B1"+"{0:.3f} uK".format(self.axialTConfIntv*1e6)
ax[0].set_title(tr)
ax[1].set_title(ta)
def density(self,fileNoStart,fileNoStop,fileNoBG,param):
if self.imagingType=='Fluoresence':
if self.trigType=='single':
self.singleImageSizeRange(fileNoStart,fileNoStop,
fileNoBG,param)
radialSizes=np.abs(self.firstImageRadialFitParams[:,:,2])
axialSizes=np.abs(self.firstImageAxialFitParams[:,:,2])
self.singleImageNumberRange(fileNoStart,fileNoStop,
fileNoBG,param)
numbers=self.firstImageNumbers
elif self.trigType=='double':
self.doubleImageSizeRange(fileNoStart,fileNoStop,
fileNoBG,param)
radialSizes=np.abs(self.secondImageRadialFitParams[:,:,2])
axialSizes=np.abs(self.secondImageAxialFitParams[:,:,2])
self.doubleImageNumberRange(fileNoStart,fileNoStop,
fileNoBG,param)
numbers=self.secondImageNumbers
elif self.imagingType=='Absorption':
self.trippleImageSizeRange(fileNoStart,fileNoStop,
fileNoBG,param)
radialSizes=np.abs(self.firstImageRadialFitParams[:,:,2])
axialSizes=np.abs(self.firstImageAxialFitParams[:,:,2])
self.trippleImageNumberRange(fileNoStart,fileNoStop,
fileNoBG,param)
numbers=self.firstImageNumbers
vol=(2*np.pi)**(1.5)*axialSizes*radialSizes**2
self.imageNumberDensity=numbers/vol
self.meanNumberDensity=\
np.mean(self.imageNumberDensity,axis=1)
self.stdErrorNumberDensity=\
np.std(self.imageNumberDensity,axis=1)\
/np.sqrt(np.shape(self.firstImageNumbers)[1])
self.displayImageNumbersVariation(self.paramVals,
self.meanNumberDensity,
self.stdErrorNumberDensity)
if self.showFirstImage:
self.displayImage('First Image',self.firstImage)
if self.showSizeFitsFirstImage:
self.displaySizeFits('First Image',
self.firstImageRadialX,
self.firstImageRadialY,
self.firstImageAxialX,
self.firstImageAxialY,
self.firstImageRadialFitParams,
self.firstImageAxialFitParams)
    def singleImageLifetimes(self,fileNoStart,fileNoStop,
                             fileNoBG,param,shotsPerImage,t0,dt):
        """MOT lifetime from multi-frame single files.

        Each file holds `shotsPerImage` consecutive frames per shot, with
        frame i taken at t0 + i*dt.  Numbers are normalised to the second
        frame of each shot and an exponential is fitted per file; the
        fitted lifetimes, their errors and the scanned parameter values
        are stored on self.  For a single file the averaged decay and fit
        are also plotted.

        NOTE(review): noShots is derived from the BG file's frame count,
        assuming it matches the data files -- confirm.
        NOTE(review): unlike the *Range methods, self.fileNoExclude is not
        honoured here, and expFit is called without error weights.
        """
        bg,_=self.readFromZip(fileNoBG)
        # Shots per file, inferred from the background file's frame count.
        noShots=int(int(np.shape(bg)[0])/shotsPerImage)
        bg=np.mean(bg,axis=0)
        # Frame times; only shotsPerImage-1 points because the first frame
        # of every shot is dropped below (N=N[1:]).
        t=np.array([t0+i*dt for i in range(0,shotsPerImage-1)])
        tI=np.linspace(np.min(t),np.max(t),100)
        paramsValList=[]
        lifetimesList=[]
        errorList=[]
        for fileNo in range(fileNoStart,fileNoStop+1):
            images,paramsDict=self.readFromZip(fileNo)
            paramsValList.append(paramsDict[param])
            images-=bg
            k=0
            N_list=[]
            for i in range(noShots):
                # Slice out one shot's worth of consecutive frames.
                imageArray=images[k:k+shotsPerImage,:,:]
                N=self.getImageNumber(self.cropImages(imageArray))
                # Drop the first frame and normalise to the second one.
                N=N[1:]/N[1]
                N_list.append(N)
                k+=shotsPerImage
            N_mean=np.mean(N_list,axis=0)
            N_std=np.std(N_list,axis=0)/np.sqrt(noShots)
            popt,diagpcov,isFit=expFit(t,N_mean)
            lifetimesList.append(popt[0])
            errorList.append(np.sqrt(diagpcov[0]))
        self.singleImageLifetimesList=np.array(lifetimesList)
        self.singleImageLifetimesError=np.array(errorList)
        self.paramVals=np.array(paramsValList)
        # For a single file, show the averaged decay with its fit.
        if fileNoStart==fileNoStop:
            fig,ax=plt.subplots(1,1,figsize=self.figSizePlot)
            ax.errorbar(t/self.xScale,N_mean,yerr=N_std,fmt='og')
            ax.plot(tI/self.xScale,exponential(tI,*popt),'-k')
            ax.legend(['lifetime :{0:.3}'.format(popt[0]/self.xScale)+u"\u00B1"+"{0:.3f} [s.u]".format(np.sqrt(diagpcov[0])/self.xScale),'numbers'])
            ax.set_xlabel(self.xLabel)
            ax.set_ylabel(self.yLabel)
def lifetime(self,paramVals,meanNumbers,stdErrorNumbers):
paramValsFine=np.linspace(np.min(paramVals),np.max(paramVals),100)
popt,diagpcov,isFit=expFitOffset(paramVals,meanNumbers,stdErrorNumbers)
bound_upper = exponentialOffset(paramValsFine, *(popt + np.sqrt(diagpcov)))
bound_lower = exponentialOffset(paramValsFine, *(popt - np.sqrt(diagpcov)))
yFit=exponentialOffset(paramValsFine,*popt)
self.fitParams=popt
fig,ax=plt.subplots(figsize=self.figSizePlot)
ax.errorbar(paramVals/self.xScale,meanNumbers/self.yScale,
yerr=stdErrorNumbers/self.yScale,fmt=self.fmtP)
ax.plot(paramValsFine/self.xScale,yFit/self.yScale,'-r')
ax.fill_between(paramValsFine/self.xScale,
bound_lower/self.yScale,
bound_upper/self.yScale,
color='r',alpha=0.15)
ax.set_xlabel(self.xLabel)
ax.set_ylabel(self.yLabel)
l = "Lifetime: {0:.3f}".format(popt[2]/self.xScale)+\
u"\u00B1"+"{0:.3f} [s.u]".format(np.sqrt(diagpcov[2])/self.xScale)
ax.set_title(l)
def displayImageNumbersVariation(self,paramVals,numbers,stdErrorNumbers):
if self.display:
fig,ax=plt.subplots(figsize=self.figSizePlot)
ax.errorbar(paramVals/self.xScale,
numbers/self.yScale,
yerr=stdErrorNumbers/self.yScale,
fmt=self.fmtP)
ax.set_xlabel(self.xLabel)
ax.set_ylabel(self.yLabel)
def displaySingleImageSizeVariation(self):
if self.display:
fig,ax=plt.subplots(1,1,figsize=self.figSizePlot)
fig.subplots_adjust(hspace=0.01,wspace=0.01)
ax.errorbar(self.paramVals/self.xScale,
self.firstImageMeanRadialSizes/self.yScale,
yerr=self.firstImageStdErrorRadialSizes/self.yScale,
fmt=self.fmtP)
ax.errorbar(self.paramVals/self.xScale,
self.firstImageMeanAxialSizes/self.yScale,
yerr=self.firstImageStdErrorAxialSizes/self.yScale,
fmt=self.fmtS)
ax.legend(['Radial','Axial'])
ax.set_xlabel(self.xLabel)
ax.set_ylabel(self.yLabel)
def displayDoubleImageSizeVariation(self):
if self.display:
fig,ax=plt.subplots(1,2,figsize=self.figSizePlot)
fig.subplots_adjust(hspace=0.01,wspace=0.01)
ax[1].yaxis.tick_right()
ax[1].yaxis.set_label_position("right")
ax[0].set_title('First Image')
ax[0].errorbar(self.paramVals/self.xScale,
self.firstImageMeanRadialSizes/self.yScale,
yerr=self.firstImageStdErrorRadialSizes/self.yScale,
fmt=self.fmtP)
ax[0].errorbar(self.paramVals/self.xScale,
self.firstImageMeanAxialSizes/self.yScale,
yerr=self.firstImageStdErrorAxialSizes/self.yScale,
fmt=self.fmtS)
ax[1].set_title('Second Image')
ax[1].errorbar(self.paramVals/self.xScale,
self.secondImageMeanRadialSizes/self.yScale,
yerr=self.secondImageStdErrorRadialSizes/self.yScale,
fmt=self.fmtP)
ax[1].errorbar(self.paramVals/self.xScale,
self.secondImageMeanAxialSizes/self.yScale,
yerr=self.secondImageStdErrorAxialSizes/self.yScale,
fmt=self.fmtS)
ax[0].legend(['Radial','Axial'])
ax[0].set_xlabel(self.xLabel)
ax[0].set_ylabel(self.yLabel)
ax[1].legend(['Radial','Axial'])
ax[1].set_xlabel(self.xLabel)
ax[1].set_ylabel(self.yLabel)
def displayImage(self,title,images):
if self.display:
l,m,_,_=np.shape(images)
fig,ax=plt.subplots(l,m,figsize=self.figSizeImage,
sharex=True,sharey=True)
fig.tight_layout(rect=[0, 0.01, 1, 0.95])
fig.suptitle(title)
fig.subplots_adjust(hspace=0.01,wspace=0)
minn=np.min(images)
maxx=np.max(images)
for i in range(l):
for j in range(m):
im=ax[i,j].imshow(images[i,j,:,:],cmap='jet',
interpolation='nearest',vmin=minn,vmax=maxx)
ax[i,j].axis('off')
fig.colorbar(im, ax=ax.ravel().tolist(),orientation='horizontal')
def displaySizeFits(self,title,radialX,radialY,axialX,axialY,
radialFitParams,axialFitParams):
if self.display:
l,m,_=np.shape(radialFitParams)
fig,ax=plt.subplots(l,m,figsize=self.figSizeImage,sharex=True)
fig.tight_layout(rect=[0, 0.01, 1, 0.95])
fig.suptitle(title)
fig.subplots_adjust(hspace=0.01,wspace=0.01)
for i in range(l):
for j in range(m):
radialYFits=gaussianOffset(radialX[i,j,:],
*radialFitParams[i,j,:])
axialYFits=gaussianOffset(axialX[i,j,:],
*axialFitParams[i,j,:])
ax[i,j].plot(radialX[i,j,:]*1e3,
radialY[i,j,:],'--k')
ax[i,j].plot(axialX[i,j,:]*1e3,
axialY[i,j,:],'--r')
ax[i,j].plot(radialX[i,j,:]*1e3,
radialYFits,'-k')
ax[i,j].plot(axialX[i,j,:]*1e3,
axialYFits,'-r')
ax[i,j].set_yticks([])
if i==l-1:
ax[i,j].set_xlabel('distance [mm]')
if i==0 and j==0:
ax[i,j].legend(['Radial','Axial','RadialFit','AxialFit'])
def __call__(self,
fileNoStart,
fileNoStop,
fileNoBg,
param,
imagingType,
requirement='Number',
trigType='single',
fit=False,
fitType='lin',
showFirstImage=False,
showSecondImage=False,
showSizeFitsFirstImage=False,
showSizeFitsSecondImage=False,
diffStr='',
extParam='give a name',
extParamVals=[],
fileNoExclude=[],
figSizeImage=(15,20),
figSizePlot=(8,12),
fmtP='ok',
fmtS='or',
xLabel='X',
yLabel='Y',
xScale=1e2,
yScale=1,
smoothingWindow=11,
od_correction=False,
display=True,
**kwargs):
self.imagingType=imagingType
self.trigType=trigType
self.fit=fit
self.fitType=fitType
self.diffStr=diffStr
self.showFirstImage=showFirstImage
self.showSecondImage=showSecondImage
self.showSizeFitsFirstImage=showSizeFitsFirstImage
self.showSizeFitsSecondImage=showSizeFitsSecondImage
self.figSizeImage=figSizeImage
self.figSizePlot=figSizePlot
self.fmtP=fmtP
self.fmtS=fmtS
self.xScale=xScale
self.yScale=yScale
self.xLabel=xLabel
self.yLabel=yLabel
self.isLifetime=False
self.isTemperature=False
self.fileNoExclude=fileNoExclude
self.smoothingWindow=smoothingWindow
self.kwargs=kwargs
self.od_correction = od_correction
self.display = display
self.extParamVals = np.array(extParamVals)
if hasattr(self, 'detuningInVolt'):
if self.detuningInVolt != 'None':
self.s0=(1+4*(self.detuningInVolt*self.detuningFrequencyScaling)**2/\
self.gamma**2)/(3*self.lamda**2/(2*np.pi))
if requirement=='Number':
self.getNumber(fileNoStart,fileNoStop,fileNoBg,param)
elif requirement=='Size':
self.getSize(fileNoStart,fileNoStop,fileNoBg,param)
elif requirement=='Temperature':
self.isTemperature=True
self.fit=False
self.getSize(fileNoStart,fileNoStop,fileNoBg,param)
elif requirement=='Lifetime':
self.isLifetime=True
self.fit=False
self.getNumber(fileNoStart,fileNoStop,fileNoBg,param)
elif requirement=='Density':
self.density(fileNoStart,fileNoStop,fileNoBg,param)
elif requirement=='MOTLifetimes':
self.singleImageLifetimes(fileNoStart,fileNoStop,fileNoBg,
param,self.kwargs['shotsPerImage'],
self.kwargs['t0'],self.kwargs['dt'])
else:
print('Unknown requirement',requirement)
return self
def analysisWithDefaultCaFSettings():
    """Return an Analysis instance preconfigured for the CaF imaging setup."""
    defaults = {
        'bitDepth': 16,
        'fullWellCapacity': 18000,
        'collectionSolidAngle': 0.023,
        'pixelSize': 6.45e-6,
        'binSize': 8,
        'magFactor': 0.5,
        'bitsPerChannel': 12,
        'gamma': 1.5e6,
        'etaQ': 0.65,
        'exposureTime': 10e-3,
        'crop': False,
        'cropCentre': (65, 65),
        'cropHeight': 50,
        'cropWidth': 50,
        'massInAMU': 59,
        'diffStr': 'C',
        'smoothingWindow': 11,
    }
    analysis = Analysis()
    for name, value in defaults.items():
        setattr(analysis, name, value)
    return analysis
def analysisWithDefaultRbSettings():
    """Return an Analysis instance preconfigured for the Rb imaging setup."""
    defaults = {
        'pixelSize': 6.45e-6,
        'binSize': 2,
        'magFactor': 0.41,
        'crop': False,
        'cropCentre': (220, 320),
        'cropHeight': 120,
        'cropWidth': 120,
        'detuningInVolt': 0,
        'detuningFrequencyScaling': 14.7e6,
        'gamma': 6e6,
        'lamda': 780e-9,
        'massInAMU': 86.9,
        'diffStr': 'R',
        'smoothingWindow': 11,
    }
    analysis = Analysis()
    for name, value in defaults.items():
        setattr(analysis, name, value)
    return analysis
# Example/driver usage: run a double-trigger fluorescence number analysis
# with an inverted-sinc fit over the trial data set.
if __name__=='__main__':
    analysis=analysisWithDefaultCaFSettings()
    analysis.dirPath='./trialData'
    analysis.fileNameString='CaF04Oct1900'
    a=analysis(requirement='Number',
               fileNoStart=45,
               fileNoStop=63,
               fileNoBg=23,
               param="Gigatronics Synthesizer 1 - Frequency (MHz)",
               imagingType='Fluoresence',
               trigType='double',
               fit=True,
               fitType='invSinc',
               showFirstImage=False,
               showSecondImage=False,
               showSizeFitsFirstImage=False,
               showSizeFitsSecondImage=False,
               diffStr='C',
               extParam='give a name',
               extParamVals=[],
               fileNoExclude=[],
               figSizeImage=(15,20),
               figSizePlot=(8,5),
               xLabel='expansion time [ms]',
               yLabel='Normalised No',
               xScale=1,
               yScale=1)
| mit |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/python/Lib/numpy/lib/function_base.py | 6 | 109722 | __docformat__ = "restructuredtext en"
# Public API of this module (numpy.lib.function_base).
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
           'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
           'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
           'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
           'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
           'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
           'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
           'meshgrid', 'delete', 'insert', 'append', 'interp']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
from _compiled_base import _insert, add_docstring
from _compiled_base import digitize, bincount, interp as compiled_interp
from arraysetops import setdiff1d
from utils import deprecate
import numpy as np
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
        Input object.

    Returns
    -------
    b : {0, 1}
        Return 1 if the object has an iterator method or is a sequence,
        and 0 otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    1
    >>> np.iterable(2)
    0
    """
    # `iter` raises TypeError (and only TypeError) for non-iterables.
    # Catching just TypeError replaces the original bare `except:`, which
    # would also swallow KeyboardInterrupt/SystemExit and unrelated errors
    # raised by a misbehaving __iter__.  This matches modern numpy.iterable.
    try:
        iter(y)
    except TypeError:
        return 0
    return 1
def histogram(a, bins=10, range=None, normed=False, weights=None, density=None):
    """
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data; the histogram is computed over the flattened array.
    bins : int or sequence of scalars, optional
        Number of equal-width bins (default 10), or a monotonically
        increasing sequence of bin edges, including the rightmost edge.
    range : (float, float), optional
        Lower and upper range of the bins; defaults to
        ``(a.min(), a.max())``.  Values outside the range are ignored.
    normed : bool, optional
        Deprecated (known to be buggy with unequal bin widths); use
        `density` instead.
    weights : array_like, optional
        Per-sample weights, same shape as `a`.
    density : bool, optional
        If True, return the probability *density* at each bin so that the
        integral over the range is 1.  Overrides `normed` when given.

    Returns
    -------
    hist : array
        The histogram counts (or densities).
    bin_edges : array of dtype float
        The ``len(hist) + 1`` bin edges.

    Notes
    -----
    All but the last (righthand-most) bin is half-open: for edges
    ``[1, 2, 3, 4]`` the bins are ``[1, 2)``, ``[2, 3)`` and ``[3, 4]``.
    """
    a = asarray(a)
    if weights is not None:
        weights = asarray(weights)
        if np.any(weights.shape != a.shape):
            raise ValueError(
                    'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()
    if (range is not None):
        mn, mx = range
        if (mn > mx):
            raise AttributeError(
                'max must be larger than min in range parameter.')
    if not iterable(bins):
        if np.isscalar(bins) and bins < 1:
            raise ValueError("`bins` should be a positive integer.")
        if range is None:
            if a.size == 0:
                # handle empty arrays. Can't determine range, so use 0-1.
                range = (0, 1)
            else:
                range = (a.min(), a.max())
        mn, mx = [mi+0.0 for mi in range]
        # Degenerate range (all samples equal): widen by 0.5 on each side
        # so linspace still produces distinct edges.
        if mn == mx:
            mn -= 0.5
            mx += 0.5
        bins = linspace(mn, mx, bins+1, endpoint=True)
    else:
        bins = asarray(bins)
        if (np.diff(bins) < 0).any():
            raise AttributeError(
                    'bins must increase monotonically.')

    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = int
    else:
        ntype = weights.dtype
    n = np.zeros(bins.shape, ntype)

    # Process samples in fixed-size blocks so each sort's working set
    # stays bounded regardless of len(a).
    block = 65536
    if weights is None:
        for i in arange(0, len(a), block):
            sa = sort(a[i:i+block])
            # Cumulative counts at each edge.  Using 'right' on the final
            # edge makes the last bin closed on both sides.
            n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
                sa.searchsorted(bins[-1], 'right')]
    else:
        zero = array(0, dtype=ntype)
        for i in arange(0, len(a), block):
            tmp_a = a[i:i+block]
            tmp_w = weights[i:i+block]
            sorting_index = np.argsort(tmp_a)
            sa = tmp_a[sorting_index]
            sw = tmp_w[sorting_index]
            # Cumulative weights aligned with the sorted samples; the
            # leading zero makes cw[k] the weight of the first k samples.
            cw = np.concatenate(([zero,], sw.cumsum()))
            bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
                sa.searchsorted(bins[-1], 'right')]
            n += cw[bin_index]

    # Differencing the cumulative totals yields per-bin counts/weights.
    n = np.diff(n)

    if density is not None:
        if density:
            db = array(np.diff(bins), float)
            return n/db/n.sum(), bins
        else:
            return n, bins
    else:
        # deprecated, buggy behavior. Remove for Numpy 2.0
        if normed:
            db = array(np.diff(bins), float)
            return n/(n*db).sum(), bins
        else:
            return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : array_like
        (N, D) array (or data convertible to one) of N points in D
        dimensions.
    bins : sequence or int, optional
        Edge arrays per dimension, a per-dimension bin count, or a single
        count used for every dimension (default 10).
    range : sequence, optional
        (lower, upper) edges per dimension when explicit edges are not
        given; defaults to each dimension's min and max.
    normed : bool, optional
        If True, return bin counts divided by the bin hypervolume.
    weights : array_like (N,), optional
        Per-sample weights; normalized to 1 if `normed` is True.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of `sample`.
    edges : list
        D arrays describing the bin edges for each dimension.
    """
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = atleast_2d(sample).T
        N, D = sample.shape

    nbin = empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = asarray(weights)

    try:
        M = len(bins)
        if M != D:
            raise AttributeError(
                    'The dimension of bins must be equal'\
                    ' to the dimension of the sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        # Handle empty input. Range can't be determined in that case, use 0-1.
        if N == 0:
            smin = zeros(D)
            smax = ones(D)
        else:
            smin = atleast_1d(array(sample.min(0), float))
            smax = atleast_1d(array(sample.max(0), float))
    else:
        smin = zeros(D)
        smax = zeros(D)
        for i in arange(D):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # Create edge arrays.  Two extra bins per dimension collect samples
    # that fall below/above the range ("outlier" bins), stripped later.
    for i in arange(D):
        if isscalar(bins[i]):
            if bins[i] < 1:
                raise ValueError("Element at index %s in `bins` should be "
                                 "a positive integer." % i)
            nbin[i] = bins[i] + 2 # +2 for outlier bins
            edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
        else:
            edges[i] = asarray(bins[i], float)
            nbin[i] = len(edges[i])+1  # +1 for outlier bins
        dedges[i] = diff(edges[i])
        if np.any(np.asarray(dedges[i]) <= 0):
            raise ValueError("""
            Found bin edge of size <= 0. Did you specify `bins` with
            non-monotonic sequence?""")

    nbin = asarray(nbin)

    # Handle empty input.
    if N == 0:
        return np.zeros(nbin-2), edges

    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in arange(D):
        Ncount[i] = digitize(sample[:,i], edges[i])

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    outliers = zeros(N, int)
    for i in arange(D):
        # Rounding precision
        mindiff = dedges[i].min()
        if not np.isinf(mindiff):
            decimal = int(-log10(mindiff)) + 6
            # Find which points are on the rightmost edge.
            on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
                                                                   decimal))[0]
            # Shift these points one bin to the left.
            Ncount[i][on_edge] -= 1

    # Flattened histogram matrix (1D)
    # Reshape is used so that overlarge arrays
    # will raise an error.
    hist = zeros(nbin, float).reshape(-1)

    # Compute the sample indices in the flattened histogram matrix.
    # Dimensions are processed in nbin-sorted order (ni) and unscrambled
    # again by the swapaxes loop below.
    ni = nbin.argsort()
    shape = []
    xy = zeros(N, int)
    for i in arange(0, D-1):
        xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
    xy += Ncount[ni[-1]]

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    if len(xy) == 0:
        return zeros(nbin-2, int), edges
    flatcount = bincount(xy, weights)
    a = arange(len(flatcount))
    hist[a] = flatcount

    # Shape into a proper matrix
    hist = hist.reshape(sort(nbin))
    for i in arange(nbin.size):
        j = ni.argsort()[i]
        hist = hist.swapaxes(i,j)
        ni[i],ni[j] = ni[j],ni[i]

    # Remove outliers (indices 0 and -1 for each dimension).
    # NOTE(review): indexing with a list of slices is legacy behavior;
    # modern NumPy requires tuple(core) here.
    core = D*[slice(1,-1)]
    hist = hist[core]

    # Normalize if normed is True
    if normed:
        s = hist.sum()
        for i in arange(D):
            shape = ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s

    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
def average(a, axis=None, weights=None, returned=False):
    """
    Compute the weighted average along the specified axis.

    Parameters
    ----------
    a : array_like
        Data to be averaged; converted to an array unless it is a matrix.
    axis : int, optional
        Axis along which to average.  If None, average over the
        flattened array.
    weights : array_like, optional
        Weights associated with the values in `a`: either the same shape
        as `a`, or 1-D with length equal to ``a.shape[axis]``.  If None,
        every value has weight one.
    returned : bool, optional
        If True, return the tuple ``(average, sum_of_weights)`` instead of
        just the average.

    Returns
    -------
    average, [sum_of_weights] : {array_type, double}
        The average along the given axis (and the weight sum, broadcast
        to the average's shape, when `returned` is True).

    Raises
    ------
    ZeroDivisionError
        When all weights along an axis sum to zero.
    TypeError
        When `a` and `weights` differ in shape and `axis` is None, or the
        differing `weights` is not 1-D.
    ValueError
        When the 1-D `weights` length does not match ``a.shape[axis]``.
    """
    if not isinstance(a, np.matrix) :
        a = np.asarray(a)

    if weights is None :
        avg = a.mean(axis)
        # Number of elements averaged over; cast to the result dtype.
        scl = avg.dtype.type(a.size/avg.size)
    else :
        # Force a floating result so integer weights don't truncate.
        a = a + 0.0
        wgt = np.array(weights, dtype=a.dtype, copy=0)

        # Sanity checks
        if a.shape != wgt.shape :
            if axis is None :
                raise TypeError(
                        "Axis must be specified when shapes of a "\
                        "and weights differ.")
            if wgt.ndim != 1 :
                raise TypeError(
                        "1D weights expected when shapes of a and "\
                        "weights differ.")
            if wgt.shape[0] != a.shape[axis] :
                raise ValueError(
                        "Length of weights not compatible with "\
                        "specified axis.")

            # setup wgt to broadcast along axis: pad to a.ndim leading
            # 1-dims, then move the data axis into position.
            wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)

        scl = wgt.sum(axis=axis)
        if (scl == 0.0).any():
            raise ZeroDivisionError(
                "Weights sum to zero, can't be normalized")

        avg = np.multiply(a, wgt).sum(axis)/scl

    if returned:
        # Broadcast the weight sum to the shape of the average.
        scl = np.multiply(avg, 0) + scl
        return avg, scl
    else:
        return avg
def asarray_chkfinite(a):
    """
    Convert the input to an array, checking for NaNs or Infs.

    Parameters
    ----------
    a : array_like
        Input data in any form convertible to an array.  Success requires
        that it contain no NaNs or Infs.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`; no copy is performed if the input is
        already an ndarray.

    Raises
    ------
    ValueError
        If `a` contains NaN (Not a Number) or Inf (Infinity).

    See Also
    --------
    asarray, asanyarray, asfarray
    """
    arr = asarray(a)
    # Only inexact (float/complex) dtypes can hold NaN or Inf.
    if arr.dtype.char in typecodes['AllFloat']:
        has_bad_values = _nx.isnan(arr).any() or _nx.isinf(arr).any()
        if has_bad_values:
            raise ValueError(
                "array must not contain infs or NaNs")
    return arr
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluate a piecewise-defined function.

    Given a set of conditions and corresponding functions, evaluate each
    function on the input data wherever its condition is true.

    Parameters
    ----------
    x : ndarray
        The input domain.
    condlist : list of bool arrays
        One boolean array per function, each the same shape as `x`.  If
        one extra function is supplied (``len(funclist) - len(condlist)
        == 1``), that function is the default where all conditions are
        False.
    funclist : list of callables, f(x,*args,**kw), or scalars
        Functions (or constants) evaluated over the pieces of `x`
        selected by the matching condition.
    args : tuple, optional
        Extra positional arguments passed to every function call.
    kw : dict, optional
        Extra keyword arguments passed to every function call.

    Returns
    -------
    out : ndarray
        Same shape and type as `x`; portions covered by no condition have
        undefined values.
    """
    x = asanyarray(x)
    n2 = len(funclist)
    # Accept a single bare condition (scalar or one array) by wrapping it.
    if isscalar(condlist) or \
           not (isinstance(condlist[0], list) or
                isinstance(condlist[0], ndarray)):
        condlist = [condlist]
    condlist = [asarray(c, dtype=bool) for c in condlist]
    n = len(condlist)
    if n == n2-1:  # compute the "otherwise" condition.
        totlist = condlist[0]
        for k in range(1, n):
            totlist |= condlist[k]
        condlist.append(~totlist)
        n += 1
    if (n != n2):
        raise ValueError(
                "function list and condition list must be the same")
    zerod = False
    # This is a hack to work around problems with NumPy's
    # handling of 0-d arrays and boolean indexing with
    # numpy.bool_ scalars
    if x.ndim == 0:
        x = x[None]
        zerod = True
        newcondlist = []
        for k in range(n):
            if condlist[k].ndim == 0:
                condition = condlist[k][None]
            else:
                condition = condlist[k]
            newcondlist.append(condition)
        condlist = newcondlist

    y = zeros(x.shape, x.dtype)
    for k in range(n):
        item = funclist[k]
        if not callable(item):
            # Scalar piece: broadcast the constant into the selection.
            y[condlist[k]] = item
        else:
            # Callable piece: only invoke it when the selection is
            # non-empty, so funcs never see zero-length input.
            vals = x[condlist[k]]
            if vals.size > 0:
                y[condlist[k]] = item(vals, *args, **kw)

    if zerod:
        # Undo the 0-d -> 1-d promotion done above.
        y = y.squeeze()
    return y
def select(condlist, choicelist, default=0):
    """
    Return an array drawn from elements in choicelist, depending on conditions.

    Parameters
    ----------
    condlist : list of bool ndarrays
        Conditions determining from which array in `choicelist` each
        output element is taken.  When several conditions hold, the first
        one in `condlist` wins.
    choicelist : list of ndarrays
        Arrays the output elements are taken from; same length as
        `condlist`.
    default : scalar, optional
        Value used where every condition is False.

    Returns
    -------
    output : ndarray
        Element m is ``choicelist[k][m]`` where k is the first condition
        list whose m-th element is True, else `default`.

    See Also
    --------
    where, take, choose, compress
    """
    n = len(condlist)
    n2 = len(choicelist)
    if n2 != n:
        raise ValueError(
                "list of cases must be same length as list of conditions")
    choicelist = [default] + choicelist
    # Encode, per element, the 1-based index of the FIRST true condition
    # into S: pfac zeroes out positions already claimed by an earlier
    # condition, so later conditions cannot overwrite them.  Index 0
    # (no condition true) selects `default`.
    S = 0
    pfac = 1
    for k in range(1, n+1):
        S += k * pfac * asarray(condlist[k-1])
        if k < n:
            pfac *= (1-asarray(condlist[k-1]))
    # handle special case of a 1-element condition but
    # a multi-element choice
    if type(S) in ScalarType or max(asarray(S).shape)==1:
        # Broadcast S up to the common shape of the choices.
        pfac = asarray(1)
        for k in range(n2+1):
            pfac = pfac + asarray(choicelist[k])
        if type(S) in ScalarType:
            S = S*ones(asarray(pfac).shape, type(S))
        else:
            S = S*ones(asarray(pfac).shape, S.dtype)
    return choose(S, tuple(choicelist))
def copy(a):
    """
    Return an array copy of the given object.

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    arr : ndarray
        Array interpretation of `a`, with its data copied so later
        mutations of the result do not affect the input (and vice versa).

    Notes
    -----
    Equivalent to ``np.array(a, copy=True)``.
    """
    duplicated = array(a, copy=True)
    return duplicated
# Basic operations
def gradient(f, *varargs):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using central differences in the interior
    and first differences at the boundaries, so the result has the same
    shape as the input.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.
    `*varargs` : scalars
        0, 1, or N scalars giving the sample spacing in each direction
        (`dx`, `dy`, ...).  Default spacing is 1.

    Returns
    -------
    g : ndarray
        N arrays (one per dimension) of the same shape as `f`, each the
        derivative of `f` along that dimension; a single array when `f`
        is 1-D.
    """
    N = len(f.shape)  # number of dimensions
    n = len(varargs)
    if n == 0:
        dx = [1.0]*N
    elif n == 1:
        # One spacing shared by every dimension.
        dx = [varargs[0]]*N
    elif n == N:
        dx = list(varargs)
    else:
        raise SyntaxError(
                "invalid number of arguments")

    # use central differences on interior and first differences on endpoints

    outvals = []

    # create slice objects --- initially all are [:, :, ..., :]
    # NOTE(review): indexing with these slice *lists* is legacy behavior;
    # modern NumPy requires tuple(slice1) etc.
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N

    # Differences of integer inputs are computed in double precision.
    otype = f.dtype.char
    if otype not in ['f', 'd', 'F', 'D']:
        otype = 'd'

    for axis in range(N):
        # select out appropriate parts for this dimension
        out = np.zeros_like(f).astype(otype)
        slice1[axis] = slice(1, -1)
        slice2[axis] = slice(2, None)
        slice3[axis] = slice(None, -2)
        # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
        out[slice1] = (f[slice2] - f[slice3])/2.0
        slice1[axis] = 0
        slice2[axis] = 1
        slice3[axis] = 0
        # 1D equivalent -- out[0] = (f[1] - f[0])
        out[slice1] = (f[slice2] - f[slice3])
        slice1[axis] = -1
        slice2[axis] = -1
        slice3[axis] = -2
        # 1D equivalent -- out[-1] = (f[-1] - f[-2])
        out[slice1] = (f[slice2] - f[slice3])

        # divide by step size
        outvals.append(out / dx[axis])

        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)

    if N == 1:
        return outvals[0]
    else:
        return outvals
def diff(a, n=1, axis=-1):
    """
    Calculate the n-th order discrete difference along the given axis.

    The first order difference is ``out[i] = a[i+1] - a[i]`` along `axis`;
    higher orders apply the first difference repeatedly.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        How many times values are differenced.  ``n=0`` returns `a`
        unchanged.
    axis : int, optional
        Axis along which to difference; default is the last axis.

    Returns
    -------
    out : ndarray
        The n-th order differences; same shape as `a` except `axis`,
        which shrinks by `n`.

    Raises
    ------
    ValueError
        If `n` is negative.
    """
    if n == 0:
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))
    arr = asanyarray(a)
    ndim = len(arr.shape)
    # Build the two shifted views once: arr[head] drops the first element
    # along `axis`, arr[tail] drops the last.
    head = [slice(None)] * ndim
    tail = [slice(None)] * ndim
    head[axis] = slice(1, None)
    tail[axis] = slice(None, -1)
    head = tuple(head)
    tail = tuple(tail)
    # Apply the first difference n times iteratively instead of recursing.
    for _ in range(n):
        arr = arr[head] - arr[tail]
    return arr
def interp(x, xp, fp, left=None, right=None):
    """
    One-dimensional linear interpolation.

    Returns the piecewise-linear interpolant to a function with given
    values at discrete data points, evaluated at `x`.

    Parameters
    ----------
    x : array_like
        The x-coordinates of the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points; must be increasing (this is
        not checked — a non-increasing `xp` gives nonsense results).
    fp : 1-D sequence of floats
        The y-coordinates of the data points, same length as `xp`.
    left : float, optional
        Value returned for ``x < xp[0]``; defaults to ``fp[0]``.
    right : float, optional
        Value returned for ``x > xp[-1]``; defaults to ``fp[-1]``.

    Returns
    -------
    y : {float, ndarray}
        The interpolated values, same shape as `x` (a Python scalar for
        scalar and 0-d inputs).

    Raises
    ------
    ValueError
        If `xp` and `fp` have different lengths.
    """
    scalar_like = isinstance(x, (float, int, number))
    zero_d_array = isinstance(x, np.ndarray) and x.ndim == 0
    if scalar_like or zero_d_array:
        # Wrap the scalar in a list so the compiled routine sees a 1-D
        # input, then unwrap the single result back to a Python scalar.
        return compiled_interp([x], xp, fp, left, right).item()
    return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
    """
    Return the angle of the complex argument.

    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return the angle in degrees if True, radians if False (default).

    Returns
    -------
    angle : {ndarray, scalar}
        The counterclockwise angle from the positive real axis on the
        complex plane.

    See Also
    --------
    arctan2, absolute
    """
    scale = 180/pi if deg else 1.0
    z = asarray(z)
    if issubclass(z.dtype.type, _nx.complexfloating):
        imag_part = z.imag
        real_part = z.real
    else:
        # Real input: angle is 0 for positives, pi for negatives.
        imag_part = 0
        real_part = z
    return arctan2(imag_part, real_part) * scale
def unwrap(p, discont=pi, axis=-1):
    """
    Unwrap by changing deltas between values to 2*pi complement.

    Unwrap radian phase `p` by changing absolute jumps greater than
    `discont` to their 2*pi complement along the given axis.

    Parameters
    ----------
    p : array_like
        Input array.
    discont : float, optional
        Maximum allowed discontinuity between values; default ``pi``.
    axis : int, optional
        Axis along which unwrap operates; default is the last axis.

    Returns
    -------
    out : ndarray
        Output array (dtype float64).

    Notes
    -----
    Jumps smaller than ``pi`` but larger than `discont` are left alone,
    because the 2*pi complement would only enlarge them.
    """
    p = asarray(p)
    nd = len(p.shape)
    dd = diff(p, axis=axis)
    # NOTE(review): indexing with this slice *list* is legacy behavior;
    # modern NumPy requires tuple(slice1).
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    # Map each delta into (-pi, pi].
    ddmod = mod(dd+pi, 2*pi)-pi
    # A delta of exactly +pi would wrap to -pi; restore the sign so the
    # boundary case stays at +pi.
    _nx.putmask(ddmod, (ddmod==-pi) & (dd > 0), pi)
    ph_correct = ddmod - dd;
    # Deltas within the allowed discontinuity need no correction.
    _nx.putmask(ph_correct, abs(dd)<discont, 0)
    up = array(p, copy=True, dtype='d')
    # Accumulate corrections; the first element is never changed.
    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
    return up
def sort_complex(a):
    """
    Sort a complex array using the real part first, then the imaginary part.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    out : complex ndarray
        Always a sorted complex array, even for real input.
    """
    sorted_arr = array(a, copy=True)
    sorted_arr.sort()
    if issubclass(sorted_arr.dtype.type, _nx.complexfloating):
        return sorted_arr
    # Real input: promote to the smallest complex type that can hold it —
    # small ints -> 'F' (csingle), long double -> 'G' (clongdouble),
    # everything else -> 'D' (cdouble).
    char = sorted_arr.dtype.char
    if char in 'bhBH':
        target = 'F'
    elif char == 'g':
        target = 'G'
    else:
        target = 'D'
    return sorted_arr.astype(target)
def trim_zeros(filt, trim='fb'):
    """
    Trim the leading and/or trailing zeros from a 1-D array or sequence.

    Parameters
    ----------
    filt : 1-D array or sequence
        Input array.
    trim : str, optional
        'f' trims from the front, 'b' from the back; default 'fb' trims
        both ends.

    Returns
    -------
    trimmed : 1-D array or sequence
        The trimmed input; the input type is preserved (list in, list
        out; array in, array out).
    """
    mode = trim.upper()
    start = 0
    if 'F' in mode:
        # Advance past leading zeros until the first nonzero value.
        for value in filt:
            if value != 0.:
                break
            start += 1
    stop = len(filt)
    if 'B' in mode:
        # Walk backwards past trailing zeros.
        for value in filt[::-1]:
            if value != 0.:
                break
            stop -= 1
    return filt[start:stop]
# Python < 2.4 compatibility: the built-in `set` type appeared in 2.4, so
# fall back to the `sets` module's Set class on older interpreters.
import sys
if sys.hexversion < 0x2040000:
    from sets import Set as set
@deprecate
def unique(x):
    """
    This function is deprecated. Use numpy.lib.arraysetops.unique()
    instead.
    """
    try:
        # Fast path for ndarrays: sort, then keep each element that
        # differs from its predecessor.
        tmp = x.flatten()
        if tmp.size == 0:
            return tmp
        tmp.sort()
        idx = concatenate(([True],tmp[1:]!=tmp[:-1]))
        return tmp[idx]
    except AttributeError:
        # Generic sequences lack .flatten()/.size; deduplicate via a set
        # and return a sorted array.
        items = list(set(x))
        items.sort()
        return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
See Also
--------
take, put, putmask, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Like ``np.putmask(arr, mask, vals)``, except that `place` consumes
    `vals` sequentially: only the first N elements of `vals` are used,
    where N is the number of True entries in `mask` (repeating `vals`
    when it is shorter than N). ``extract`` performs the inverse
    operation.

    Parameters
    ----------
    arr : array_like
        Array to put data into.
    mask : array_like
        Boolean mask; must have the same size as `arr`.
    vals : 1-D sequence
        Values to insert where `mask` is True.

    See Also
    --------
    putmask, put, take, extract
    """
    # Delegate to the C helper, which walks mask and vals in tandem.
    return _insert(arr, mask, vals)
def _nanop(op, fill, a, axis=None):
"""
General operation on arrays with not-a-number values.
Parameters
----------
op : callable
Operation to perform.
fill : float
NaN values are set to fill before doing the operation.
a : array-like
Input array.
axis : {int, None}, optional
Axis along which the operation is computed.
By default the input is flattened.
Returns
-------
y : {ndarray, scalar}
Processed data.
"""
y = array(a, subok=True)
# We only need to take care of NaN's in floating point arrays
if np.issubdtype(y.dtype, np.integer):
return op(y, axis=axis)
mask = isnan(a)
# y[mask] = fill
# We can't use fancy indexing here as it'll mess w/ MaskedArrays
# Instead, let's fill the array directly...
np.putmask(y, mask, fill)
res = op(y, axis=axis)
mask_all_along_axis = mask.all(axis=axis)
# Along some axes, only nan's were encountered. As such, any values
# calculated along that axis should be set to nan.
if mask_all_along_axis.any():
if np.isscalar(res):
res = np.nan
else:
res[mask_all_along_axis] = np.nan
return res
def nansum(a, axis=None):
    """
    Return the sum of array elements over a given axis, treating
    Not a Numbers (NaNs) as zero.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose sum is desired. If `a` is not an
        array, a conversion is attempted.
    axis : int, optional
        Axis along which the sum is computed. By default the flattened
        array is summed.

    Returns
    -------
    y : ndarray or scalar
        An array with the same shape as `a`, with the specified axis
        removed; a scalar (same dtype as `a`) when `a` is 0-d or `axis`
        is None.

    See Also
    --------
    numpy.sum : Sum across array including Not a Numbers.
    isnan : Shows which elements are Not a Number (NaN).
    isfinite : Shows which elements are neither NaN nor infinity.

    Notes
    -----
    If only one signed infinity is present the result takes its sign; if
    both positive and negative infinity are present the result is NaN.
    Integer input (which cannot hold NaN) is summed directly, with
    modular arithmetic on overflow.

    Examples
    --------
    >>> np.nansum([1, np.nan])
    1.0
    >>> np.nansum(np.array([[1, 1], [1, np.nan]]), axis=0)
    array([ 2.,  1.])
    """
    # Zero is the additive identity, so NaNs vanish from the sum.
    return _nanop(np.sum, 0, a, axis)
def nanmin(a, axis=None):
    """
    Return the minimum of an array or minimum along an axis, ignoring
    any NaNs.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose minimum is desired.
    axis : int, optional
        Axis along which the minimum is computed. By default the
        minimum of the flattened array is returned.

    Returns
    -------
    nanmin : ndarray
        A new array or a scalar array with the result.

    See Also
    --------
    numpy.amin : Minimum across array including any Not a Numbers.
    numpy.nanmax : Maximum across array ignoring any Not a Numbers.
    isnan : Shows which elements are Not a Number (NaN).
    isfinite : Shows which elements are neither NaN nor infinity.

    Notes
    -----
    Positive infinity is treated as a very large number and negative
    infinity as a very small (i.e. negative) number. For integer input
    (which cannot hold NaN) this is equivalent to ``np.min``.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, np.nan]])
    >>> np.nanmin(a)
    1.0
    >>> np.nanmin(a, axis=0)
    array([ 1.,  2.])
    """
    a = np.asanyarray(a)
    # fmin drops NaN in pairwise comparisons, so reducing with it skips
    # NaNs automatically.
    if axis is None:
        return np.fmin.reduce(a.flat)
    return np.fmin.reduce(a, axis)
def nanargmin(a, axis=None):
    """
    Return indices of the minimum values over an axis, ignoring NaNs.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate. By default the flattened input is
        used.

    Returns
    -------
    index_array : ndarray
        An array of indices or a single index value.

    See Also
    --------
    argmin, nanargmax

    Examples
    --------
    >>> a = np.array([[np.nan, 4], [2, 3]])
    >>> np.nanargmin(a)
    2
    >>> np.nanargmin(a, axis=0)
    array([1, 1])
    """
    # Filling NaNs with +inf makes them lose every minimum comparison.
    return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
    """
    Return the maximum of an array or maximum along an axis, ignoring
    any NaNs.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose maximum is desired. If `a` is
        not an array, a conversion is attempted.
    axis : int, optional
        Axis along which the maximum is computed. By default the
        maximum of the flattened array is returned.

    Returns
    -------
    nanmax : ndarray
        An array with the same shape as `a`, with the specified axis
        removed. If `a` is 0-d, or if `axis` is None, an ndarray scalar
        with the same dtype as `a` is returned.

    See Also
    --------
    numpy.amax : Maximum across array including any Not a Numbers.
    numpy.nanmin : Minimum across array ignoring any Not a Numbers.
    isnan : Shows which elements are Not a Number (NaN).
    isfinite : Shows which elements are neither NaN nor infinity.

    Notes
    -----
    Positive infinity is treated as a very large number and negative
    infinity as a very small (i.e. negative) number. For integer input
    (which cannot hold NaN) this is equivalent to ``np.max``.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, np.nan]])
    >>> np.nanmax(a)
    3.0
    >>> np.nanmax(a, axis=0)
    array([ 3.,  2.])
    """
    a = np.asanyarray(a)
    # fmax drops NaN in pairwise comparisons, so reducing with it skips
    # NaNs automatically.
    if axis is None:
        return np.fmax.reduce(a.flat)
    return np.fmax.reduce(a, axis)
def nanargmax(a, axis=None):
    """
    Return indices of the maximum values over an axis, ignoring NaNs.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate. By default the flattened input is
        used.

    Returns
    -------
    index_array : ndarray
        An array of indices or a single index value.

    See Also
    --------
    argmax, nanargmin

    Examples
    --------
    >>> a = np.array([[np.nan, 4], [2, 3]])
    >>> np.nanargmax(a)
    1
    >>> np.nanargmax(a, axis=1)
    array([1, 1])
    """
    # Filling NaNs with -inf makes them lose every maximum comparison.
    return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
    """
    Display a message on a device.

    Parameters
    ----------
    mesg : str
        Message to display.
    device : object, optional
        Device to write the message to. If None, defaults to
        ``sys.stdout``; `device` must provide ``write()`` and
        ``flush()`` methods.
    linefeed : bool, optional
        Whether to append a line feed. Defaults to True.

    Raises
    ------
    AttributeError
        If `device` lacks a ``write()`` or ``flush()`` method.

    Examples
    --------
    >>> from StringIO import StringIO
    >>> buf = StringIO()
    >>> np.disp('"Display" in a file', device=buf)
    >>> buf.getvalue()
    '"Display" in a file\\n'
    """
    if device is None:
        import sys
        device = sys.stdout
    # Pick the format up front so there is a single write call.
    if linefeed:
        fmt = '%s\n'
    else:
        fmt = '%s'
    device.write(fmt % mesg)
    device.flush()
    return
# return number of input arguments and
# number of default arguments
def _get_nargs(obj):
import re
terr = re.compile(r'.*? takes (exactly|at least) (?P<exargs>(\d+)|(\w+))' +
r' argument(s|) \((?P<gargs>(\d+)|(\w+)) given\)')
def _convert_to_int(strval):
try:
result = int(strval)
except ValueError:
if strval=='zero':
result = 0
elif strval=='one':
result = 1
elif strval=='two':
result = 2
# How high to go? English only?
else:
raise
return result
if not callable(obj):
raise TypeError(
"Object is not callable.")
if sys.version_info[0] >= 3:
# inspect currently fails for binary extensions
# like math.cos. So fall back to other methods if
# it fails.
import inspect
try:
spec = inspect.getargspec(obj)
nargs = len(spec.args)
if spec.defaults:
ndefaults = len(spec.defaults)
else:
ndefaults = 0
if inspect.ismethod(obj):
nargs -= 1
return nargs, ndefaults
except:
pass
if hasattr(obj,'func_code'):
fcode = obj.func_code
nargs = fcode.co_argcount
if obj.func_defaults is not None:
ndefaults = len(obj.func_defaults)
else:
ndefaults = 0
if isinstance(obj, types.MethodType):
nargs -= 1
return nargs, ndefaults
try:
obj()
return 0, 0
except TypeError, msg:
m = terr.match(str(msg))
if m:
nargs = _convert_to_int(m.group('exargs'))
ndefaults = _convert_to_int(m.group('gargs'))
if isinstance(obj, types.MethodType):
nargs -= 1
return nargs, ndefaults
raise ValueError(
"failed to determine the number of arguments for %s" % (obj))
class vectorize(object):
    """
    vectorize(pyfunc, otypes='', doc=None)
    Generalized function class.
    Define a vectorized function which takes a nested sequence
    of objects or numpy arrays as inputs and returns a
    numpy array as output. The vectorized function evaluates `pyfunc` over
    successive tuples of the input arrays like the python map function,
    except it uses the broadcasting rules of numpy.
    The data type of the output of `vectorized` is determined by calling
    the function with the first element of the input. This can be avoided
    by specifying the `otypes` argument.
    Parameters
    ----------
    pyfunc : callable
        A python function or method.
    otypes : str or list of dtypes, optional
        The output data type. It must be specified as either a string of
        typecode characters or a list of data type specifiers. There should
        be one data type specifier for each output.
    doc : str, optional
        The docstring for the function. If None, the docstring will be the
        `pyfunc` one.
    Examples
    --------
    >>> def myfunc(a, b):
    ...     \"\"\"Return a-b if a>b, otherwise return a+b\"\"\"
    ...     if a > b:
    ...         return a - b
    ...     else:
    ...         return a + b
    >>> vfunc = np.vectorize(myfunc)
    >>> vfunc([1, 2, 3, 4], 2)
    array([3, 4, 1, 2])
    The docstring is taken from the input function to `vectorize` unless it
    is specified
    >>> vfunc.__doc__
    'Return a-b if a>b, otherwise return a+b'
    >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
    >>> vfunc.__doc__
    'Vectorized `myfunc`'
    The output type is determined by evaluating the first element of the input,
    unless it is specified
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.int32'>
    >>> vfunc = np.vectorize(myfunc, otypes=[np.float])
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.float64'>
    """
    def __init__(self, pyfunc, otypes='', doc=None):
        self.thefunc = pyfunc
        # The wrapped ufunc is built lazily on first call (see __call__).
        self.ufunc = None
        # Probe the callable for its argument counts so __call__ can
        # validate the number of arguments; (0, 0) means "unknown".
        nin, ndefault = _get_nargs(pyfunc)
        if nin == 0 and ndefault == 0:
            self.nin = None
            self.nin_wo_defaults = None
        else:
            self.nin = nin
            # Minimum number of arguments a call must supply.
            self.nin_wo_defaults = nin - ndefault
        self.nout = None
        if doc is None:
            self.__doc__ = pyfunc.__doc__
        else:
            self.__doc__ = doc
        if isinstance(otypes, str):
            self.otypes = otypes
            # Validate every typecode character against the known set.
            for char in self.otypes:
                if char not in typecodes['All']:
                    raise ValueError(
                        "invalid otype specified")
        elif iterable(otypes):
            # A sequence of dtype specifiers: normalize to a typecode string.
            self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
        else:
            raise ValueError(
                "Invalid otype specification")
        # Number of arguments seen on the previous call; a change forces
        # the cached ufunc to be rebuilt in __call__.
        self.lastcallargs = 0
    def __call__(self, *args):
        # get number of outputs and output types by calling
        # the function on the first entries of args
        nargs = len(args)
        if self.nin:
            # Argument count must lie between required and total params.
            if (nargs > self.nin) or (nargs < self.nin_wo_defaults):
                raise ValueError(
                    "Invalid number of arguments")
        # we need a new ufunc if this is being called with more arguments.
        if (self.lastcallargs != nargs):
            self.lastcallargs = nargs
            self.ufunc = None
            self.nout = None
        if self.nout is None or self.otypes == '':
            # Evaluate the function once on the first element of each
            # input to discover the number of outputs (and, if otypes was
            # not given, the output dtypes).
            newargs = []
            for arg in args:
                newargs.append(asarray(arg).flat[0])
            theout = self.thefunc(*newargs)
            if isinstance(theout, tuple):
                self.nout = len(theout)
            else:
                self.nout = 1
                theout = (theout,)
            if self.otypes == '':
                otypes = []
                for k in range(self.nout):
                    otypes.append(asarray(theout[k]).dtype.char)
                self.otypes = ''.join(otypes)
        # Create ufunc if not already created
        if (self.ufunc is None):
            self.ufunc = frompyfunc(self.thefunc, nargs, self.nout)
        # Convert to object arrays first
        newargs = [array(arg,copy=False,subok=True,dtype=object) for arg in args]
        if self.nout == 1:
            # Cast the object-array result to the (single) output dtype.
            _res = array(self.ufunc(*newargs),copy=False,
                         subok=True,dtype=self.otypes[0])
        else:
            # Cast each output to its corresponding typecode.
            _res = tuple([array(x,copy=False,subok=True,dtype=c) \
                    for x, c in zip(self.ufunc(*newargs), self.otypes)])
        return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
    """
    Estimate a covariance matrix, given data.

    For N-dimensional samples :math:`X = [x_1, x_2, ... x_N]^T`, element
    :math:`C_{ij}` of the result is the covariance of :math:`x_i` and
    :math:`x_j`; :math:`C_{ii}` is the variance of :math:`x_i`.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and
        observations. Each row of `m` represents a variable, and each
        column a single observation of all those variables (but see
        `rowvar`).
    y : array_like, optional
        An additional set of variables and observations, with the same
        form as `m`.
    rowvar : int, optional
        If non-zero (default), each row is a variable with observations
        in the columns; otherwise the relationship is transposed.
    bias : int, optional
        Default normalization is by ``(N - 1)`` (unbiased); if `bias`
        is 1, normalization is by ``N``. Overridden by `ddof` when that
        is given.
    ddof : int, optional
        .. versionadded:: 1.5
        If not None, normalization is by ``(N - ddof)``, overriding the
        value implied by `bias`. Default is None.

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Examples
    --------
    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])
    """
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be integer")
    data = array(m, ndmin=2, dtype=float)
    if data.size == 0:
        # Empty input: hand it back unchanged (as an array).
        return np.array(m)
    # A single row is always treated as one variable.
    if data.shape[0] == 1:
        rowvar = 1
    if rowvar:
        cat_axis = 0
        obs_axis = 1
        expand = (slice(None), newaxis)
    else:
        cat_axis = 1
        obs_axis = 0
        expand = (newaxis, slice(None))
    if y is not None:
        extra = array(y, copy=False, ndmin=2, dtype=float)
        data = concatenate((data, extra), cat_axis)
    # Center each variable on its mean over the observation axis.
    data -= data.mean(axis=obs_axis)[expand]
    n_obs = data.shape[obs_axis]
    if ddof is None:
        if bias == 0:
            ddof = 1
        else:
            ddof = 0
    fact = float(n_obs - ddof)
    if rowvar:
        return (dot(data, data.T.conj()) / fact).squeeze()
    return (dot(data.T, data.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
    """
    Return correlation coefficients.

    See `cov` for a description of the parameters. The relationship
    between the correlation coefficient matrix, `P`, and the covariance
    matrix, `C`, is

    .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }

    The values of `P` are between -1 and 1, inclusive.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and
        observations; each row is a variable and each column an
        observation (but see `rowvar`).
    y : array_like, optional
        An additional set of variables and observations, same shape
        as `x`.
    rowvar : int, optional
        If non-zero (default), rows are variables; otherwise columns.
    bias : int, optional
        Normalize by ``N`` instead of ``N - 1`` when 1; overridden by
        `ddof`.
    ddof : {None, int}, optional
        .. versionadded:: 1.5
        If not None, normalization is by ``(N - ddof)``.

    Returns
    -------
    out : ndarray
        The correlation coefficient matrix of the variables.

    See Also
    --------
    cov : Covariance matrix
    """
    c = cov(x, y, rowvar, bias, ddof)
    if c.size == 0:
        # Empty input propagates unchanged.
        return c
    try:
        variances = diag(c)
    except ValueError:
        # Scalar covariance: a variable is perfectly correlated with
        # itself.
        return 1
    return c / sqrt(multiply.outer(variances, variances))
def blackman(M):
    """
    Return the Blackman window.

    The Blackman window is a taper formed from the first three terms of
    a summation of cosines. It was designed to have close to the
    minimal leakage possible, and is only slightly worse than a Kaiser
    window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray
        The window, normalized to one (the value one appears only if
        the number of samples is odd).

    See Also
    --------
    bartlett, hamming, hanning, kaiser

    Notes
    -----
    The Blackman window is defined as

    .. math::  w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)

    It is also known as an apodization or tapering function, and is a
    "near optimal" taper, almost as good (by some measures) as the
    Kaiser window.

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
    spectra, Dover Publications, New York.
    Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
    Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
    """
    # Degenerate sizes: empty window, or the single point 1.0.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    return (0.42 - 0.5*cos(2.0*pi*samples/(M-1))
            + 0.08*cos(4.0*pi*samples/(M-1)))
def bartlett(M):
    """
    Return the Bartlett window.

    The Bartlett window is very similar to a triangular window, except
    that the end points are at zero. It is often used in signal
    processing for tapering a signal without generating too much ripple
    in the frequency domain.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : array
        The triangular window, normalized to one (the value one appears
        only if the number of samples is odd), with the first and last
        samples equal to zero.

    See Also
    --------
    blackman, hamming, hanning, kaiser

    Notes
    -----
    The Bartlett window is defined as

    .. math:: w(n) = \\frac{2}{M-1} \\left(
              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
              \\right)

    Convolution with this window produces linear interpolation. Its
    Fourier transform is the product of two sinc functions.

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    # Degenerate sizes: empty window, or the single point 1.0.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    # Rising ramp on the first half, mirrored falling ramp on the rest.
    rising = 2.0*samples/(M-1)
    in_first_half = less_equal(samples, (M-1)/2.0)
    return where(in_first_half, rising, 2.0 - rising)
def hanning(M):
    """
    Return the Hanning window.

    The Hanning window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray, shape(M,)
        The window, normalized to one (the value one appears only if
        `M` is odd).

    See Also
    --------
    bartlett, blackman, hamming, kaiser

    Notes
    -----
    The Hanning window is defined as

    .. math::  w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    The window was named for Julius van Hann, an Austrian
    meteorologist; it is also known as the Cosine Bell or Hann window
    (the latter avoids confusion with the similar Hamming window).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 106-108.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    # Degenerate sizes: empty window, or the single point 1.0.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    return 0.5 - 0.5*cos(2.0*pi*samples/(M-1))
def hamming(M):
    """
    Return the Hamming window.

    The Hamming window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray
        The window, normalized to one (the value one appears only if
        the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hanning, kaiser

    Notes
    -----
    The Hamming window is defined as

    .. math::  w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    The window is named for R. W. Hamming, an associate of J. W. Tukey,
    and is described in Blackman and Tukey. It was recommended for
    smoothing the truncated autocovariance function in the time domain.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T.
           Vetterling, "Numerical Recipes", Cambridge University Press,
           1986, page 425.
    """
    # Degenerate sizes: empty window, or the single point 1.0.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    n = arange(0, M)
    # Canonical Hamming weighting: note the MINUS sign (the docstring
    # previously showed a plus, contradicting this implementation).
    return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
# Chebyshev series coefficients (from the Cephes math library) consumed
# by _chbevl below to evaluate the modified Bessel function i0.
# _i0A: series for the small-argument regime (used by _i0_1, which the
# i0 dispatch selects for x <= 8).
_i0A = [
    -4.41534164647933937950E-18,
    3.33079451882223809783E-17,
    -2.43127984654795469359E-16,
    1.71539128555513303061E-15,
    -1.16853328779934516808E-14,
    7.67618549860493561688E-14,
    -4.85644678311192946090E-13,
    2.95505266312963983461E-12,
    -1.72682629144155570723E-11,
    9.67580903537323691224E-11,
    -5.18979560163526290666E-10,
    2.65982372468238665035E-9,
    -1.30002500998624804212E-8,
    6.04699502254191894932E-8,
    -2.67079385394061173391E-7,
    1.11738753912010371815E-6,
    -4.41673835845875056359E-6,
    1.64484480707288970893E-5,
    -5.75419501008210370398E-5,
    1.88502885095841655729E-4,
    -5.76375574538582365885E-4,
    1.63947561694133579842E-3,
    -4.32430999505057594430E-3,
    1.05464603945949983183E-2,
    -2.37374148058994688156E-2,
    4.93052842396707084878E-2,
    -9.49010970480476444210E-2,
    1.71620901522208775349E-1,
    -3.04682672343198398683E-1,
    6.76795274409476084995E-1]
# _i0B: series for the large-argument regime (used by _i0_2).
_i0B = [
    -7.23318048787475395456E-18,
    -4.83050448594418207126E-18,
    4.46562142029675999901E-17,
    3.46122286769746109310E-17,
    -2.82762398051658348494E-16,
    -3.42548561967721913462E-16,
    1.77256013305652638360E-15,
    3.81168066935262242075E-15,
    -9.55484669882830764870E-15,
    -4.15056934728722208663E-14,
    1.54008621752140982691E-14,
    3.85277838274214270114E-13,
    7.18012445138366623367E-13,
    -1.79417853150680611778E-12,
    -1.32158118404477131188E-11,
    -3.14991652796324136454E-11,
    1.18891471078464383424E-11,
    4.94060238822496958910E-10,
    3.39623202570838634515E-9,
    2.26666899049817806459E-8,
    2.04891858946906374183E-7,
    2.89137052083475648297E-6,
    6.88975834691682398426E-5,
    3.36911647825569408990E-3,
    8.04490411014108831608E-1]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in xrange(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
    # I0 on [0, 8]: exp(x) times the Chebyshev fit evaluated at x/2 - 2.
    series = _chbevl(x / 2.0 - 2, _i0A)
    return exp(x) * series
def _i0_2(x):
    # I0 on (8, inf): exp(x)/sqrt(x) times the Chebyshev fit at 32/x - 2.
    series = _chbevl(32.0 / x - 2.0, _i0B)
    return exp(x) * series / sqrt(x)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.

    Usually denoted :math:`I_0`. Broadcasts over `x`, but will *not*
    "up-cast" int dtype arguments unless accompanied by at least one
    float or complex dtype argument (see Raises).

    Parameters
    ----------
    x : array_like, dtype float or complex
        Points at which to evaluate the function.

    Returns
    -------
    out : ndarray, shape = x.shape, dtype = x.dtype
        :math:`I_0` evaluated elementwise.

    Raises
    ------
    TypeError: array cannot be safely cast to required type
        If the argument consists exclusively of int dtypes.

    Notes
    -----
    Uses Clenshaw's Chebyshev expansions from the Cephes library: one
    expansion on [0, 8] and another on (8, inf). Since I0 is an even
    function, negative arguments are reflected first.
    """
    vals = atleast_1d(x).copy()
    # I0(-x) == I0(x): fold onto the nonnegative axis before dispatching.
    negative = vals < 0
    vals[negative] = -vals[negative]
    out = empty_like(vals)
    small = vals <= 8.0
    out[small] = _i0_1(vals[small])
    large = ~small
    out[large] = _i0_2(vals[large])
    return out.squeeze()
## End of cephes code for i0
def kaiser(M,beta):
    """
    Return an M-point Kaiser window.

    The Kaiser window is a taper built from the zeroth-order modified
    Bessel function I0::

        w(n) = I0(beta * sqrt(1 - 4 n^2 / (M-1)^2)) / I0(beta)

    with ``-(M-1)/2 <= n <= (M-1)/2``.

    Parameters
    ----------
    M : int
        Number of points in the output window.
    beta : float
        Shape parameter. 0 gives a rectangular window; roughly 5, 6 and
        8.6 resemble Hamming, Hanning and Blackman windows respectively.
        Large beta narrows the window, so M must be large enough to
        sample the spike or NaNs appear.

    Returns
    -------
    out : array
        The window, normalized to one (the value one appears only when
        the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hamming, hanning

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters", in "Systems analysis by
           digital computer", Kuo & Kaiser (eds.), Wiley, 1966.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    # numpy.dual lets an installed scipy supply an accelerated i0.
    from numpy.dual import i0
    if M == 1:
        return np.array([1.])
    half = (M - 1) / 2.0
    offsets = (arange(0, M) - half) / half
    return i0(beta * sqrt(1 - offsets ** 2.0)) / i0(float(beta))
def sinc(x):
    """
    Return the sinc function, ``sin(pi*x) / (pi*x)``.

    Parameters
    ----------
    x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.

    Returns
    -------
    out : ndarray
        ``sinc(x)`` with the same shape as the input. ``sinc(0)`` is
        the limit value 1.

    Notes
    -----
    Short for "sine cardinal"; used in anti-aliasing, Lanczos
    resampling, and bandlimited interpolation.

    References
    ----------
    .. [1] Weisstein, Eric W. "Sinc Function." MathWorld.
           http://mathworld.wolfram.com/SincFunction.html
    .. [2] Wikipedia, "Sinc function",
           http://en.wikipedia.org/wiki/Sinc_function
    """
    x = np.asanyarray(x)
    # Substitute a tiny argument at the origin so 0/0 never occurs;
    # sin(eps)/eps rounds to exactly 1.0 in double precision.
    arg = where(x == 0, 1.0e-20, x) * pi
    return sin(arg) / arg
def msort(a):
    """
    Return a copy of an array sorted along the first axis.

    Parameters
    ----------
    a : array_like
        Array to be sorted.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`, sorted along axis 0.

    See Also
    --------
    sort

    Notes
    -----
    ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
    """
    # Copy (preserving subclasses), then sort the copy in place.
    out = array(a, subok=True, copy=True)
    out.sort(axis=0)
    return out
def median(a, axis=None, out=None, overwrite_input=False):
    """
    Compute the median along the specified axis.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int, optional
        Axis along which the medians are computed. The default (None)
        computes the median of the flattened array.
    out : ndarray, optional
        Alternative output array in which to place the result; its type
        is used for the final mean step.
    overwrite_input : bool, optional
        If True, sort `a` in place to save memory. `a` must then already
        be an ndarray and its contents are destroyed (treated as
        undefined afterwards). Default is False.

    Returns
    -------
    median : ndarray
        A new array holding the result (unless `out` is given, in which
        case `out` is returned). Integer and low-precision float inputs
        are promoted to float64 by the mean step.

    See Also
    --------
    mean, percentile

    Notes
    -----
    For a vector V of length N, the median is ``V_sorted[(N-1)/2]`` when
    N is odd and the average of the two middle values when N is even.
    """
    # Renamed from `sorted` so the builtin is not shadowed.
    if overwrite_input:
        if axis is None:
            srt = a.ravel()
            srt.sort()
        else:
            a.sort(axis=axis)
            srt = a
    else:
        srt = sort(a, axis=axis)
    if srt.shape == ():
        # make 0-D arrays work
        return srt.item()
    if axis is None:
        axis = 0
    indexer = [slice(None)] * srt.ndim
    index = int(srt.shape[axis] / 2)
    if srt.shape[axis] % 2 == 1:
        # odd length: index with a slice so mean (below) still works
        indexer[axis] = slice(index, index + 1)
    else:
        # even length: take the two middle elements
        indexer[axis] = slice(index - 1, index + 1)
    # Indexing must use a tuple: a *list* of slices is rejected by modern
    # NumPy. mean coerces the data type and honors the `out` array.
    return mean(srt[tuple(indexer)], axis=axis, out=out)
def percentile(a, q, axis=None, out=None, overwrite_input=False):
    """
    Compute the qth percentile of the data along the specified axis.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : float in range of [0,100] (or sequence of floats)
        Percentile to compute; must be between 0 and 100 inclusive.
    axis : int, optional
        Axis along which the percentiles are computed. The default
        (None) computes over a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result.
    overwrite_input : bool, optional
        If True, sort `a` in place to save memory. `a` must then already
        be an ndarray and its contents are destroyed. Default is False.

    Returns
    -------
    pcntile : ndarray
        A new array holding the result (unless `out` is given, in which
        case `out` is returned). Integer and low-precision float inputs
        are promoted to float64.

    See Also
    --------
    mean, median

    Notes
    -----
    The qth percentile of a length-N vector V is the qth ranked value of
    a sorted copy of V; a weighted average of the two nearest neighbors
    is used when the normalized ranking does not match q exactly.
    """
    a = np.asarray(a)
    # The extreme percentiles reduce to plain min/max.
    if q == 0:
        return a.min(axis=axis, out=out)
    if q == 100:
        return a.max(axis=axis, out=out)
    # `presorted` (renamed from the builtin-shadowing `sorted`).
    if overwrite_input:
        if axis is None:
            presorted = a.ravel()
            presorted.sort()
        else:
            a.sort(axis=axis)
            presorted = a
    else:
        presorted = sort(a, axis=axis)
    if axis is None:
        axis = 0
    return _compute_qth_percentile(presorted, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
if (q < 0) or (q > 1):
raise ValueError, "percentile must be either in the range [0,100]"
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrate along the given axis using the composite trapezoidal rule.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        Sample points corresponding to `y`. If None, the samples are
        assumed evenly spaced `dx` apart.
    dx : scalar, optional
        Spacing between samples when `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.

    Returns
    -------
    out : float
        Definite integral as approximated by the trapezoidal rule.

    See Also
    --------
    sum, cumsum

    References
    ----------
    .. [1] Wikipedia: http://en.wikipedia.org/wiki/Trapezoidal_rule

    Examples
    --------
    >>> trapz([1, 2, 3])
    4.0
    >>> trapz([1, 2, 3], x=[4, 6, 8])
    8.0
    >>> trapz([1, 2, 3], dx=2)
    8.0
    """
    y = asanyarray(y)
    if x is None:
        d = dx
    else:
        x = asanyarray(x)
        if x.ndim == 1:
            d = diff(x)
            # Reshape so the spacings broadcast along `axis` of y.
            shape = [1] * y.ndim
            shape[axis] = d.shape[0]
            d = d.reshape(shape)
        else:
            d = diff(x, axis=axis)
    nd = y.ndim
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    # Fancy indexing must use tuples: modern NumPy rejects a *list* of
    # slices as an index.
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
    except ValueError:
        # Operations didn't work (e.g. matrix operands): cast to ndarray
        d = np.asarray(d)
        y = np.asarray(y)
        ret = add.reduce(d * (y[slice1] + y[slice2]) / 2.0, axis)
    return ret
# always succeeds: failures are swallowed on purpose
def add_newdoc(place, obj, doc):
    """Add documentation to `obj`, which lives in module `place`.

    If `doc` is a string, attach it to `obj` as a docstring.
    If `doc` is a tuple ``(attr, docstring)``, attach `docstring` to
    that attribute of `obj`.
    If `doc` is a list, each element must be an ``(attr, docstring)``
    pair and each is attached in turn.

    This routine is deliberately best-effort and never raises for
    missing modules, attributes, or docstring support.
    """
    try:
        ns = {}
        # exec(...) call form works on both Python 2 and Python 3
        # (the original used the Python-2-only `exec ... in ns` statement).
        exec('from %s import %s' % (place, obj), ns)
        if isinstance(doc, str):
            add_docstring(ns[obj], doc.strip())
        elif isinstance(doc, tuple):
            add_docstring(getattr(ns[obj], doc[0]), doc[1].strip())
        elif isinstance(doc, list):
            for attr, docstr in doc:
                add_docstring(getattr(ns[obj], attr), docstr.strip())
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; everything else is intentionally ignored.
        pass
# From matplotlib
def meshgrid(x,y):
    """
    Return coordinate matrices from two coordinate vectors.

    Parameters
    ----------
    x, y : ndarray
        Two 1-D arrays representing the x and y coordinates of a grid.

    Returns
    -------
    X, Y : ndarray
        For vectors of lengths ``Nx=len(x)`` and ``Ny=len(y)``, two
        ``(Ny, Nx)`` arrays: `X` repeats `x` down the rows, `Y` repeats
        `y` across the columns.

    See Also
    --------
    index_tricks.mgrid, index_tricks.ogrid

    Examples
    --------
    >>> X, Y = meshgrid([1,2,3], [4,5,6,7])
    >>> X
    array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]])
    """
    x = asarray(x)
    y = asarray(y)
    # Output shape is (len(y), len(x)): rows follow y, columns follow x.
    nrows, ncols = len(y), len(x)
    X = x.reshape(1, ncols).repeat(nrows, axis=0)
    Y = y.reshape(nrows, 1).repeat(ncols, axis=1)
    return X, Y
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int or array of ints
        Indicate which sub-arrays to remove.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed;
        `delete` does not operate in place. If `axis` is None, `out` is
        a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    append : Append elements at the end of an array.

    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1,  2,  3,  4],
           [ 9, 10, 11, 12]])
    """
    wrap = None
    if type(arr) is not ndarray:
        # Remember the subclass wrapper so the result keeps arr's type.
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass
    arr = asarray(arr)
    ndim = arr.ndim
    if axis is None:
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    if ndim == 0:
        # 0-d array: nothing to delete along any axis; return a copy.
        if wrap:
            return wrap(arr)
        else:
            return arr.copy()
    slobj = [slice(None)] * ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)
    # Preserve the memory layout of the input (the original passed the
    # boolean flags.fnc directly as the `order` argument, which modern
    # NumPy rejects).
    order = 'F' if arr.flags.fnc else 'C'
    try:
        int_types = (int, long, integer)  # Python 2 still has `long`
    except NameError:
        int_types = (int, integer)  # Python 3: `long` is gone
    if isinstance(obj, int_types):
        if obj < 0:
            obj += N
        if obj < 0 or obj >= N:
            raise ValueError(
                "index %d is out of bounds for axis %d with size %d"
                % (obj, axis, N))
        newshape[axis] -= 1
        new = empty(newshape, arr.dtype, order)
        # Copy everything before, then everything after, the deleted index.
        # (Indexing uses tuples: lists of slices are rejected by modern NumPy.)
        slobj[axis] = slice(None, obj)
        new[tuple(slobj)] = arr[tuple(slobj)]
        slobj[axis] = slice(obj, None)
        slobj2 = [slice(None)] * ndim
        slobj2[axis] = slice(obj + 1, None)
        new[tuple(slobj)] = arr[tuple(slobj2)]
    elif isinstance(obj, slice):
        start, stop, step = obj.indices(N)
        numtodel = len(range(start, stop, step))
        if numtodel <= 0:
            # Nothing to delete: return an unchanged copy. (The original
            # referenced `new` here before it was ever assigned.)
            if wrap:
                return wrap(arr.copy())
            else:
                return arr.copy()
        newshape[axis] -= numtodel
        new = empty(newshape, arr.dtype, order)
        # copy initial chunk
        if start != 0:
            slobj[axis] = slice(None, start)
            new[tuple(slobj)] = arr[tuple(slobj)]
        # copy end chunk
        if stop != N:
            slobj[axis] = slice(stop - numtodel, None)
            slobj2 = [slice(None)] * ndim
            slobj2[axis] = slice(stop, None)
            new[tuple(slobj)] = arr[tuple(slobj2)]
        # copy middle pieces (only needed for strided slices)
        if step != 1:
            deleted = arange(start, stop, step, dtype=intp)
            # `kept` renamed from `all`, which shadowed the builtin.
            kept = setdiff1d(arange(start, stop, dtype=intp), deleted)
            slobj[axis] = slice(start, stop - numtodel)
            slobj2 = [slice(None)] * ndim
            slobj2[axis] = kept
            new[tuple(slobj)] = arr[tuple(slobj2)]
    else:  # default behavior: obj is a sequence of indices
        obj = array(obj, dtype=intp, ndmin=1)
        kept = setdiff1d(arange(N, dtype=intp), obj)
        slobj[axis] = kept
        new = arr[tuple(slobj)]
    if wrap:
        return wrap(new)
    else:
        return new
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : int, slice or sequence of ints
        Index or indices before which `values` is inserted.
    values : array_like
        Values to insert into `arr`; converted to the type of `arr`.
    axis : int, optional
        Axis along which to insert `values`. If None, `arr` is
        flattened first.

    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted; `insert` does not
        operate in place. If `axis` is None, `out` is a flattened array.

    See Also
    --------
    append : Append elements at the end of an array.
    delete : Delete elements from an array.

    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> np.insert(a, 1, 5)
    array([1, 5, 1, 2, 2, 3, 3])
    >>> np.insert(a, 1, 5, axis=1)
    array([[1, 5, 1],
           [2, 5, 2],
           [3, 5, 3]])
    """
    wrap = None
    if type(arr) is not ndarray:
        # Remember the subclass wrapper so the result keeps arr's type.
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass
    arr = asarray(arr)
    ndim = arr.ndim
    if axis is None:
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    if ndim == 0:
        # 0-d input: the result is simply the inserted value(s).
        arr = arr.copy()
        arr[...] = values
        if wrap:
            return wrap(arr)
        else:
            return arr
    slobj = [slice(None)] * ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)
    # Preserve the memory layout of the input (the original passed the
    # boolean flags.fnc directly as `order`, which modern NumPy rejects).
    order = 'F' if arr.flags.fnc else 'C'
    try:
        int_types = (int, long, integer)  # Python 2 still has `long`
    except NameError:
        int_types = (int, integer)  # Python 3: `long` is gone
    if isinstance(obj, int_types):
        if obj < 0:
            obj += N
        if obj < 0 or obj > N:
            raise ValueError(
                "index (%d) out of range (0<=index<=%d) "
                "in dimension %d" % (obj, N, axis))
        newshape[axis] += 1
        new = empty(newshape, arr.dtype, order)
        # Copy the leading chunk, place the values, copy the trailing
        # chunk shifted by one. (Indexing uses tuples: lists of slices
        # are rejected by modern NumPy.)
        slobj[axis] = slice(None, obj)
        new[tuple(slobj)] = arr[tuple(slobj)]
        slobj[axis] = obj
        new[tuple(slobj)] = values
        slobj[axis] = slice(obj + 1, None)
        slobj2 = [slice(None)] * ndim
        slobj2[axis] = slice(obj, None)
        new[tuple(slobj)] = arr[tuple(slobj2)]
        if wrap:
            return wrap(new)
        return new
    elif isinstance(obj, slice):
        # Expand the slice into explicit indices (the original used the
        # awkward Python-2 workaround `arange(*ind, **{'dtype': intp})`).
        start, stop, step = obj.indices(N)
        obj = arange(start, stop, step, dtype=intp)
    # Sequence of indices: index1 marks the slots that receive the new
    # values, index2 the slots that receive the original array.
    obj = asarray(obj, dtype=intp)
    numnew = len(obj)
    index1 = obj + arange(numnew)
    index2 = setdiff1d(arange(numnew + N), index1)
    newshape[axis] += numnew
    new = empty(newshape, arr.dtype, order)
    slobj2 = [slice(None)] * ndim
    slobj[axis] = index1
    slobj2[axis] = index2
    new[tuple(slobj)] = values
    new[tuple(slobj2)] = arr
    if wrap:
        return wrap(new)
    return new
def append(arr, values, axis=None):
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : array_like
        Values are appended to a copy of this array.
    values : array_like
        Appended to a copy of `arr`. Must match `arr`'s shape excluding
        `axis`; when `axis` is not given, `values` may be any shape and
        is flattened first.
    axis : int, optional
        Axis along which to append. If omitted, both `arr` and `values`
        are flattened before use.

    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` appended along `axis`; `append`
        does not operate in place. If `axis` is None, `out` is a
        flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    delete : Delete elements from an array.

    Examples
    --------
    >>> append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    arr = asanyarray(arr)
    if axis is None:
        # Flatten both operands and append along the single axis.
        if arr.ndim != 1:
            arr = arr.ravel()
        values = ravel(values)
        axis = arr.ndim - 1
    return concatenate((arr, values), axis=axis)
| agpl-3.0 |
spallavolu/scikit-learn | examples/model_selection/plot_precision_recall.py | 249 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# NOTE(review): sklearn.cross_validation is the pre-0.18 module path for
# train_test_split; newer releases expose it from sklearn.model_selection —
# confirm which scikit-learn version this example targets.
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
# (one-vs-rest scoring needs a label-indicator matrix: one binary column
# per class)
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
# (padding with 200 * n_features random columns makes the task hard enough
# that the precision-recall curves are informative)
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
    random_state=random_state)
# Run classifier
# (decision_function scores, rather than hard predictions, let the
# precision-recall curve sweep over all thresholds)
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
    random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
    precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
        y_score[:, i])
    average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average ROC curve and ROC area
# (micro-averaging treats every entry of the label-indicator matrix as a
# single binary prediction)
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
    y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
    average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
    label='micro-average Precision-recall curve (area = {0:0.2f})'
    ''.format(average_precision["micro"]))
for i in range(n_classes):
    plt.plot(recall[i], precision[i],
        label='Precision-recall curve of class {0} (area = {1:0.2f})'
        ''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
polyaxon/polyaxon | core/polyaxon/polyboard/events/schemas.py | 1 | 16747 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from collections import namedtuple
from typing import Dict, Mapping, Optional, Union
import polyaxon_sdk
from marshmallow import ValidationError, fields, pre_load, validate, validates_schema
from polyaxon.parser import parser
from polyaxon.polyboard.artifacts.kinds import V1ArtifactKind
from polyaxon.polyboard.utils import validate_csv
from polyaxon.schemas.base import BaseConfig, BaseSchema
from polyaxon.utils.date_utils import parse_datetime
from polyaxon.utils.np_utils import sanitize_np_types
from polyaxon.utils.signal_decorators import check_partial
from polyaxon.utils.tz_utils import now
class EventImageSchema(BaseSchema):
    """Schema for an image event payload: pixel dimensions, colorspace, storage path."""
    height = fields.Int(allow_none=True)
    width = fields.Int(allow_none=True)
    colorspace = fields.Int(allow_none=True)
    path = fields.Str(allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1EventImage
class V1EventImage(BaseConfig, polyaxon_sdk.V1EventImage):
    """Config object for image events; REDUCED_ATTRIBUTES are omitted from dumps when unset."""
    IDENTIFIER = "image"
    SCHEMA = EventImageSchema
    REDUCED_ATTRIBUTES = ["height", "width", "colorspace", "path"]
class EventVideoSchema(BaseSchema):
    """Schema for a video event payload: dimensions, colorspace, path and content type."""
    height = fields.Int(allow_none=True)
    width = fields.Int(allow_none=True)
    colorspace = fields.Int(allow_none=True)
    path = fields.Str(allow_none=True)
    content_type = fields.Str(allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1EventVideo
class V1EventVideo(BaseConfig, polyaxon_sdk.V1EventVideo):
    """Config object for video events.

    BUG FIX: ``SCHEMA`` previously pointed at ``EventImageSchema``, which has no
    ``content_type`` field, so video payloads silently dropped ``content_type``
    during (de)serialization. It now uses ``EventVideoSchema``, keeping the
    schema consistent with ``REDUCED_ATTRIBUTES``.
    """
    IDENTIFIER = "video"
    SCHEMA = EventVideoSchema
    REDUCED_ATTRIBUTES = ["height", "width", "colorspace", "path", "content_type"]
class EventDataframeSchema(BaseSchema):
    """Schema for a dataframe event payload: storage path plus content type."""
    path = fields.Str(allow_none=True)
    content_type = fields.Str(allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1EventDataframe
class V1EventDataframe(BaseConfig, polyaxon_sdk.V1EventDataframe):
    """Config object for dataframe events; REDUCED_ATTRIBUTES are omitted when unset."""
    IDENTIFIER = "dataframe"
    SCHEMA = EventDataframeSchema
    REDUCED_ATTRIBUTES = ["path", "content_type"]
class EventHistogramSchema(BaseSchema):
    """Schema for a histogram event payload: bin edge values and per-bin counts."""
    values = fields.List(fields.Float(), allow_none=True)
    counts = fields.List(fields.Float(), allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1EventHistogram
class V1EventHistogram(BaseConfig, polyaxon_sdk.V1EventHistogram):
    """Config object for histogram events; REDUCED_ATTRIBUTES are omitted when unset."""
    IDENTIFIER = "histogram"
    SCHEMA = EventHistogramSchema
    REDUCED_ATTRIBUTES = ["values", "counts"]
class EventAudioSchema(BaseSchema):
    """Schema for an audio event payload: sampling info, storage path, content type."""
    sample_rate = fields.Float(allow_none=True)
    num_channels = fields.Int(allow_none=True)
    length_frames = fields.Int(allow_none=True)
    path = fields.Str(allow_none=True)
    content_type = fields.Str(allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1EventAudio
class V1EventAudio(BaseConfig, polyaxon_sdk.V1EventAudio):
    """Config object for audio events; REDUCED_ATTRIBUTES are omitted when unset."""
    IDENTIFIER = "audio"
    SCHEMA = EventAudioSchema
    REDUCED_ATTRIBUTES = [
        "sample_rate",
        "num_channels",
        "length_frames",
        "path",
        "content_type",
    ]
class V1EventChartKind(polyaxon_sdk.V1EventChartKind):
    # Re-exported under this module's namespace; allowed values come from the SDK.
    pass
class EventChartSchema(BaseSchema):
    """Schema for a chart event payload: chart kind plus the raw figure dict."""
    kind = fields.Str(
        allow_none=True, validate=validate.OneOf(V1EventChartKind.allowable_values)
    )
    figure = fields.Dict(allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1EventChart
class V1EventChart(BaseConfig, polyaxon_sdk.V1EventChart):
    """Config object for chart events (e.g. plotly/bokeh/vega figures)."""
    IDENTIFIER = "chart"
    SCHEMA = EventChartSchema
    REDUCED_ATTRIBUTES = ["kind", "figure"]

    def to_dict(self, humanize_values=False, unknown=None, dump=False):
        """Serialize the chart.

        For plotly charts this returns a JSON *string* encoded with plotly's
        encoder (figures may contain numpy arrays and other non-JSON types);
        all other kinds fall through to the default serialization.
        """
        if self.kind == V1EventChartKind.PLOTLY:
            # BUG FIX: PlotlyJSONEncoder lives in plotly.utils; the previous
            # `import plotly.tools` only exposed plotly.utils by accident of
            # plotly's package-level imports.
            import plotly.utils

            obj = self.obj_to_dict(
                self, humanize_values=humanize_values, unknown=unknown
            )
            return json.dumps(obj, cls=plotly.utils.PlotlyJSONEncoder)
        # Resume normal serialization
        return super().to_dict(humanize_values, unknown, dump)
class V1EventCurveKind(polyaxon_sdk.V1EventCurveKind):
    # Re-exported under this module's namespace; allowed values come from the SDK.
    pass
class EventCurveSchema(BaseSchema):
    """Schema for a curve event payload (e.g. ROC/PR): x/y series plus annotation."""
    kind = fields.Str(
        allow_none=True, validate=validate.OneOf(V1EventCurveKind.allowable_values)
    )
    x = fields.List(fields.Float(), allow_none=True)
    y = fields.List(fields.Float(), allow_none=True)
    annotation = fields.Str(allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1EventCurve
class V1EventCurve(BaseConfig, polyaxon_sdk.V1EventCurve):
    """Config object for curve events; REDUCED_ATTRIBUTES are omitted when unset."""
    IDENTIFIER = "curve"
    SCHEMA = EventCurveSchema
    REDUCED_ATTRIBUTES = ["kind", "x", "y", "annotation"]
class EventArtifactSchema(BaseSchema):
    """Schema for a generic artifact event payload: artifact kind and storage path."""
    kind = fields.Str(
        allow_none=True, validate=validate.OneOf(V1ArtifactKind.allowable_values)
    )
    path = fields.Str(allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1EventArtifact
class V1EventArtifact(BaseConfig, polyaxon_sdk.V1EventArtifact):
    """Config object for artifact events; REDUCED_ATTRIBUTES are omitted when unset."""
    IDENTIFIER = "artifact"
    SCHEMA = EventArtifactSchema
    REDUCED_ATTRIBUTES = ["kind", "path"]
class EventModelSchema(BaseSchema):
    """Schema for a model event payload: framework name, storage path, optional spec."""
    framework = fields.Str(allow_none=True)
    path = fields.Str(allow_none=True)
    spec = fields.Raw(allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1EventModel
class V1EventModel(BaseConfig, polyaxon_sdk.V1EventModel):
    """Config object for model events; REDUCED_ATTRIBUTES are omitted when unset."""
    # NOTE(review): IDENTIFIER duplicates V1EventArtifact's "artifact" and looks
    # like a copy-paste; "model" seems intended. Confirm against every consumer
    # of IDENTIFIER (and any persisted events) before changing the value.
    IDENTIFIER = "artifact"
    SCHEMA = EventModelSchema
    REDUCED_ATTRIBUTES = ["framework", "path", "spec"]
class EventSchema(BaseSchema):
    """Schema for a single logged event.

    An event carries a timestamp, an optional step, and exactly one
    "primitive" payload (metric, image, histogram, ...).
    """

    timestamp = fields.DateTime(allow_none=True)
    step = fields.Int(allow_none=True)
    metric = fields.Float(allow_none=True)
    image = fields.Nested(EventImageSchema, allow_none=True)
    histogram = fields.Nested(EventHistogramSchema, allow_none=True)
    audio = fields.Nested(EventAudioSchema, allow_none=True)
    video = fields.Nested(EventVideoSchema, allow_none=True)
    html = fields.Str(allow_none=True)
    text = fields.Str(allow_none=True)
    chart = fields.Nested(EventChartSchema, allow_none=True)
    curve = fields.Nested(EventCurveSchema, allow_none=True)
    artifact = fields.Nested(EventArtifactSchema, allow_none=True)
    model = fields.Nested(EventModelSchema, allow_none=True)
    dataframe = fields.Nested(EventDataframeSchema, allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1Event

    @pre_load
    def pre_validate(self, data, **kwargs):
        """Coerce nested payloads (possibly JSON strings) into plain dicts."""
        for key in (
            "image",
            "histogram",
            "audio",
            "video",
            "chart",
            "curve",
            "artifact",
            "model",
            "dataframe",
        ):
            if data.get(key) is not None:
                data[key] = parser.get_dict(key=key, value=data[key])
        return data

    @validates_schema
    @check_partial
    def validate_event(self, values, **kwargs):
        """Reject events that do not carry exactly one primitive payload."""
        primitives = (
            "metric",
            "image",
            "histogram",
            "audio",
            "video",
            "html",
            "text",
            "chart",
            "curve",
            "artifact",
            "model",
            "dataframe",
        )
        found = 0
        for name in primitives:
            if values.get(name) is not None:
                found += 1
                if found > 1:
                    # Fail as soon as a second primitive shows up.
                    raise ValidationError(
                        "An event should have one and only one primitive, found {}.".format(
                            found
                        )
                    )
        if found != 1:
            # Zero primitives is also invalid.
            raise ValidationError(
                "An event should have one and only one primitive, found {}.".format(
                    found
                )
            )
class V1Event(BaseConfig, polyaxon_sdk.V1Event):
    """A single logged event: (timestamp, step, exactly one primitive payload)."""

    SEPARATOR = "|"
    IDENTIFIER = "event"
    SCHEMA = EventSchema
    REDUCED_ATTRIBUTES = [
        "metric",
        "image",
        "histogram",
        "audio",
        "video",
        "html",
        "text",
        "chart",
        "curve",
        "artifact",
        "model",
        "dataframe",
    ]

    @classmethod
    def make(
        cls,
        step: int = None,
        timestamp=None,
        metric: float = None,
        image: V1EventImage = None,
        histogram: V1EventHistogram = None,
        audio: V1EventAudio = None,
        video: V1EventVideo = None,
        html: str = None,
        text: str = None,
        chart: V1EventChart = None,
        curve: V1EventCurve = None,
        artifact: V1EventArtifact = None,
        model: V1EventModel = None,
        dataframe: V1EventDataframe = None,
    ) -> "V1Event":
        """Build an event, parsing string timestamps and defaulting to now()."""
        if isinstance(timestamp, str):
            try:
                timestamp = parse_datetime(timestamp)
            except Exception as e:
                raise ValidationError("Received an invalid timestamp") from e

        payload = dict(
            step=step,
            metric=metric,
            image=image,
            histogram=histogram,
            audio=audio,
            video=video,
            html=html,
            text=text,
            chart=chart,
            curve=curve,
            artifact=artifact,
            model=model,
            dataframe=dataframe,
        )
        return cls(timestamp=timestamp or now(tzinfo=True), **payload)

    def get_value(self, dump=True):
        """Return the event's single primitive payload.

        When ``dump`` is True, metrics are stringified and nested payloads are
        converted to dicts; html/text are always returned as raw strings.
        """
        if self.metric is not None:
            return str(self.metric) if dump else self.metric
        for attr in ("image", "histogram", "audio", "video"):
            value = getattr(self, attr)
            if value is not None:
                return value.to_dict(dump=dump) if dump else value
        if self.html is not None:
            return self.html
        if self.text is not None:
            return self.text
        for attr in ("chart", "curve", "artifact", "model", "dataframe"):
            value = getattr(self, attr)
            if value is not None:
                return value.to_dict(dump=dump) if dump else value

    def to_csv(self) -> str:
        """Render as one `step|timestamp|value` row using SEPARATOR."""
        step_col = str(self.step) if self.step is not None else ""
        ts_col = str(self.timestamp) if self.timestamp is not None else ""
        return self.SEPARATOR.join([step_col, ts_col, self.get_value(dump=True)])
class V1Events:
    """A (kind, name) series of events backed by a pandas DataFrame."""

    ORIENT_CSV = "csv"
    ORIENT_DICT = "dict"

    def __init__(self, kind, name, df):
        # kind: artifact kind (e.g. "metric"); name: series name; df: event rows.
        self.kind = kind
        self.name = name
        self.df = df

    @classmethod
    def read(
        cls, kind: str, name: str, data: Union[str, Dict], parse_dates: bool = True
    ) -> "V1Events":
        """Load a series from CSV text (SEPARATOR-delimited) or a dict of columns."""
        import pandas as pd

        if isinstance(data, str):
            read_kwargs = {"sep": V1Event.SEPARATOR}
            if parse_dates:
                # Parse the timestamp column into datetimes on read.
                read_kwargs["parse_dates"] = ["timestamp"]
            frame = pd.read_csv(validate_csv(data), **read_kwargs)
        elif isinstance(data, dict):
            frame = pd.DataFrame.from_dict(data)
        else:
            raise ValueError(
                "V1Events received an unsupported value type: {}".format(type(data))
            )
        return cls(name=name, kind=kind, df=frame)

    def to_dict(self, orient: str = "list") -> Dict:
        """Export the frame as a dict; NaN is normalized to None for JSON safety."""
        import numpy as np

        return self.df.replace({np.nan: None}).to_dict(orient=orient)

    def get_event_at(self, index):
        """Materialize the row at ``index`` back into a V1Event."""
        row = self.df.iloc[index].to_dict()
        row["timestamp"] = row["timestamp"].isoformat()
        row["step"] = sanitize_np_types(row["step"])
        return V1Event.from_dict(row)

    def _get_step_summary(self) -> Optional[Dict]:
        # Summarize the step column; None when there are no steps.
        steps = self.df.step
        total = steps.count()
        if total == 0:
            return None
        return {
            "count": sanitize_np_types(total),
            "min": sanitize_np_types(steps.iloc[0]),
            "max": sanitize_np_types(steps.iloc[-1]),
        }

    def _get_ts_summary(self) -> Optional[Dict]:
        # Summarize the timestamp column; None when there are no timestamps.
        timestamps = self.df.timestamp
        if timestamps.count() == 0:
            return None
        return {
            "min": timestamps.iloc[0].isoformat(),
            "max": timestamps.iloc[-1].isoformat(),
        }

    def get_summary(self) -> Dict:
        """Aggregate step/timestamp summaries plus metric statistics when applicable."""
        summary = {"is_event": True}
        step_summary = self._get_step_summary()
        if step_summary:
            summary["step"] = step_summary
        ts_summary = self._get_ts_summary()
        if ts_summary:
            summary["timestamp"] = ts_summary
        if self.kind == V1ArtifactKind.METRIC:
            stats = {
                key: sanitize_np_types(value)
                for key, value in self.df.metric.describe().to_dict().items()
            }
            stats["last"] = sanitize_np_types(self.df.metric.iloc[-1])
            summary[self.kind] = stats
        return summary
class LoggedEventSpec(namedtuple("LoggedEventSpec", "name kind event")):
    """Immutable triple describing one event to log: (name, kind, event)."""
class LoggedEventListSpec(namedtuple("LoggedEventListSpec", "name kind events")):
    """A named batch of same-kind events with CSV/dict export helpers."""

    def get_csv_header(self) -> str:
        # Header row: step|timestamp|<kind>.
        return V1Event.SEPARATOR.join(["step", "timestamp", self.kind])

    def get_csv_events(self) -> str:
        # Each event is emitted on its own line, prefixed by a newline.
        return "".join("\n{}".format(event.to_csv()) for event in self.events)

    def empty_events(self):
        # Clear in place so any shared references observe the reset.
        del self.events[:]

    def to_dict(self):
        return {
            "name": self.name,
            "kind": self.kind,
            "events": [event.to_dict() for event in self.events],
        }

    @classmethod
    def from_dict(cls, value: Mapping) -> "LoggedEventListSpec":
        parsed = [V1Event.from_dict(item) for item in value.get("events", [])]
        return cls(name=value.get("name"), kind=value.get("kind"), events=parsed)
| apache-2.0 |
fraricci/pymatgen | pymatgen/analysis/defects/thermodynamics.py | 2 | 30439 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
import numpy as np
from monty.json import MSONable
from scipy.spatial import HalfspaceIntersection
from scipy.optimize import bisect
from itertools import chain
from pymatgen.electronic_structure.dos import FermiDos
from pymatgen.analysis.defects.core import DefectEntry
from pymatgen.analysis.structure_matcher import PointDefectComparator
import matplotlib.pyplot as plt
import matplotlib.cm as cm
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "Mar 15, 2018"
logger = logging.getLogger(__name__)
class DefectPhaseDiagram(MSONable):
"""
This is similar to a PhaseDiagram object in pymatgen,
but has ability to do quick analysis of defect formation energies
when fed DefectEntry objects.
uses many of the capabilities from PyCDT's DefectsAnalyzer class...
This class is able to get:
a) stability of charge states for a given defect,
        b) list of all formation energies
c) transition levels in the gap
Args:
dentries ([DefectEntry]): A list of DefectEntry objects
vbm (float): Valence Band energy to use for all defect entries.
NOTE if using band shifting-type correction then this VBM
should still be that of the GGA calculation
(the bandedgeshifting_correction accounts for shift's
contribution to formation energy).
band_gap (float): Band gap to use for all defect entries.
NOTE if using band shifting-type correction then this gap
should still be that of the Hybrid calculation you are shifting to.
        filter_compatible (bool): Whether to consider entries which were ruled
            incompatible by the DefectCompatibility class. Note this must be set to False
            if you desire a suggestion for larger supercell sizes.
            Default is True (to omit calculations which have "is_compatible"=False in
            DefectEntry's parameters)
metadata (dict): Dictionary of metadata to store with the PhaseDiagram. Has
no impact on calculations.
"""
def __init__(self, entries, vbm, band_gap, filter_compatible=True, metadata={}):
self.vbm = vbm
self.band_gap = band_gap
self.filter_compatible = filter_compatible
if filter_compatible:
self.entries = [e for e in entries if e.parameters.get("is_compatible", True)]
else:
self.entries = entries
for ent_ind, ent in enumerate(self.entries):
if 'vbm' not in ent.parameters.keys() or ent.parameters['vbm'] != vbm:
logger.info("Entry {} did not have vbm equal to given DefectPhaseDiagram value."
" Manually overriding.".format(ent.name))
new_ent = ent.copy()
new_ent.parameters['vbm'] = vbm
self.entries[ent_ind] = new_ent
self.metadata = metadata
self.find_stable_charges()
def as_dict(self):
"""
Json-serializable dict representation of DefectPhaseDiagram
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": [entry.as_dict() for entry in self.entries],
"vbm": self.vbm,
"band_gap": self.band_gap,
"filter_compatible": self.filter_compatible,
"metadata": self.metadata}
return d
@classmethod
def from_dict(cls, d):
"""
Reconstitute a DefectPhaseDiagram object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of DefectPhaseDiagram.
Returns:
DefectPhaseDiagram object
"""
entries = [DefectEntry.from_dict(entry_dict) for entry_dict in d.get("entries")]
vbm = d["vbm"]
band_gap = d["band_gap"]
filter_compatible = d.get("filter_compatible", True)
metadata = d.get("metadata", {})
if 'entry_id' in d.keys() and 'entry_id' not in metadata:
metadata['entry_id'] = d['entry_id']
return cls(entries, vbm, band_gap, filter_compatible=filter_compatible,
metadata=metadata)
    def find_stable_charges(self):
        """
        Sets the stable charges and transition states for a series of
        defect entries. This function uses scipy's HalfspaceIntersection
        to construct the polygons corresponding to defect stability as
        a function of the Fermi-level. The Halfspace Intersection
        constructs N-dimensional hyperplanes, in this case N=2, based
        on the equation of defect formation energy with considering chemical
        potentials:
            E_form = E_0^{Corrected} + Q_{defect}*(E_{VBM} + E_{Fermi})

        Extra hyperplanes are constructed to bound this space so that
        the algorithm can actually find enclosed region.

        This code was modeled after the Halfspace Intersection code for
        the Pourbaix Diagram

        Populates: self.transition_level_map, self.transition_levels,
        self.stable_entries, self.finished_charges, self.stable_charges.
        """
        def similar_defects(entryset):
            """
            Used for grouping similar defects of different charges
            Can distinguish identical defects even if they are not in same position
            """
            pdc = PointDefectComparator(check_charge=False, check_primitive_cell=True,
                                        check_lattice_scale=False)
            grp_def_sets = []
            grp_def_indices = []
            for ent_ind, ent in enumerate(entryset):
                # TODO: more pythonic way of grouping entry sets with PointDefectComparator.
                # this is currently most time intensive part of DefectPhaseDiagram
                matched_ind = None
                for grp_ind, defgrp in enumerate(grp_def_sets):
                    if pdc.are_equal(ent.defect, defgrp[0].defect):
                        matched_ind = grp_ind
                        break
                if matched_ind is not None:
                    grp_def_sets[matched_ind].append(ent.copy())
                    grp_def_indices[matched_ind].append(ent_ind)
                else:
                    # First member of a new group.
                    grp_def_sets.append([ent.copy()])
                    grp_def_indices.append([ent_ind])
            return zip(grp_def_sets, grp_def_indices)

        # Limits for search
        # E_fermi = { -1 eV to band gap+1}
        # E_formation = { (min(Eform) - 30) to (max(Eform) + 30)}
        all_eform = [one_def.formation_energy(fermi_level=self.band_gap / 2.) for one_def in self.entries]
        min_y_lim = min(all_eform) - 30
        max_y_lim = max(all_eform) + 30
        limits = [[-1, self.band_gap + 1], [min_y_lim, max_y_lim]]
        stable_entries = {}
        finished_charges = {}
        transition_level_map = {}
        # Grouping by defect types
        for defects, index_list in similar_defects(self.entries):
            defects = list(defects)
            # prepping coefficient matrix for half-space intersection
            # [-Q, 1, -1*(E_form+Q*VBM)] -> -Q*E_fermi+E+-1*(E_form+Q*VBM) <= 0 where E_fermi and E are the variables
            # in the hyperplanes
            hyperplanes = np.array(
                [[-1.0 * entry.charge, 1, -1.0 * (entry.energy + entry.charge * self.vbm)] for entry in defects])
            # Four extra half-planes bound the (E_fermi, E_form) search window.
            border_hyperplanes = [[-1, 0, limits[0][0]], [1, 0, -1 * limits[0][1]], [0, -1, limits[1][0]],
                                  [0, 1, -1 * limits[1][1]]]
            hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
            interior_point = [self.band_gap / 2, min(all_eform) - 1.]
            hs_ints = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
            # Group the intersections and corresponding facets
            ints_and_facets = zip(hs_ints.intersections, hs_ints.dual_facets)
            # Only include the facets corresponding to entries, not the boundaries
            total_entries = len(defects)
            ints_and_facets = filter(lambda int_and_facet: all(np.array(int_and_facet[1]) < total_entries),
                                     ints_and_facets)
            # sort based on transition level
            ints_and_facets = list(sorted(ints_and_facets, key=lambda int_and_facet: int_and_facet[0][0]))
            # log a defect name for tracking (using full index list to avoid naming
            # in-equivalent defects with same name)
            str_index_list = [str(ind) for ind in sorted(index_list)]
            track_name = defects[0].name + "@" + str("-".join(str_index_list))
            if len(ints_and_facets):
                # Unpack into lists
                _, facets = zip(*ints_and_facets)
                # Map of transition level: charge states
                transition_level_map[track_name] = {
                    intersection[0]: [defects[i].charge for i in facet]
                    for intersection, facet in ints_and_facets
                }
                stable_entries[track_name] = list(set([defects[i] for dual in facets for i in dual]))
                finished_charges[track_name] = [defect.charge for defect in defects]
            else:
                # if ints_and_facets is empty, then there is likely only one defect...
                if len(defects) != 1:
                    # confirm formation energies dominant for one defect over other identical defects
                    name_set = [one_def.name + '_chg' + str(one_def.charge) for one_def in defects]
                    vb_list = [one_def.formation_energy(fermi_level=limits[0][0]) for one_def in defects]
                    cb_list = [one_def.formation_energy(fermi_level=limits[0][1]) for one_def in defects]
                    vbm_def_index = vb_list.index(min(vb_list))
                    name_stable_below_vbm = name_set[vbm_def_index]
                    cbm_def_index = cb_list.index(min(cb_list))
                    name_stable_above_cbm = name_set[cbm_def_index]
                    if name_stable_below_vbm != name_stable_above_cbm:
                        raise ValueError("HalfSpace identified only one stable charge out of list: {}\n"
                                         "But {} is stable below vbm and {} is "
                                         "stable above cbm.\nList of VBM formation energies: {}\n"
                                         "List of CBM formation energies: {}"
                                         "".format(name_set, name_stable_below_vbm, name_stable_above_cbm,
                                                   vb_list, cb_list))
                    else:
                        logger.info("{} is only stable defect out of {}".format(name_stable_below_vbm, name_set))
                        transition_level_map[track_name] = {}
                        stable_entries[track_name] = list([defects[vbm_def_index]])
                        finished_charges[track_name] = [one_def.charge for one_def in defects]
                else:
                    # Single-entry group: trivially stable, no transitions.
                    transition_level_map[track_name] = {}
                    stable_entries[track_name] = list([defects[0]])
                    finished_charges[track_name] = [defects[0].charge]
        self.transition_level_map = transition_level_map
        self.transition_levels = {
            defect_name: list(defect_tls.keys())
            for defect_name, defect_tls in transition_level_map.items()
        }
        self.stable_entries = stable_entries
        self.finished_charges = finished_charges
        self.stable_charges = {
            defect_name: [entry.charge for entry in entries]
            for defect_name, entries in stable_entries.items()
        }
@property
def defect_types(self):
"""
List types of defects existing in the DefectPhaseDiagram
"""
return list(self.finished_charges.keys())
@property
def all_stable_entries(self):
"""
List all stable entries (defect+charge) in the DefectPhaseDiagram
"""
return set(chain.from_iterable(self.stable_entries.values()))
@property
def all_unstable_entries(self):
"""
List all unstable entries (defect+charge) in the DefectPhaseDiagram
"""
all_stable_entries = self.all_stable_entries
return [e for e in self.entries if e not in all_stable_entries]
def defect_concentrations(self, chemical_potentials, temperature=300, fermi_level=0.):
"""
Give list of all concentrations at specified efermi in the DefectPhaseDiagram
args:
chemical_potentials = {Element: number} is dict of chemical potentials to provide formation energies for
temperature = temperature to produce concentrations from
fermi_level: (float) is fermi level relative to valence band maximum
Default efermi = 0 = VBM energy
returns:
list of dictionaries of defect concentrations
"""
concentrations = []
for dfct in self.all_stable_entries:
concentrations.append({
'conc':
dfct.defect_concentration(
chemical_potentials=chemical_potentials, temperature=temperature, fermi_level=fermi_level),
'name':
dfct.name,
'charge':
dfct.charge
})
return concentrations
    def suggest_charges(self, tolerance=0.1):
        """
        Suggest possible charges for defects to compute based on proximity
        of known transitions from entries to VBM and CBM

        Args:
            tolerance (float): tolerance with respect to the VBM and CBM to
                continue to compute new charges

        Returns:
            dict of {defect-type name: [new charges worth computing]}
        """
        recommendations = {}
        for def_type in self.defect_types:
            # Candidates: one charge beyond the stable range on each side,
            # excluding anything already computed.
            test_charges = np.arange(
                np.min(self.stable_charges[def_type]) - 1,
                np.max(self.stable_charges[def_type]) + 2)
            test_charges = [charge for charge in test_charges if charge not in self.finished_charges[def_type]]
            if len(self.transition_level_map[def_type].keys()):
                # More positive charges will shift the minimum transition level down
                # Max charge is limited by this if its transition level is close to VBM
                min_tl = min(self.transition_level_map[def_type].keys())
                if min_tl < tolerance:
                    max_charge = max(self.transition_level_map[def_type][min_tl])
                    test_charges = [charge for charge in test_charges if charge < max_charge]
                # More negative charges will shift the maximum transition level up
                # Minimum charge is limited by this if transition level is near CBM
                max_tl = max(self.transition_level_map[def_type].keys())
                if max_tl > (self.band_gap - tolerance):
                    min_charge = min(self.transition_level_map[def_type][max_tl])
                    test_charges = [charge for charge in test_charges if charge > min_charge]
            else:
                # No transitions known: only suggest charges not already stable.
                test_charges = [charge for charge in test_charges if charge not in self.stable_charges[def_type]]
            recommendations[def_type] = test_charges
        return recommendations
def suggest_larger_supercells(self, tolerance=0.1):
"""
Suggest larger supercells for different defect+chg combinations based on use of
compatibility analysis. Does this for any charged defects which have is_compatible = False,
and the defect+chg formation energy is stable at fermi levels within the band gap.
NOTE: Requires self.filter_compatible = False
Args:
tolerance (float): tolerance with respect to the VBM and CBM for considering
larger supercells for a given charge
"""
if self.filter_compatible:
raise ValueError("Cannot suggest larger supercells if filter_compatible is True.")
recommendations = {}
for def_type in self.defect_types:
template_entry = self.stable_entries[def_type][0].copy()
defect_indices = [int(def_ind) for def_ind in def_type.split('@')[-1].split('-')]
for charge in self.finished_charges[def_type]:
chg_defect = template_entry.defect.copy()
chg_defect.set_charge(charge)
for entry_index in defect_indices:
entry = self.entries[entry_index]
if entry.charge == charge:
break
if entry.parameters.get("is_compatible", True):
continue
else:
# consider if transition level is within
# tolerance of band edges
suggest_bigger_supercell = True
for tl, chgset in self.transition_level_map.items():
sorted_chgset = list(chgset)
sorted_chgset.sort(reverse=True)
if charge == sorted_chgset[0] and tl < tolerance:
suggest_bigger_supercell = False
elif charge == sorted_chgset[1] and tl > (self.band_gap - tolerance):
suggest_bigger_supercell = False
if suggest_bigger_supercell:
if def_type not in recommendations:
recommendations[def_type] = []
recommendations[def_type].append(charge)
return recommendations
def solve_for_fermi_energy(self, temperature, chemical_potentials, bulk_dos):
"""
Solve for the Fermi energy self-consistently as a function of T
Observations are Defect concentrations, electron and hole conc
Args:
temperature: Temperature to equilibrate fermi energies for
chemical_potentials: dict of chemical potentials to use for calculation fermi level
bulk_dos: bulk system dos (pymatgen Dos object)
Returns:
Fermi energy dictated by charge neutrality
"""
fdos = FermiDos(bulk_dos, bandgap=self.band_gap)
_, fdos_vbm = fdos.get_cbm_vbm()
def _get_total_q(ef):
qd_tot = sum([
d['charge'] * d['conc']
for d in self.defect_concentrations(
chemical_potentials=chemical_potentials, temperature=temperature, fermi_level=ef)
])
qd_tot += fdos.get_doping(fermi_level=ef + fdos_vbm, temperature=temperature)
return qd_tot
return bisect(_get_total_q, -1., self.band_gap + 1.)
def solve_for_non_equilibrium_fermi_energy(self, temperature, quench_temperature,
chemical_potentials, bulk_dos):
"""
Solve for the Fermi energy after quenching in the defect concentrations at a higher
temperature (the quench temperature),
as outlined in P. Canepa et al (2017) Chemistry of Materials (doi: 10.1021/acs.chemmater.7b02909)
Args:
temperature: Temperature to equilibrate fermi energy at after quenching in defects
quench_temperature: Temperature to equilibrate defect concentrations at (higher temperature)
chemical_potentials: dict of chemical potentials to use for calculation fermi level
bulk_dos: bulk system dos (pymatgen Dos object)
Returns:
Fermi energy dictated by charge neutrality with respect to frozen in defect concentrations
"""
high_temp_fermi_level = self.solve_for_fermi_energy(quench_temperature, chemical_potentials,
bulk_dos)
fixed_defect_charge = sum([
d['charge'] * d['conc']
for d in self.defect_concentrations(
chemical_potentials=chemical_potentials, temperature=quench_temperature,
fermi_level=high_temp_fermi_level)
])
fdos = FermiDos(bulk_dos, bandgap=self.band_gap)
_, fdos_vbm = fdos.get_cbm_vbm()
def _get_total_q(ef):
qd_tot = fixed_defect_charge
qd_tot += fdos.get_doping(fermi_level=ef + fdos_vbm, temperature=temperature)
return qd_tot
return bisect(_get_total_q, -1., self.band_gap + 1.)
return
    def get_dopability_limits(self, chemical_potentials):
        """
        Find Dopability limits for a given chemical potential.
        This is defined by the defect formation energies which first cross zero
        in formation energies.
        This determines bounds on the fermi level.

        Does this by computing formation energy for every stable defect with non-zero charge.
        If the formation energy value changes sign on either side of the band gap, then
        compute the fermi level value where the formation energy is zero
        (formation energies are lines and basic algebra shows: x_crossing = x1 - (y1 / q)
        for fermi level, x1, producing formation energy y1)

        Args:
            chemical_potentials: dict of chemical potentials to use for calculation fermi level
        Returns:
            lower dopability limit, upper dopability limit
            (returns None if no limit exists for upper or lower i.e. no negative defect
            crossing before +/- 20 of band edges OR defect formation energies are entirely zero)
        """
        # Scan window: 20 eV beyond each band edge.
        min_fl_range = -20.
        max_fl_range = self.band_gap + 20.
        lower_lim = None
        upper_lim = None
        for def_entry in self.all_stable_entries:
            # Formation energy at both extremes of the scanned fermi-level window.
            min_fl_formen = def_entry.formation_energy(chemical_potentials=chemical_potentials,
                                                       fermi_level=min_fl_range)
            max_fl_formen = def_entry.formation_energy(chemical_potentials=chemical_potentials,
                                                       fermi_level=max_fl_range)
            if min_fl_formen < 0. and max_fl_formen < 0.:
                logger.error("Formation energy is negative through entire gap for entry {} q={}."
                             " Cannot return dopability limits.".format(def_entry.name, def_entry.charge))
                return None, None
            elif np.sign(min_fl_formen) != np.sign(max_fl_formen):
                # Line crosses zero inside the window; solve E_form(x) = 0 analytically.
                x_crossing = min_fl_range - (min_fl_formen / def_entry.charge)
                if min_fl_formen < 0.:
                    # Negative at far left -> this defect constrains the lower bound;
                    # keep the largest such crossing.
                    if lower_lim is None or lower_lim < x_crossing:
                        lower_lim = x_crossing
                else:
                    # Negative at far right -> constrains the upper bound; keep smallest.
                    if upper_lim is None or upper_lim > x_crossing:
                        upper_lim = x_crossing
        return lower_lim, upper_lim
    def plot(self, mu_elts=None, xlim=None, ylim=None, ax_fontsize=1.3, lg_fontsize=1.,
             lg_position=None, fermi_level=None, title=None, saved=False):
        """
        Produce defect Formation energy vs Fermi energy plot
        Args:
            mu_elts:
                a dictionary of {Element:value} giving the chemical
                potential of each element
            xlim:
                Tuple (min,max) giving the range of the x (fermi energy) axis
            ylim:
                Tuple (min,max) giving the range for the formation energy axis
            ax_fontsize:
                float multiplier to change axis label fontsize
            lg_fontsize:
                float multiplier to change legend label fontsize
            lg_position:
                Tuple (horizontal-position, vertical-position) giving the position
                to place the legend.
                Example: (0.5,-0.75) will likely put it below the x-axis.
            fermi_level:
                optional float; if given, a vertical dash-dot line is drawn at
                this fermi level
            title:
                optional plot title (also used as the file-name prefix when saving)
            saved:
                bool; if True, save the figure to "<title>FreyplnravgPlot.pdf"
                instead of returning the matplotlib module
        Returns:
            a matplotlib object
        """
        if xlim is None:
            xlim = (-0.5, self.band_gap + 0.5)
        xy = {}
        # Extreme fermi-level caps so each formation-energy line visibly extends
        # well past the plotted x-window.
        lower_cap = -100.
        upper_cap = 100.
        y_range_vals = []  # for finding max/min values on y-axis based on x-limits
        for defnom, def_tl in self.transition_level_map.items():
            xy[defnom] = [[], []]
            if def_tl:
                org_x = list(def_tl.keys())  # list of transition levels
                org_x.sort()  # sorted with lowest first
                # establish lower x-bound: the most positive charge is stable
                # below the first transition level
                first_charge = max(def_tl[org_x[0]])
                for chg_ent in self.stable_entries[defnom]:
                    if chg_ent.charge == first_charge:
                        form_en = chg_ent.formation_energy(chemical_potentials=mu_elts,
                                                           fermi_level=lower_cap)
                        fe_left = chg_ent.formation_energy(chemical_potentials=mu_elts,
                                                           fermi_level=xlim[0])
                        xy[defnom][0].append(lower_cap)
                        xy[defnom][1].append(form_en)
                        y_range_vals.append(fe_left)
                # iterate over stable charge state transitions
                for fl in org_x:
                    charge = max(def_tl[fl])
                    for chg_ent in self.stable_entries[defnom]:
                        if chg_ent.charge == charge:
                            form_en = chg_ent.formation_energy(chemical_potentials=mu_elts,
                                                               fermi_level=fl)
                            xy[defnom][0].append(fl)
                            xy[defnom][1].append(form_en)
                            y_range_vals.append(form_en)
                # establish upper x-bound: the most negative charge is stable
                # above the last transition level
                last_charge = min(def_tl[org_x[-1]])
                for chg_ent in self.stable_entries[defnom]:
                    if chg_ent.charge == last_charge:
                        form_en = chg_ent.formation_energy(chemical_potentials=mu_elts,
                                                           fermi_level=upper_cap)
                        fe_right = chg_ent.formation_energy(chemical_potentials=mu_elts,
                                                            fermi_level=xlim[1])
                        xy[defnom][0].append(upper_cap)
                        xy[defnom][1].append(form_en)
                        y_range_vals.append(fe_right)
            else:
                # no transition - just one stable charge across the whole window
                chg_ent = self.stable_entries[defnom][0]
                for x_extrem in [lower_cap, upper_cap]:
                    xy[defnom][0].append(x_extrem)
                    xy[defnom][1].append(chg_ent.formation_energy(chemical_potentials=mu_elts,
                                                                  fermi_level=x_extrem)
                                         )
                for x_window in xlim:
                    y_range_vals.append(chg_ent.formation_energy(chemical_potentials=mu_elts,
                                                                 fermi_level=x_window)
                                        )
        if ylim is None:
            # Auto-scale the y-axis with 10% padding around the observed range.
            window = max(y_range_vals) - min(y_range_vals)
            spacer = 0.1 * window
            ylim = (min(y_range_vals) - spacer, max(y_range_vals) + spacer)
        if len(xy) <= 8:
            colors = cm.Dark2(np.linspace(0, 1, len(xy)))
        else:
            # Dark2 only provides 8 distinguishable colors; fall back to a
            # rainbow colormap for larger defect sets.
            colors = cm.gist_rainbow(np.linspace(0, 1, len(xy)))
        plt.figure()
        plt.clf()
        width = 12  # base size used to scale fonts below
        # plot formation energy lines
        for_legend = []
        for cnt, defnom in enumerate(xy.keys()):
            plt.plot(xy[defnom][0], xy[defnom][1], linewidth=3, color=colors[cnt])
            for_legend.append(self.stable_entries[defnom][0].copy())
        # plot transition levels as star markers on each line
        for cnt, defnom in enumerate(xy.keys()):
            x_trans, y_trans = [], []
            for x_val, chargeset in self.transition_level_map[defnom].items():
                x_trans.append(x_val)
                for chg_ent in self.stable_entries[defnom]:
                    if chg_ent.charge == chargeset[0]:
                        form_en = chg_ent.formation_energy(chemical_potentials=mu_elts,
                                                           fermi_level=x_val)
                        y_trans.append(form_en)
            if len(x_trans):
                plt.plot(x_trans, y_trans, marker='*', color=colors[cnt], markersize=12, fillstyle='full')
        # get latex-like legend titles from defect names such as
        # "Vac_<site>", "Sub_<x>_on_<y>", "Int_<x>"
        legends_txt = []
        for dfct in for_legend:
            flds = dfct.name.split('_')
            if 'Vac' == flds[0]:
                base = '$Vac'
                sub_str = '_{' + flds[1] + '}$'
            elif 'Sub' == flds[0]:
                flds = dfct.name.split('_')
                base = '$' + flds[1]
                sub_str = '_{' + flds[3] + '}$'
            elif 'Int' == flds[0]:
                base = '$' + flds[1]
                sub_str = '_{inter}$'
            else:
                # unrecognized naming scheme: use the raw name
                base = dfct.name
                sub_str = ''
            legends_txt.append(base + sub_str)
        if not lg_position:
            plt.legend(legends_txt, fontsize=lg_fontsize * width, loc=0)
        else:
            plt.legend(legends_txt, fontsize=lg_fontsize * width, ncol=3,
                       loc='lower center', bbox_to_anchor=lg_position)
        plt.ylim(ylim)
        plt.xlim(xlim)
        plt.plot([xlim[0], xlim[1]], [0, 0], 'k-')  # solid black line at Eformation = 0
        plt.axvline(x=0.0, linestyle='--', color='k', linewidth=3)  # black dashed lines for gap edges
        plt.axvline(x=self.band_gap, linestyle='--', color='k',
                    linewidth=3)
        if fermi_level is not None:
            plt.axvline(x=fermi_level, linestyle='-.', color='k', linewidth=2)  # dash-dot line marking the provided fermi level
        plt.xlabel("Fermi energy (eV)", size=ax_fontsize * width)
        plt.ylabel("Defect Formation\nEnergy (eV)", size=ax_fontsize * width)
        if title:
            plt.title("{}".format(title), size=ax_fontsize * width)
        if saved:
            plt.savefig(str(title) + "FreyplnravgPlot.pdf")
        else:
            return plt
| mit |
tensorflow/probability | tensorflow_probability/python/sts/regularization.py | 1 | 12469 | # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for regularizing a time series to a fixed frequency."""
import collections
import itertools
import math
import warnings
import numpy as np
__all__ = [
    'MissingValuesTolerance',
    'regularize_series',
]
# Thresholds describing how many missing values are acceptable after a series
# has been regularized. The same structure is used for both the warning and
# the error checks in `_check_missing_values`.
MissingValuesTolerance = collections.namedtuple(
    'MissingValuesTolerance',
    ['overall_fraction',
     'fraction_low_missing_number',
     'fraction_high_missing_number',
     'low_missing_number',
     'high_missing_number'])
# Map from Pandas to Numpy timedelta identifiers.
_PD_TO_NP_DELTAS = {'weeks': 'W', 'days': 'D', 'hours': 'h', 'minutes': 'm',
                    'seconds': 's', 'milliseconds': 'ms', 'microseconds': 'us',
                    'nanoseconds': 'ns'}
# This defines valid dtypes for time series values.
_VALID_DATA_TYPES = (np.float16, np.float32, np.float64, np.int32, np.int64)
def regularize_series(series,
                      frequency=None,
                      warn_missing_tolerance=None,
                      err_missing_tolerance=None,
                      max_series_length=None):
  """Infers frequency and makes an irregular time series regular.

  Converts a time series into a regular time series having the same
  period between successive time points (e.g. 5 seconds, or 1 day). If
  the frequency is known, it can be supplied through the 'frequency'
  argument; otherwise it will be inferred.

  If multiple values share the same timestamp, they are summed into a single
  value.

  Args:
    series: a Pandas `pd.Series` instance indexed by a `pd.DateTimeIndex`. This
      may also be a single-column `pd.DataFrame`.
    frequency: a Pandas DateOffset object, e.g. `pd.DateOffset(seconds=1)`. If
      no frequency is specified, and the index of `series` does not have a
      frequency populated, the granularity of the time series will be inferred
      automatically.
      Default value: `None`.
    warn_missing_tolerance: optional instance of
      `tfp.sts.MissingValuesTolerance`, specifying warning thresholds for
      too many missing values.
      Default value: `None`. (do not warn).
    err_missing_tolerance: optional instance of
      `tfp.sts.MissingValuesTolerance`, specifying error thresholds for
      too many missing values.
      Default value: `None`. (do not raise errors).
    max_series_length: `int` maximum length of the regularized
      series (note that regularization may increase the length of the series).
      Used to bound the resources used per invocation.
      Default value: `None`.

  Returns:
    regularized_series: instance of the same type as `series`
      (`pd.Series` or `pd.DataFrame`) whose index follows a regular
      frequency (`regularized_series.index.freq is not None`). Any values
      not provided are filled in as `NaN`.

  Raises:
    TypeError: if `data` is not an instance of `pd.Series` or `pd.DataFrame`.
    ValueError: if `data` is empty, `data.index` is not a DatetimeIndex,
      `data.index` is not sorted, or if applying the inferred
      frequency would exceed the `max_series_length` or create
      more missing values than allowed by `err_missing_vals`.
  """
  # pylint: disable=unused-import,g-import-not-at-top
  import pandas as pd  # Defer import to avoid a package-level Pandas dep.
  from pandas.core.resample import asfreq  # see b/169217869
  # pylint: enable=unused-import,g-import-not-at-top
  _check_data(series)
  # Sum all values provided at each time step, if there is more than one.
  series = series.groupby(
      by=lambda x: x
      # Use numpy sum because just calling `groupby().sum()` would drop NaNs.
  ).agg(lambda x: np.sum(x.values))
  if not frequency:
    frequency = _infer_frequency(series.index)
    # If the frequency is monthly and the first date is the end of a
    # month, follow that convention in future months. Note that this condition
    # is not triggered if the frequency is already `MonthEnd` (which has
    # `kwds == {}`).
    if ('months' in frequency.kwds and
        len(frequency.kwds) == 1 and
        np.all(series.index.is_month_end)):
      frequency = pd.offsets.MonthEnd(n=frequency.kwds['months'])
  # Bound resource usage before materializing the regular index.
  if (max_series_length is not None and
      max_series_length < _estimate_num_steps(
          series.index[0], series.index[-1], frequency)):
    raise ValueError("Applying inferred frequency {} to the time period "
                     "starting at '{}' and ending at '{}' would exceed the "
                     "maximum series length ({}).".format(
                         frequency,
                         series.index[0],
                         series.index[-1],
                         max_series_length))
  # Reindex onto the regular frequency; missing time steps become NaN.
  regularized_series = series.asfreq(frequency)
  if warn_missing_tolerance or err_missing_tolerance:
    _check_missing_values(regularized_series,
                          warn_vals=warn_missing_tolerance,
                          err_vals=err_missing_tolerance)
  return regularized_series
def _check_missing_values(series, warn_vals, err_vals=None):
"""Checks for excess missing values after making a series regular.
After setting of automatic granularity and/or making a time series regular,
it may contain a large number of missing values. This method will throw
an error or warning if a series contains a high fraction or raw number
missing values. See _ERR_VALS and WARN_VALS for thresholds.
Args:
series: instance of `pd.Series` or `pd.DataFrame`.
warn_vals: optional instance of `tfp.sts.MissingValuesTolerance`
specifying thresholds at which to warn about too many missing values.
err_vals: optional instance of `tfp.sts.MissingValuesTolerance`
specifying thresholds at which to raise an error about too many missing
values.
Default value: `None`.
Raises:
ValueError: if the series contains too many missing values.
"""
missing_number = np.sum(np.isnan(series.values))
total_number = np.prod(series.shape)
missing_fraction = missing_number / total_number
missing_msg = 'Too many missing values: {} out of {}.'.format(
missing_number, total_number)
warning_msg = 'Large number of missing values: {}'.format(missing_msg)
# Raise an Error (default) or warning if too many missing values.
if err_vals:
if (missing_fraction >= err_vals.overall_fraction or
(missing_fraction >= err_vals.fraction_low_missing_number and
missing_number >= err_vals.low_missing_number) or
(missing_fraction >= err_vals.fraction_high_missing_number and
missing_number >= err_vals.high_missing_number)):
raise ValueError('Too many missing values: ' + missing_msg)
# Raise a warning in case of a lot of missing values.
if (missing_fraction >= warn_vals.overall_fraction or
(missing_fraction >= warn_vals.fraction_low_missing_number and
missing_number >= warn_vals.low_missing_number) or
(missing_fraction >= warn_vals.fraction_high_missing_number and
missing_number >= warn_vals.high_missing_number)):
warnings.warn(warning_msg)
def _check_data(data):
  """Performs validation checks on pandas input data."""
  # Defer import to avoid a package-level Pandas dep.
  import pandas as pd  # pylint: disable=g-import-not-at-top
  # Run the guards in order: type, emptiness, index type, dtype, sortedness.
  if not isinstance(data, (pd.Series, pd.DataFrame)):
    raise TypeError('Expected a pandas Series or DataFrame.')
  if data.empty:
    raise ValueError('Input data is empty')
  index = data.index
  if not isinstance(index,
                    pd.core.indexes.datetimes.DatetimeIndex):
    raise ValueError('Input data index is not a DatetimeIndex')
  dtype = data.values.dtype
  if dtype not in _VALID_DATA_TYPES:
    raise ValueError('Invalid data type. '
                     'Valid types are: {}. '.format(_VALID_DATA_TYPES) +
                     'Received: {}'.format(dtype))
  if not index.is_monotonic_increasing:
    raise ValueError('Input data index is not sorted.')
def _infer_frequency(date_time_index):
  """Infers frequency from a Pandas DatetimeIndex.

  The frequency is automatically inferred as follows:
  1. Computes the time differences between all time points and determine
     the smallest difference.
  2. For the smallest time difference determine the smallest time
     component from 'seconds', 'minutes, 'hours', 'days', and 'weeks'.
  3. Convert all time differences to the smallest time component determined
     in (2).
  4. Find the greatest common denominator (gcd) determined from the
     resulting time differences in (3). This is used to automatically
     set a time series frequency.

  Args:
    date_time_index: instance of Pandas.DatetimeIndex. Typically this is
      `df.index` for an appropriate dateframe `df`.

  Returns:
    frequency: The inferred frequency as a `pd.DateOffset` instance. This will
      either be a special offset, like `pd.offsets.MonthEnd()`, or will be a
      base `pd.DateOffset` instance with a single keyword component
      (e.g., `DateOffset(hours=26)` rather than `DateOffset(days=1, hours=2)`).
  """
  # Defer import to avoid a package-level Pandas dep.
  import pandas as pd  # pylint: disable=g-import-not-at-top
  # Compute series time deltas and get their minimum.
  diffs = pd.Series(date_time_index).diff()[1:]
  diffs_table = diffs.value_counts()
  min_diff = diffs_table.index.min()
  # Extract datetime components and identify smallest time component.
  min_diff_components = pd.Series(min_diff).dt.components
  available_components = list(min_diff_components)
  components_present = min_diff_components.values[0] > 0
  # The last nonzero component is the finest-grained one present
  # (components are ordered from days down to nanoseconds).
  smallest_unit = list(itertools.compress(available_components,
                                          components_present))[-1]
  # Month- and year-sized intervals have no fixed duration, so try candidate
  # irregular offsets before falling back to fixed-length units.
  irregular_freqs = []
  if smallest_unit == 'days' and min_diff.days >= 365:
    # Attempt to infer a yearly frequency.
    irregular_freqs += [pd.DateOffset(years=min_diff.days // 365)]
  if smallest_unit == 'days' and min_diff.days >= 28:
    # Attempt to infer a monthly frequency. Note that the candidate interval of
    # `days // 28` will fail for intervals larger than 11 months.
    irregular_freqs += [pd.DateOffset(months=min_diff.days // 28),
                        pd.offsets.MonthEnd(n=min_diff.days // 28)]
  for candidate_freq in irregular_freqs:
    # If the candidate frequency explains all of the provided dates, it's
    # probably a reasonable choice.
    if set(pd.date_range(date_time_index.min(),
                         date_time_index.max(),
                         freq=candidate_freq)).issuperset(date_time_index):
      return candidate_freq
  # Pandas Timedelta does not support 'weeks' time components by default.
  # Deal with that special case.
  if smallest_unit == 'days' and min_diff.days % 7 == 0:
    smallest_unit = 'weeks'
  # Express time differences in common unit
  series_divider = np.timedelta64(1, _PD_TO_NP_DELTAS[smallest_unit])
  diffs_common = list((diffs / series_divider).astype(int))
  # Compute the greatest common denominator of time differences.
  diffs_gcd = diffs_common[0]
  for d in diffs_common[1:]:
    diffs_gcd = math.gcd(diffs_gcd, d)
  return pd.DateOffset(**{smallest_unit: diffs_gcd})
def _estimate_num_steps(start_time, end_time, freq):
"""Estimates the number of steps between the given times at the given freq."""
# Unfortunately `(end_time - start_time) / freq` doesn't work in general,
# because some frequencies (e.g., MonthEnd) don't correspond to a fixed length
# of time. Instead, we use a 'typical' length estimated by averaging over a
# small number of steps. This recovers the exact calculation when `freq` does
# have a fixed length (e.g., is measured in seconds, minutes, etc.).
timedelta = ((start_time + 10 * freq) - start_time) / 10.
return (end_time - start_time) / timedelta
| apache-2.0 |
adewynter/Tools | MLandDS/MachineLearning/Kmeans-CustomerAnalysis.py | 1 | 3531 | # Adrian deWynter, 2016
# This dataset is nasty, so we are also going to use some PCA.
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import matplotlib
import math
PLOT_TYPE_TEXT = False  # If True, label each sample with its index instead of drawing dots
PLOT_VECTORS = True     # If True, overlay the original feature axes on the PCA plot
matplotlib.style.use('ggplot')
c = ['red', 'green', 'blue', 'orange', 'yellow', 'brown']  # one color per cluster
# Draws features on PCA space
def drawVectors(transformed_features, components_, columns, plt):
    """Overlay the original feature axes (loadings) onto the 2-D PCA scatter.

    transformed_features: samples projected into PCA space (uses columns 0 and 1)
    components_: the fitted PCA components matrix
    columns: original feature names, used to label the arrows
    plt: the matplotlib.pyplot module to draw on
    Returns the matplotlib axes the arrows were drawn on.
    """
    num_columns = len(columns)
    # Scale the component vectors by the span of the projected data so the
    # arrows are visible at the plot's scale
    xvector = components_[0] * max(transformed_features[:,0])
    yvector = components_[1] * max(transformed_features[:,1])
    # Sort each column by its length (not PCA columns)
    important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
    important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
    print "Projected Features by importance:\n", important_features
    ax = plt.axes()
    for i in range(num_columns):
        # Project each original feature on the PCA axes
        plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75, zorder=600000)
        plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75, zorder=600000)
    return ax
def doPCA(data, dimensions=2):
    """Fit a randomized-solver PCA projecting `data` down to `dimensions` axes.

    Returns the fitted decomposition model (call .transform() to project data).
    """
    # RandomizedPCA was deprecated in scikit-learn 0.18 and removed in 0.20;
    # PCA with svd_solver='randomized' is the documented drop-in replacement.
    from sklearn.decomposition import PCA
    model = PCA(n_components=dimensions, svd_solver='randomized')
    model.fit(data)
    return model
def doKMeans(data, clusters=0):
    """Cluster `data` into `clusters` groups with K-Means.

    Returns (cluster_centers, labels) where labels gives each sample's
    assigned cluster index.
    """
    kmeans = KMeans(n_clusters=clusters)
    # fit() already assigns every training sample a label (kmeans.labels_),
    # so the original extra predict() pass over the same data was redundant.
    kmeans.fit(data)
    return kmeans.cluster_centers_, kmeans.labels_
# Load the UCI wholesale-customers dataset and treat unknowns as zero spend.
df = pd.read_csv('Datasets/Wholesale customers data.csv')
df = df.fillna(value=0)
# Assume single-location wholesale
df = df.drop('Channel',1)
df = df.drop('Region',1)
# We don't care much for outlier customers: drop the 5 highest and 5 lowest
# spenders in every product category before clustering.
drop = {}
for col in df.columns:
    # Bottom 5
    sort = df.sort_values(by=col, ascending=True)
    if len(sort) > 5: sort=sort[:5]
    for index in sort.index: drop[index] = True
    # Top 5
    sort = df.sort_values(by=col, ascending=False)
    if len(sort) > 5: sort=sort[:5]
    for index in sort.index: drop[index] = True
print "Dropping {0} Outliers...".format(len(drop))
df.drop(inplace=True, labels=drop.keys(), axis=0)
print df.describe()
# Alternative preprocessing strategies, left disabled for comparison:
#T = preprocessing.StandardScaler().fit_transform(df)
#T = preprocessing.MinMaxScaler().fit_transform(df)
#T = preprocessing.normalize(df)
#T = preprocessing.scale(df)
T = df
# There are so few features that doing PCA ahead of time isn't really necessary
# Do KMeans
n_clusters = 3
centroids, labels = doKMeans(T, n_clusters)
print centroids
# Do PCA to visualize the results: project both samples and centroids to 2-D.
display_pca = doPCA(T)
T = display_pca.transform(T)
CC = display_pca.transform(centroids)
# Visualize all the samples
fig = plt.figure()
ax = fig.add_subplot(111)
if PLOT_TYPE_TEXT:
    # Label each sample with its dataframe index at its PCA coordinates
    for i in range(len(T)): ax.text(T[i,0], T[i,1], df.index[i], color=c[labels[i]], alpha=0.75, zorder=600000)
    ax.set_xlim(min(T[:,0])*1.2, max(T[:,0])*1.2)
    ax.set_ylim(min(T[:,1])*1.2, max(T[:,1])*1.2)
else:
    # Plot a regular scatter plot
    sample_colors = [ c[labels[i]] for i in range(len(T)) ]
    ax.scatter(T[:, 0], T[:, 1], c=sample_colors, marker='o', alpha=0.2)
# Plot the centroids as X markers, numbered by cluster index
ax.scatter(CC[:, 0], CC[:, 1], marker='x', s=169, linewidths=3, zorder=1000, c=c)
for i in range(len(centroids)): ax.text(CC[i, 0], CC[i, 1], str(i), zorder=500010, fontsize=18, color=c[i])
if PLOT_VECTORS: drawVectors(T, display_pca.components_, df.columns, plt)
# Attach the cluster assignment back onto the (outlier-filtered) dataframe.
df['label'] = pd.Series(labels, index=df.index)
print df
plt.show()
conversationai/wikidetox | experimental/conversation_go_awry/kaggle/trainer/model.py | 1 | 12041 | """
Classifiers for the Toxic Comment Classification Kaggle challenge,
https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge
To run locally:
python trainer/model.py --train_data=train.csv --predict_data=test.csv --y_class=toxic
To run locally using Cloud ML Engine:
gcloud ml-engine local train \
--module-name=trainer.model \
--package-path=trainer \
--job-dir=model -- \
--train_data=train.csv \
--predict_data=test.csv \
--y_class=toxic \
--train_steps=100
To run TensorBoard locally:
tensorboard --logdir=model/
Then visit http://localhost:6006/ to see the dashboard.
"""
from __future__ import print_function
from __future__ import division
import argparse
import os
import sys
import shutil
import pandas as pd
import tensorflow as tf
from sklearn import metrics
from trainer import wikidata
from collections import namedtuple
FLAGS = None
# Data Params
TRAIN_PERCENT = .8  # Percent of data to allocate to training
DATA_SEED = 48173  # Random seed used for splitting the data into train/test
MAX_LABEL = 2  # Number of output classes (binary classification)
MAX_DOCUMENT_LENGTH = 500  # Max length of each comment in words
# Hyper-parameters for the CNN model, bundled into an immutable namedtuple.
CNNParams = namedtuple(
    'CNNParams', ['WINDOW_SIZE', 'EMBEDDING_SIZE', 'POOLING_WINDOW', 'POOLING_STRIDE',
                  'N_FILTERS', 'FILTER_SHAPE1', 'FILTER_SHAPE2'])
cnn_values = {'WINDOW_SIZE': 20, 'EMBEDDING_SIZE': 20, 'POOLING_WINDOW': 4,
              'POOLING_STRIDE': 2, 'N_FILTERS': 10}
# The filter shapes are derived from the other hyper-parameters, so they are
# filled in after the base values are defined.
cnn_values['FILTER_SHAPE1'] = [cnn_values['WINDOW_SIZE'], cnn_values['EMBEDDING_SIZE']]
cnn_values['FILTER_SHAPE2'] = [cnn_values['WINDOW_SIZE'], cnn_values['N_FILTERS']]
CNN_PARAMS = CNNParams(**cnn_values)
# Hyper-parameters for the bag-of-words model.
BOWParams = namedtuple('BOWParams', ['EMBEDDING_SIZE'])
BOW_PARAMS = BOWParams(EMBEDDING_SIZE = 20)
WORDS_FEATURE = 'words'  # Name of the input words feature.
MODEL_LIST = ['bag_of_words', 'cnn']  # Possible models
# Training Params
TRAIN_SEED = 9812  # Random seed used to initialize training
LEARNING_RATE = 0.01
BATCH_SIZE = 20
def estimator_spec_for_softmax_classification(logits, labels, mode):
  """Builds the EstimatorSpec shared by all the models in this module.

  Depending on the value of mode, different EstimatorSpec arguments are required.

  For mode == ModeKeys.TRAIN: required fields are loss and train_op.
  For mode == ModeKeys.EVAL: required field is loss.
  For mode == ModeKeys.PREDICT: required fields are predictions.

  Args:
    logits: [batch_size, MAX_LABEL] tensor of unnormalized class scores.
    labels: [batch_size] tensor of integer class ids (ignored in PREDICT mode).
    mode: a tf.estimator.ModeKeys value.

  Returns:
    EstimatorSpec instance for softmax classification.
  """
  predicted_classes = tf.argmax(logits, axis=1)
  predicted_probs = tf.nn.softmax(logits, name='softmax_tensor')
  predictions = {
      # Holds the raw logit values
      'logits': logits,
      # Holds the class id (0,1) representing the model's prediction of the most
      # likely species for this example.
      'classes': predicted_classes,
      # Holds the probabilities for each prediction
      'probs': predicted_probs,
  }
  # Represents an output of a model that can be served.
  export_outputs = {
      'output': tf.estimator.export.ClassificationOutput(scores=predicted_probs)
  }
  # PREDICT Mode
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        export_outputs=export_outputs
    )
  # Calculate loss for both TRAIN and EVAL modes
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes, name='acc_op'),
      # NOTE(review): AUC is computed here from hard class predictions rather
      # than from predicted_probs — confirm this is intentional.
      'auc': tf.metrics.auc(
          labels=labels, predictions=predicted_classes, name='auc_op'),
  }
  # Add summary ops to the graph. These metrics will be tracked graphed
  # on each checkpoint by TensorBoard.
  tf.summary.scalar('accuracy', eval_metric_ops['accuracy'][1])
  tf.summary.scalar('auc', eval_metric_ops['auc'][1])
  # TRAIN Mode
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    # Log the training loss every 50 steps.
    logging_hook = tf.train.LoggingTensorHook(
        tensors={'loss': loss}, every_n_iter=50)
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        training_hooks=[logging_hook],
        predictions={'loss': loss},
        export_outputs=export_outputs,
        eval_metric_ops=eval_metric_ops
    )
  # EVAL Mode
  assert mode == tf.estimator.ModeKeys.EVAL
  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      predictions=predictions,
      eval_metric_ops=eval_metric_ops,
      export_outputs=export_outputs
  )
def cnn_model(features, labels, mode):
  """A 2 layer ConvNet to predict from sequence of words to a class.

  Largely stolen from:
  https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/learn/text_classification_cnn.py

  Args:
    features: dict of input tensors; must contain WORDS_FEATURE, an integer
      tensor of word ids of shape [batch_size, sequence_length].
    labels: [batch_size] tensor of integer class ids.
    mode: a tf.estimator.ModeKeys value.

  Returns:
    a tf.estimator.EstimatorSpec.
  """
  # Convert indexes of words into embeddings.
  # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
  # maps word indexes of the sequence into [batch_size, sequence_length,
  # EMBEDDING_SIZE].
  word_vectors = tf.contrib.layers.embed_sequence(
      features[WORDS_FEATURE], vocab_size=n_words, embed_dim=
      CNN_PARAMS.EMBEDDING_SIZE)
  # Insert a trailing channels dimension so conv2d sees a 4-D input.
  word_vectors = tf.expand_dims(word_vectors, 3)
  with tf.variable_scope('CNN_Layer1'):
    # Apply Convolution filtering on input sequence.
    conv1 = tf.layers.conv2d(
        word_vectors,
        filters=CNN_PARAMS.N_FILTERS,
        kernel_size=CNN_PARAMS.FILTER_SHAPE1,
        padding='VALID',
        # Add a ReLU for non linearity.
        activation=tf.nn.relu)
    # Max pooling across output of Convolution+Relu.
    pool1 = tf.layers.max_pooling2d(
        conv1,
        pool_size=CNN_PARAMS.POOLING_WINDOW,
        strides=CNN_PARAMS.POOLING_STRIDE,
        padding='SAME')
    # Transpose matrix so that n_filters from convolution becomes width.
    pool1 = tf.transpose(pool1, [0, 1, 3, 2])
  with tf.variable_scope('CNN_Layer2'):
    # Second level of convolution filtering.
    conv2 = tf.layers.conv2d(
        pool1,
        filters=CNN_PARAMS.N_FILTERS,
        kernel_size=CNN_PARAMS.FILTER_SHAPE2,
        padding='VALID')
    # Max across each filter to get useful features for classification.
    # (`axis` replaces the deprecated `squeeze_dims` alias.)
    pool2 = tf.squeeze(tf.reduce_max(conv2, 1), axis=[1])
  # Apply regular WX + B and classification.
  logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
  # The predicted classes are derived from `logits` inside the shared spec
  # helper, so the extra tf.argmax the original computed here was dead code.
  return estimator_spec_for_softmax_classification(
      logits=logits, labels=labels, mode=mode)
def bag_of_words_model(features, labels, mode):
  """
  A bag-of-words model using a learned word embedding. Note it disregards the
  word order in the text.

  Returns a tf.estimator.EstimatorSpec.
  """
  # Treat each word id as a category in [0, n_words).
  words_column = tf.feature_column.categorical_column_with_identity(
      WORDS_FEATURE, num_buckets=n_words)
  # The embedding values are initialized randomly, and are trained along with
  # all other model parameters to minimize the training loss.
  embedded_words_column = tf.feature_column.embedding_column(
      words_column, dimension=BOW_PARAMS.EMBEDDING_SIZE)
  bow_features = tf.feature_column.input_layer(
      features,
      feature_columns=[embedded_words_column])
  # Single dense layer producing one logit per class.
  logits = tf.layers.dense(bow_features, MAX_LABEL, activation=None)
  return estimator_spec_for_softmax_classification(
      logits=logits, labels=labels, mode=mode)
def main():
  """Loads data, trains the selected model, evaluates it, and exports it."""
  global n_words
  tf.logging.set_verbosity(tf.logging.INFO)
  if FLAGS.verbose:
    tf.logging.info('Running in verbose mode')
    tf.logging.set_verbosity(tf.logging.DEBUG)
  # Start from a clean model directory so stale checkpoints are not reused.
  if os.path.isdir(FLAGS.model_dir):
    tf.logging.info("Removing model data from '/{0}'".format(FLAGS.model_dir))
    shutil.rmtree(FLAGS.model_dir)
  # Load and split data
  tf.logging.info('Loading data from {0}'.format(FLAGS.train_data))
  data = wikidata.WikiData(
      FLAGS.train_data, FLAGS.y_class, seed=DATA_SEED, train_percent=TRAIN_PERCENT,
      max_document_length=MAX_DOCUMENT_LENGTH)
  n_words = len(data.vocab_processor.vocabulary_)
  tf.logging.info('Total words: %d' % n_words)
  # Build model
  if FLAGS.model == 'bag_of_words':
    model_fn = bag_of_words_model
    # Subtract 1 because VocabularyProcessor outputs a word-id matrix where word
    # ids start from 1 and 0 means 'no word'. But categorical_column_with_identity
    # assumes 0-based count and uses -1 for missing word.
    data.x_train = data.x_train - 1
    data.x_test = data.x_test - 1
  elif FLAGS.model == 'cnn':
    model_fn = cnn_model
  else:
    tf.logging.error("Unknown specified model '{}', must be one of {}"
                     .format(FLAGS.model, MODEL_LIST))
    raise ValueError
  classifier = tf.estimator.Estimator(
      model_fn=model_fn,
      config=tf.contrib.learn.RunConfig(
          tf_random_seed=TRAIN_SEED,
      ),
      model_dir=FLAGS.model_dir)
  # Train model
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={WORDS_FEATURE: data.x_train},
      y=data.y_train,
      batch_size=BATCH_SIZE,
      num_epochs=None,  # Note: For training, set this to None, so the input_fn
                        # keeps returning data until the required number of train
                        # steps is reached.
      shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=FLAGS.train_steps)
  # Predict on held-out test data
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={WORDS_FEATURE: data.x_test},
      y=data.y_test,
      num_epochs=1,  # Note: For evaluation and prediction set this to 1,
                     # so the input_fn will iterate over the data once and
                     # then raise OutOfRangeError
      shuffle=False)
  predicted_test = classifier.predict(input_fn=test_input_fn)
  # Collect the predicted class and the positive-class probability per example.
  test_out = pd.DataFrame(
      [(p['classes'], p['probs'][1]) for p in predicted_test],
      columns=['y_predicted', 'prob']
  )
  # Score with sklearn and TensorFlow
  sklearn_score = metrics.accuracy_score(data.y_test, test_out['y_predicted'])
  tf_scores = classifier.evaluate(input_fn=test_input_fn)
  train_size = len(data.x_train)
  test_size = len(data.x_test)
  # Baseline accuracy: the score obtained by always predicting the
  # majority class of the training data.
  baseline = len(data.y_train[data.y_train==0]) / len(data.y_train)
  if baseline < .5:
    baseline = 1 - baseline
  tf.logging.info('')
  tf.logging.info('----------Evaluation on Held-Out Data---------')
  tf.logging.info('Train Size: {0} Test Size: {1}'.format(train_size, test_size))
  tf.logging.info('Baseline (class distribution): {0:f}'.format(baseline))
  tf.logging.info('Accuracy (sklearn): {0:f}'.format(sklearn_score))
  for key in sorted(tf_scores):
    tf.logging.info("%s: %s" % (key, tf_scores[key]))
  # Export the model for serving: inputs are parsed tf.Example protos with a
  # fixed-length word-id feature.
  feature_spec = {
      WORDS_FEATURE: tf.FixedLenFeature(
          dtype=tf.int64, shape=MAX_DOCUMENT_LENGTH)
  }
  serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
  classifier.export_savedmodel(FLAGS.saved_model_dir, serving_input_fn)
if __name__ == '__main__':
  # Command-line entry point: parse flags into the module-level FLAGS and run.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--verbose', help='Run in verbose mode.', action='store_true')
  parser.add_argument(
      "--train_data", type=str, default="", help="Path to the training data.")
  parser.add_argument(
      "--model_dir", type=str, default="model", help="Temp place for model files")
  parser.add_argument(
      "--saved_model_dir", type=str, default="saved_models",
      help="Place to save model files")
  parser.add_argument(
      "--y_class", type=str, default="toxic",
      # The original help text wrongly listed the model choices here; those
      # belong to --model. y_class selects which label column to train on.
      help="Label class to train the model against, e.g. toxic")
  parser.add_argument(
      "--model", type=str, default="bag_of_words",
      help="The model to train, one of {}".format(MODEL_LIST))
  parser.add_argument(
      "--train_steps", type=int, default=100, help="The number of steps to train the model")
  FLAGS, unparsed = parser.parse_known_args()
  main()
| apache-2.0 |
conversationai/conversationai-models | model_evaluation/utils_export/utils_cloudml_test.py | 1 | 4111 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf records utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import unittest
import utils_cloudml
class CallModelPredictionsFromDf(unittest.TestCase):
  """Tests for `call_model_predictions_from_df`."""

  # TODO(fprost): Implement these.
  def test_correct(self):
    # Placeholder: passes trivially until real assertions are written.
    return None
class CheckJobOver(unittest.TestCase):
  """Tests for `check_job_over`."""

  # TODO(fprost): Implement these.
  def test_correct(self):
    # Placeholder: passes trivially until real assertions are written.
    return None
class AddModelPredictionsToDf(unittest.TestCase):
  """Tests for `add_model_predictions_to_df`."""

  def setUp(self):
    # Minimal two-row dataframe keyed by COMMENT_KEY.
    self.COMMENT_KEY = 'comment_key'
    self._df = pd.DataFrame({
        self.COMMENT_KEY: [0, 1],
        'other_field_1': ['I am a man', 'I am a woman'],
    })
    # NOTE(review): these fixtures reference a live GCS bucket, so the tests
    # require network access and read permission on kaggle-model-experiments.
    self._prediction_file = 'gs://kaggle-model-experiments/files_for_unittest/model1:v1'
    self._model_col_name = 'model1:v1_preds'
    self._prediction_name = 'toxicity/logistic'
    self._example_key = self.COMMENT_KEY

  def test_missing_prediction_file(self):
    # A non-existent prediction path should raise with a descriptive message.
    path = 'not_existing_folder/not_existing_file_path'
    with self.assertRaises(Exception) as context:
      utils_cloudml.add_model_predictions_to_df(
          self._df,
          path,
          self._model_col_name,
          self._prediction_name,
          self._example_key)
    self.assertIn(
        'Prediction file does not exist.',
        str(context.exception))

  def test_empty_prediction_file(self):
    # An existing-but-empty prediction file should raise.
    path = 'gs://kaggle-model-experiments/files_for_unittest/for_empty_predictions'
    with self.assertRaises(Exception) as context:
      utils_cloudml.add_model_predictions_to_df(
          self._df,
          path,
          self._model_col_name,
          self._prediction_name,
          self._example_key)
    self.assertIn(
        'The prediction file returned by CMLE is empty.',
        str(context.exception))

  def test_missing_example_key(self):
    # The join key must be present in the prediction records.
    example_key = 'not_found_example_key'
    with self.assertRaises(Exception) as context:
      utils_cloudml.add_model_predictions_to_df(
          self._df,
          self._prediction_file,
          self._model_col_name,
          self._prediction_name,
          example_key,
      )
    self.assertIn(
        "Predictions do not contain the 'example_key' field.",
        str(context.exception))

  def test_missing_prediction_key(self):
    # The requested prediction name must be present in the prediction records.
    prediction_key = 'not_found_prediction_key'
    with self.assertRaises(Exception) as context:
      utils_cloudml.add_model_predictions_to_df(
          self._df,
          self._prediction_file,
          self._model_col_name,
          prediction_key,
          self._example_key)
    self.assertIn(
        "Predictions do not contain the 'prediction_name' field.",
        str(context.exception))

  def test_correct(self):
    # Happy path: the predictions are joined onto the dataframe as a new
    # column named `model_col_name`, with the expected scores.
    output_df = utils_cloudml.add_model_predictions_to_df(
        self._df,
        self._prediction_file,
        self._model_col_name,
        self._prediction_name,
        self._example_key)
    right_output = pd.DataFrame({
        self.COMMENT_KEY: [0, 1],
        'other_field_1': ['I am a man', 'I am a woman'],
        self._model_col_name: [0.38753455877304077, 0.045782867819070816]
    })
    # sort_index(axis=1) makes the comparison independent of column order.
    pd.testing.assert_frame_equal(
        output_df.sort_index(axis=1), right_output.sort_index(axis=1))
if __name__ == '__main__':
unittest.main() | apache-2.0 |
TaxIPP-Life/Til | til/pgm/Archives/DataTable_from_liam.py | 2 | 7093 | # -*- coding:utf-8 -*-
"""
Convert Liam output in OpenFisca Input
"""
from pandas import HDFStore, merge # DataFrame
import numpy as np
import pdb
import time
from src.lib.simulation import SurveySimulation
from src.parametres.paramData import XmlReader, Tree2Object
import pandas as pd
import datetime as dt
import pandas.rpy.common as com
from rpy2.robjects import r
temps = time.clock()
input = "C:/Myliam2/Model/simulTest.h5"
output = "C:/openfisca/output/liam/"
name_convertion = {'person':'ind','declar':'foy','menage':'men', 'fam':'fam'}
store = HDFStore(input)
goal = HDFStore("C:/openfisca/src/countries/france/data/surveyLiam.h5")
#available_years = sorted([int(x[-4:]) for x in store.keys()])
# on travaille d'abord sur l'ensemble des tables puis on selectionne chaque annee
# step 1
table = {}
nom = 'person'
base = 'entities/'+nom
ent = name_convertion[nom]
table[ent] = store[str(base)]
# get years
years = np.unique(table[ent]['period'].values)[:1]
# rename variables to make them OF ones
table[ent] = table[ent].rename(columns={'res': 'idmen', 'quires': 'quimen', 'foy': 'idfoy', 'id': 'noi'})
# travail important sur les qui==2
time_qui = time.clock()
for ent in ('men','foy'): # 'fam' un jour...
qui= 'qui'+ent
ident = 'id'+ent
trav = table['ind'].ix[table['ind'][qui]==2, [ident,qui,'period']]
for name, group in trav.groupby([ident,'period']):
to_add = range(len(group))
group[qui] = group[qui]+to_add
table['ind'].ix[group[qui].index, qui] = group[qui]
print "les qui pour ", ent," sont réglés"
time_qui = time.clock() - time_qui
print time_qui
ent = 'ind'
# création de variable
table[ent]['agem'] = 12 * table[ent]['age']
table[ent]['ageq'] = table[ent]['age']/5 - 4
f = lambda x: min( max(x, 0), 12)
table[ent]['ageq'] = table[ent]['ageq'].map(f)
# menage qu'on élimine pour l'instant
#diff_foy = set([3880, 3916, 4190, 7853, 8658, 9376, 9508, 9717, 12114, 13912, 15260])
#
#temp = table['ind']['idfoy'].isin(diff_foy)
#diff_ind = table['ind'][temp]['id']
#diff_men = table['ind'][temp]['idmen'].copy()
#temp_ent = table[ent]['idmen'].isin(diff_men)
#table[ent] = table[ent][-temp_ent]
# il faut espérer après qu'avec les ménages, on s'en sort et qu'on n'a pas de
# pere dans diff_ind pour quelqu'un d'autre, ie, un pere hors du domicile supprimé
# on fait on s'en fout, on fait que de la légisaltion ici
# create fam base
table[ent][['idfam','quifam']] = table[ent][['idmen','quimen']]
# save information on qui == 0
foy0 = table[ent].ix[table[ent]['quifoy']==0,['id','idfoy','idmen','idfam','period']]
men0 = table[ent].ix[table[ent]['quimen']==0,['id','idfoy','idmen','idfam','period']]
fam0 = men0
for nom in ('menage','declar','fam'):
ent = name_convertion[nom]
base = 'entities/'+nom
ident = 'id'+ent
if ent == 'fam':
table[ent] = eval(ent +'0')
else :
table[ent] = store[str(base)].rename(columns={'id': ident})
table[ent] = merge(table[ent], eval(ent +'0'), how='left', left_on=[ident,'period'], right_on=[ident,'period'])
# traduction de variable en OF pour ces entités
if ent=='men':
# nbinde est limité à 6 personnes et donc valeur = 5 en python
table[ent]['nbinde'] = (table[ent]['nb_persons']-1) * (table[ent]['nb_persons']-1 <=5) +5*(table[ent]['nb_persons']-1 >5)
# temp_ent = table[ent]['idmen'].isin(diff_men)
# print ent
# table[ent] = table[ent][-temp_ent]
# test sur le nombre de qui ==0
test = {}
for year in years:
for nom in ('menage','declar'):
ent = name_convertion[nom]
base = 'entities/'+nom
ident = 'id'+ent
print ent, base, ident
test[ent] = store[str(base)].rename(columns={'id': ident})
test[ent] = test[ent].ix[test[ent]['period']==year,:]
test0 = eval(ent +'0')[eval(ent +'0')['period']==year]
tab = table[ent].ix[table[ent]['period']==year,['id','id'+ent,'idfam']]
ind = table['ind'].ix[table['ind']['period']==year,['qui'+ent]]
list_ind = ind[ind==0]
lidmen = test[ent][ident]
lidmenU = np.unique(lidmen)
diff1 = set(test0[ident]).symmetric_difference(lidmenU)
#voir = store[str(base)][['id','period']].rename(columns={'id': ident})
#voir = store[str(base)].rename(columns={'id': ident})[[ident,'period']]
#voir.ix[ voir['period']==2011,'id']
#
#test[ent][ident][:10]
#test1.ix[table[ent]['period']==year,['idmen']]
# il y a un truc avec les gens qui se marient puis divorcent
# en profiter pour bien gerer les conj = 0 ou conj =-1
# si on ne s'arrete pas là, c'est qu'on n'a pas de problème !!
print year, ent, diff1
for k in diff1:
pd.set_printoptions(max_columns=30)
listind = table['ind'][table['ind'][ident]==k]
print listind
for indiv in np.unique(listind['id']):
print table['ind'].ix[table['ind']['id']==indiv,['id','period','sexe','idmen','quimen','idfoy','quifoy','conj','mere','pere']]
pdb.set_trace()
for year in years:
goal.remove('survey_'+str(year))
for ent in ('ind','men','foy','fam'):
tab = table[ent].ix[table[ent]['period']==year]
key = 'survey_'+str(year) + '/'+ent
goal.put(key, tab)
# if year == 2010:
# pdb.set_trace()
# tab = table[ent].ix[table[ent]['period']==year]
# tab[:5]
# len(tab['idfam'])
# len(np.unique(tab['idfam']))
# list_qui = tab['idfam']
# double = list_qui.value_counts()[list_qui.value_counts()>1]
# tabind = table['ind'].ix[table['ind']['period']==year]
store.close()
goal.close()
# on fais maintenant tourner le modèle OF
country = 'france'
for year in years:
yr = str(year)
deb3 = time.clock()
simu = SurveySimulation()
simu.set_config(year = year, country = country)
# mettre les paramètres de a législation 2009
date_str = str(2009)+ '-01-01'
date = dt.datetime.strptime(date_str ,"%Y-%m-%d").date()
reader = XmlReader(simu.param_file, date)
rootNode = reader.tree
simu.P_default = Tree2Object(rootNode, defaut=True)
simu.P_default.datesim = date
simu.P = Tree2Object(rootNode, defaut=False)
simu.P.datesim = date
simu.set_survey(filename="C:/openfisca/src/countries/france/data/surveyLiam.h5", num_table=3, print_missing=True)
simu.compute()
for ent in ('ind','men','foy','fam'):
df = simu.outputs.table3[ent]
not_bool = df.dtypes[df.dtypes != bool]
print df.ix[:50,df.dtypes[df.dtypes == bool].index]
df = df.ix[:,not_bool.index]
r_dataframe = com.convert_to_r_dataframe(df)
name = ent+'_'+str(year)
r.assign(name, r_dataframe)
file_dir = output + name+ ".gzip"
phrase = "save("+name+", file='" +file_dir+"', compress=TRUE)"
r(phrase)
fin3 = time.clock()
print time.clock()- temps
| gpl-3.0 |
khaeru/py-gdx | gdx/__init__.py | 1 | 19054 | # coding: utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from itertools import cycle
import logging
import numpy
import pandas
import xarray as xr
from .pycompat import install_aliases, filter, raise_from, range, super, zip
install_aliases()
from .api import GDX, gdxcc, type_str, vartype_str
logger = logging.getLogger(__name__)
debug = logger.debug
info = logger.info
__version__ = '2'
__all__ = [
'File',
]
class File(xr.Dataset):
"""Load the file at *filename* into memory.
If *lazy* is ``True`` (default), then the data for GDX Parameters is not
loaded until each individual parameter is first accessed; otherwise all
parameters except those listed in *skip* (default: empty) are loaded
immediately.
If *implicit* is ``True`` (default) then, for each dimension of any GDX
Parameter declared over '*' (the universal set), an implicit set is
constructed, containing only the labels appearing in the respective
dimension of that parameter.
.. note::
For instance, the GAMS Parameter ``foo(*,*,*)`` is loaded as
``foo(_foo_0,_foo_1,_foo_2)``, where ``_foo_0`` is an implicit set that
contains only labels appearing along the first dimension of ``foo``,
etc. This workaround is essential for GDX files where ``*`` is large;
otherwise, loading ``foo`` as declared raises :py:class:`MemoryError`.
"""
# For the benefit of xr.Dataset.__getattr__
_api = None
_index = []
_state = {}
_alias = {}
_implicit = False
def __init__(self, filename='', lazy=True, implicit=True, skip=set()):
"""Constructor."""
super(File, self).__init__() # Invoke Dataset constructor
# load the GDX API
self._api = GDX()
self._api.open_read(str(filename))
# Basic information about the GDX file
v, p = self._api.file_version()
sc, ec = self._api.system_info()
self.attrs['version'] = v.strip()
self.attrs['producer'] = p.strip()
self.attrs['symbol_count'] = sc
self.attrs['element_count'] = ec
# Initialize private variables
self._index = [None for _ in range(sc + 1)]
self._state = {}
self._alias = {}
self._implicit = implicit
# Read symbols
for s_num in range(sc + 1):
name, type_code = self._load_symbol(s_num)
if type_code == gdxcc.GMS_DT_SET and name not in skip:
self._load_symbol_data(name)
if not lazy:
for name in filter(None, self._index):
if name not in skip:
self._load_symbol_data(name)
def _load_symbol(self, index):
"""Load the *index*-th Symbol in the GDX file."""
# Load basic information
name, dim, type_code = self._api.symbol_info(index)
n_records, vartype, desc = self._api.symbol_info_x(index)
self._index[index] = name # Record the name
attrs = {
'index': index,
'name': name,
'dim': dim,
'type_code': type_code,
'records': n_records,
'vartype': vartype,
'description': desc,
}
# Assemble a string description of the Symbol's type
type_str_ = type_str[type_code]
if type_code == gdxcc.GMS_DT_PAR and dim == 0:
type_str_ = 'scalar'
try:
vartype_str_ = vartype_str[vartype]
except KeyError: # pragma: no cover
# Some other vartype is returned that's not described by the GDX
# API docs
vartype_str_ = ''
attrs['type_str'] = '{} {}'.format(vartype_str_, type_str_)
debug(str('Loading #{index} {name}: {dim}-D, {records} records, '
u'"{description}"').format(**attrs))
# Equations and Aliases require limited processing
if type_code == gdxcc.GMS_DT_EQU:
info('Loading of GMS_DT_EQU not implemented: {} {} not loaded.'.
format(index, name))
self._state[name] = None
return name, type_code
elif type_code == gdxcc.GMS_DT_ALIAS:
parent = desc.replace('Aliased with ', '')
self._alias[name] = parent
assert self[parent].attrs['_gdx_type_code'] == gdxcc.GMS_DT_SET
# Duplicate the variable
self._variables[name] = self._variables[parent]
self._state[name] = True
super(File, self).set_coords(name, inplace=True)
return name, type_code
# The Symbol is either a Set, Parameter or Variable
try: # Read the domain, as a list of names
domain = self._api.symbol_get_domain_x(index)
debug('domain: {}'.format(domain))
except Exception: # gdxSymbolGetDomainX fails for the universal set
assert name == '*'
domain = []
# Cache the attributes
attrs['domain'] = domain
self._state[name] = {'attrs': attrs}
return name, type_code
def _load_symbol_data(self, name):
"""Load the Symbol *name*."""
if self._state[name] in (True, None): # Skip Symbols already loaded
return
# Unpack attributes
attrs = self._state[name]['attrs']
index, dim, domain, records = [attrs[k] for k in ('index', 'dim',
'domain', 'records')]
# Read the data
self._cache_data(name, index, dim, records)
# If the GAMS method 'sameas' is invoked in a program, the resulting
# GDX file contains an empty Set named 'SameAs' with domain (*,*). Do
# not read this
if name == 'SameAs' and domain == ['*', '*']:
self._state[name] = None
self._index[index] = None
return
domain = self._infer_domain(name, domain,
self._state[name]['elements'])
# Create an xr.DataArray with the Symbol's data
self._add_symbol(name, dim, domain, attrs)
def _cache_data(self, name, index, dim, records):
"""Read data for the Symbol *name* from the GDX file."""
# Initiate the data read. The API method returns a number of records,
# which should match that given by gdxSymbolInfoX in _load_symbol()
records2 = self._api.data_read_str_start(index)
assert records == records2, \
('{}: gdxSymbolInfoX ({}) and gdxDataReadStrStart ({}) disagree on'
' number of records.').format(name, records, records2)
# Indices of data records, one list per dimension
elements = [list() for _ in range(dim)]
# Data points. Keys are index tuples, values are data. For a 1-D Set,
# the data is the GDX 'string number' of the text associated with the
# element
data = {}
try:
while True: # Loop over all records
labels, value, _ = self._api.data_read_str() # Next record
# Update elements with the indices
for j, label in enumerate(labels):
if label not in elements[j]:
elements[j].append(label)
# Convert a 1-D index from a tuple to a bare string
key = labels[0] if dim == 1 else tuple(labels)
# The value is a sequence, containing the level, marginal,
# lower & upper bounds, etc. Store only the value (first
# element).
data[key] = value[gdxcc.GMS_VAL_LEVEL]
except Exception:
if len(data) == records:
pass # All data has been read
else: # pragma: no cover
raise # Some other read error
# Cache the read data
self._state[name].update({
'data': data,
'elements': elements,
})
def _infer_domain(self, name, domain, elements):
"""Infer the domain of the Symbol *name*.
Lazy GAMS modellers may create variables like myvar(*,*,*,*). If the
size of the universal set * is large, then attempting to instantiate a
xr.DataArray with this many elements may cause a MemoryError. For every
dimension of *name* defined on the domain '*' this method tries to find
a Set from the file which contains all the labels appearing in *name*'s
data.
"""
if '*' not in domain:
return domain
debug('guessing a better domain for {}: {}'.format(name, domain))
# Domain as a list of references to Variables in the File/xr.Dataset
domain_ = [self[d] for d in domain]
for i, d in enumerate(domain_): # Iterate over dimensions
e = set(elements[i])
if d.name != '*' or len(e) == 0: # pragma: no cover
assert set(d.values).issuperset(e)
continue # The stated domain matches the data; or no data
# '*' is given
if (self._state[name]['attrs']['type_code'] == gdxcc.GMS_DT_PAR and
self._implicit):
d = '_{}_{}'.format(name, i)
debug(('Constructing implicit set {} for dimension {} of {}\n'
' {} instead of {} elements')
.format(d, name, i, len(e), len(self['*'])))
self.coords[d] = elements[i]
d = self[d]
else:
# try to find a smaller domain for this dimension
# Iterate over every Set/Coordinate
for s in self.coords.values():
if s.ndim == 1 and set(s.values).issuperset(e) and \
len(s) < len(d):
d = s # Found a smaller Set; use this instead
domain_[i] = d
# Convert the references to names
inferred = [d.name for d in domain_]
if domain != inferred:
# Store the result
self._state[name]['attrs']['domain_inferred'] = inferred
debug('…inferred {}.'.format(inferred))
else:
debug('…failed.')
return inferred
def _root_dim(self, dim):
"""Return the ultimate ancestor of the 1-D Set *dim*."""
parent = self[dim].dims[0]
return dim if parent == dim else self._root_dim(parent)
def _empty(self, *dims, **kwargs):
"""Return an empty numpy.ndarray for a GAMS Set or Parameter."""
size = []
dtypes = []
for d in dims:
size.append(len(self[d]))
dtypes.append(self[d].dtype)
dtype = kwargs.pop('dtype', numpy.result_type(*dtypes))
fv = kwargs.pop('fill_value')
return numpy.full(size, fill_value=fv, dtype=dtype)
def _add_symbol(self, name, dim, domain, attrs):
"""Add a xray.DataArray with the data from Symbol *name*."""
# Transform the attrs for storage, unpack data
gdx_attrs = {'_gdx_{}'.format(k): v for k, v in attrs.items()}
data = self._state[name]['data']
elements = self._state[name]['elements']
# Erase the cache; this also prevents __getitem__ from triggering lazy-
# loading, which is still in progress
self._state[name] = True
kwargs = {} # Arguments to xr.Dataset.__setitem__()
if dim == 0:
# 0-D Variable or scalar Parameter
super(File, self).__setitem__(name, ([], data.popitem()[1],
gdx_attrs))
return
elif attrs['type_code'] == gdxcc.GMS_DT_SET: # GAMS Set
if dim == 1:
# One-dimensional Set
self.coords[name] = elements[0]
self.coords[name].attrs = gdx_attrs
else:
# Multi-dimensional Sets are mappings indexed by other Sets;
# elements are either 'on'/True or 'off'/False
kwargs['dtype'] = bool
kwargs['fill_value'] = False
# Don't define over the actual domain dimensions, but over the
# parent Set/xr.Coordinates for each dimension
dims = [self._root_dim(d) for d in domain]
# Update coords
self.coords.__setitem__(name, (dims, self._empty(*domain,
**kwargs),
gdx_attrs))
# Store the elements
for k in data.keys():
self[name].loc[k] = k if dim == 1 else True
else: # 1+-dimensional GAMS Parameters
kwargs['dtype'] = float
kwargs['fill_value'] = numpy.nan
dims = [self._root_dim(d) for d in domain] # Same as above
# Create an empty xr.DataArray; this ensures that the data
# read in below has the proper form and indices
super(File, self).__setitem__(name, (dims, self._empty(*domain,
**kwargs),
gdx_attrs))
# Fill in extra keys
longest = numpy.argmax(self[name].values.shape)
iters = []
for i, d in enumerate(dims):
if i == longest:
iters.append(self[d].to_index())
else:
iters.append(cycle(self[d].to_index()))
data.update({k: numpy.nan for k in set(zip(*iters)) -
set(data.keys())})
# Use pandas and xarray IO methods to convert data, a dict, to a
# xr.DataArray of the correct shape, then extract its values
tmp = pandas.Series(data)
tmp.index.names = dims
tmp = xr.DataArray.from_series(tmp).reindex_like(self[name])
self[name].values = tmp.values
def dealias(self, name):
"""Identify the GDX Symbol that *name* refers to, and return the
corresponding :py:class:`xarray.DataArray`."""
return self[self._alias[name]] if name in self._alias else self[name]
def extract(self, name):
"""Extract the GAMS Symbol *name* from the dataset.
The Sets and Parameters in the :class:`File` can be accessed directly,
as e.g. `f['name']`; but for more complex xarray operations, such as
concatenation and merging, this carries along sub-Sets and other
Coordinates which confound xarray.
:func:`extract()` returns a self-contained :py:class:`xarray.DataArray`
with the declared dimensions of the Symbol (and *only* those
dimensions), which does not make reference to the :class:`File`.
"""
# Copy the Symbol, triggering lazy-loading if needed
result = self[name].copy()
# Declared dimensions of the Symbol, and their parents
try:
domain = result.attrs['_gdx_domain_inferred']
except KeyError: # No domain was inferred for this Symbol
domain = result.attrs['_gdx_domain']
dims = {c: self._root_dim(c) for c in domain}
keep = set(dims.keys()) | set(dims.values())
# Extraneous dimensions
drop_coords = set(result.coords) - keep
# Reduce the data
for c, p in dims.items():
if c == '*': # Dimension is '*', drop empty labels
result = result.dropna(dim='*', how='all')
elif c == p: # Dimension already indexed by the correct coord
continue
else:
# Dimension is indexed by 'p', but declared 'c'. First drop
# the elements which do not appear in the sub-Set c;, then
# rename 'p' to 'c'
drop = set(self[p].values) - set(self[c].values) - set('')
result = result.drop(drop, dim=p).swap_dims({p: c})
# Add the old coord to the set of coords to drop
drop_coords.add(p)
# Do this last, in case two dimensions have the same parent (p)
return result.drop(drop_coords)
def info(self, name):
"""Informal string representation of the Symbol with *name*."""
if isinstance(self._state[name], dict):
attrs = self._state[name]['attrs']
return '{} {}({}), {} records: {}'.format(
attrs['type_str'], name, ','.join(attrs['domain']),
attrs['records'], attrs['description'])
else:
return repr(self[name])
def _loaded_and_cached(self, type_code):
"""Return a list of loaded and not-loaded Symbols of *type_code*."""
names = set()
for name, state in self._state.items():
if state is True:
tc = self._variables[name].attrs['_gdx_type_code']
elif isinstance(state, dict):
tc = state['attrs']['type_code']
else: # pragma: no cover
continue
if tc == type_code:
names.add(name)
return names
def set(self, name, as_dict=False):
"""Return the elements of GAMS Set *name*.
Because :py:mod:`xarray` stores non-null labels for each element of a
coord, a GAMS sub-Set will contain some ``''`` elements, corresponding
to elements of the parent Set which do not appear in *name*.
:func:`set()` returns the elements without these placeholders.
"""
assert self[name].attrs['_gdx_type_code'] == gdxcc.GMS_DT_SET, \
'Variable {} is not a GAMS Set'.format(name)
if len(self[name].dims) > 1:
return self[name]
elif as_dict:
from collections import OrderedDict
result = OrderedDict()
parent = self[name].attrs['_gdx_domain'][0]
for label in self[parent].values:
result[label] = label in self[name].values
return result
else:
return list(self[name].values)
def sets(self):
"""Return a list of all GDX Sets."""
return self._loaded_and_cached(gdxcc.GMS_DT_SET)
def parameters(self):
"""Return a list of all GDX Parameters."""
return self._loaded_and_cached(gdxcc.GMS_DT_PAR)
def get_symbol_by_index(self, index):
"""Retrieve the GAMS Symbol from the *index*-th position of the
:class:`File`."""
return self[self._index[index]]
def __getitem__(self, key):
"""Set element access."""
try:
return super(File, self).__getitem__(key)
except KeyError as e:
if isinstance(self._state[key], dict):
debug('Lazy-loading {}'.format(key))
self._load_symbol_data(key)
return super(File, self).__getitem__(key)
else:
raise raise_from(KeyError(key), e)
| mit |
huangyh09/hilearn | hilearn/plot/seaborn_plot.py | 1 | 2264 | # some wrapped functions from seaborn
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
def regplot(x, y, hue=None, hue_values=None, show_corr=True, legend_on=True,
**kwargs):
"""Extended plotting of `seaborn.regplot` with showing correlation
coeffecient and supporting multiple regression lines by hue (and hue_values)
Parameters
----------
x: `array_like`, (1, )
Values on x-axis
y: `array_like`, (1, )
Values on y-axis
hue: `array_like`, (1, )
Values to stratify samples into different groups
hue_values: list or `array_like`
A list of unique hue values; orders are retained in plotting layers
show_corr: bool
Whether show Pearson's correlation coefficient in legend
legend_on: bool
Whether display legend
**kwargs:
for `seaborn.regplot`
https://seaborn.pydata.org/generated/seaborn.regplot.html
Returns
-------
ax: matplotlib Axes
The Axes object containing the plot.
same as seaborn.regplot
Examples
--------
.. plot::
>>> import numpy as np
>>> from hilearn.plot import regplot
>>> np.random.seed(1)
>>> x1 = np.random.rand(50)
>>> x2 = np.random.rand(50)
>>> y1 = 2 * x1 + (0.5 + 2 * x1) * np.random.rand(50)
>>> y2 = 4 * x2 + ((2 + x2) ** 2) * np.random.rand(50)
>>> x, y = np.append(x1, x2), np.append(y1, y2)
>>> hue = np.array(['group1'] * 50 + ['group2'] * 50)
>>> regplot(x, y, hue)
"""
import seaborn
if hue is None:
if show_corr:
_label = "R=%.2f" %(st.pearsonr(x, y)[0])
else:
_label = None
ax = seaborn.regplot(x, y, label=_label, **kwargs)
else:
if hue_values is None:
hue_values = np.unique(hue)
for hue_val in hue_values:
_idx = hue == hue_val
if show_corr:
_label = str(hue_val) + ": R=%.2f" %(
st.pearsonr(x[_idx], y[_idx])[0])
else:
_label = None
ax = seaborn.regplot(x[_idx], y[_idx], label=_label, **kwargs)
if legend_on:
plt.legend()
return ax | apache-2.0 |
JPFrancoia/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves int the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
lucashtnguyen/wqio | wqio/core/hydro.py | 1 | 29082 | import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.gridspec as gridspec
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import seaborn.apionly as seaborn
import pandas
from wqio import utils
# Time-unit conversion constants (floats so divisions never truncate).
SEC_PER_MINUTE = 60.
MIN_PER_HOUR = 60.
HOUR_PER_DAY = 24.
SEC_PER_HOUR = SEC_PER_MINUTE * MIN_PER_HOUR  # 3600 s
SEC_PER_DAY = SEC_PER_HOUR * HOUR_PER_DAY  # 86400 s
class Storm(object):
    """A single storm event sliced out of a larger hydrologic record.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Full hydrologic record with a DatetimeIndex. The storm keeps
        only the rows where ``dataframe[stormcol] == stormnumber``.
    stormnumber : int
        Identifier of this storm within `stormcol`.
    precipcol, inflowcol, outflowcol, tempcol : str, optional
        Column names for precipitation, influent flow, effluent flow,
        and air temperature.
    stormcol : str, optional (default = 'storm')
        Column holding the storm number of each record.
    freqMinutes : int, optional (default = 5)
        Sampling interval of the record, in minutes.
    volume_conversion : float, optional (default = 1)
        Unit-conversion factor for flow rates. It is multiplied by the
        interval length in seconds, so summed rates become volumes.
    """

    def __init__(self, dataframe, stormnumber, precipcol='precip',
                 inflowcol='inflow', outflowcol='outflow',
                 tempcol='temp', stormcol='storm', freqMinutes=5,
                 volume_conversion=1):
        self.inflowcol = inflowcol
        self.outflowcol = outflowcol
        self.precipcol = precipcol
        self.tempcol = tempcol
        self.stormnumber = stormnumber
        self.freqMinutes = freqMinutes
        # fold the record interval (in seconds) into the conversion so
        # that sum(rate) * volume_conversion yields a volume
        self.volume_conversion = volume_conversion * SEC_PER_MINUTE * self.freqMinutes
        # basic data
        self.data = dataframe[dataframe[stormcol] == self.stormnumber].copy()
        self.hydrofreq_label = '{0} min'.format(self.freqMinutes)
        # tease out start/stop info
        self.start = self.data.index[0]
        self.end = self.data.index[-1]
        self._season = utils.getSeason(self.start)
        # storm duration (hours)
        duration = self.end - self.start
        self.duration_hours = duration.total_seconds() / SEC_PER_HOUR
        # antecedent dry period (days)
        if self.stormnumber > 1:
            prev_storm_mask = dataframe[stormcol] == self.stormnumber - 1
            previous_end = dataframe[prev_storm_mask].index[-1]
            antecedent_timedelta = self.start - previous_end
            self.antecedent_period_days = antecedent_timedelta.total_seconds() / SEC_PER_DAY
        else:
            # first storm in the record: no previous storm to measure from
            self.antecedent_period_days = np.nan
        # quantities (lazy caches for the corresponding properties)
        self._precip = None
        self._inflow = None
        self._outflow = None
        # starts and stop
        self._precip_start = None
        self._precip_end = None
        self._inflow_start = None
        self._inflow_end = None
        self._outflow_start = None
        self._outflow_end = None
        # peaks
        self._peak_precip_intensity = None
        self._peak_inflow = None
        self._peak_outflow = None
        # times of peaks
        self._peak_precip_intensity_time = None
        self._peak_inflow_time = None
        self._peak_outflow_time = None
        self._peak_lag_hours = None
        # centroids
        self._centroid_precip_time = None
        self._centroid_inflow_time = None
        self._centroid_outflow_time = None
        self._centroid_lag_hours = None
        # totals
        self._total_precip_depth = None
        self._total_inflow_volume = None
        self._total_outflow_volume = None
        # per-column plotting metadata used by plot_hydroquantity()
        self.meta = {
            self.outflowcol: {
                'name': 'Flow (calculated, L/s)',
                'ylabel': 'Effluent flow (L/s)',
                'color': 'CornFlowerBlue',
                'linewidth': 1.5,
                'alpha': 0.5,
                'ymin': 0
            },
            self.inflowcol: {
                'name': 'Inflow (estimated, L/s)',
                'ylabel': 'Estimated influent flow (L/s)',
                'color': 'Maroon',
                'linewidth': 1.5,
                'alpha': 0.5,
                'ymin': 0
            },
            self.precipcol: {
                'name': 'Precip (mm)',
                'ylabel': '%s Precip.\nDepth (mm)' % self.hydrofreq_label,
                'color': 'DarkGreen',
                'linewidth': 1.5,
                'alpha': 0.4,
                'ymin': 0
            },
            # self.waterlevelcol: {
            #     'name': 'Level (m)',
            #     'ylabel': 'Water level in BMP (m)',
            #     'color': 'Black',
            #     'linewidth': 1.5,
            #     'alpha': 0.5,
            #     'ymin': 0
            # },
            self.tempcol: {
                'name': 'Air Temp (deg C)',
                'ylabel': 'Air Temperature (deg. C)',
                'color': 'DarkGoldenRod',
                'linewidth': 1.5,
                'alpha': 0.5,
                'ymin': None
            }
        }
        # summaries
        self._summary_dict = None
    # quantities
    @property
    def precip(self):
        # Lazily extract the records with nonzero precipitation; an empty
        # array stands in when no precip column was configured.
        if self._precip is None:
            if self.precipcol is not None:
                self._precip = self.data[self.data[self.precipcol] > 0][self.precipcol]
            else:
                self._precip = np.array([])
        return self._precip

    @property
    def inflow(self):
        # Records with nonzero influent flow (lazy, same pattern as `precip`).
        if self._inflow is None:
            if self.inflowcol is not None:
                self._inflow = self.data[self.data[self.inflowcol] > 0][self.inflowcol]
            else:
                self._inflow = np.array([])
        return self._inflow

    @property
    def outflow(self):
        # Records with nonzero effluent flow (lazy, same pattern as `precip`).
        if self._outflow is None:
            if self.outflowcol is not None:
                self._outflow = self.data[self.data[self.outflowcol] > 0][self.outflowcol]
            else:
                self._outflow = np.array([])
        return self._outflow

    @property
    def has_precip(self):
        # True when at least one record shows precipitation.
        return self.precip.shape[0] > 0

    @property
    def has_inflow(self):
        # True when at least one record shows influent flow.
        return self.inflow.shape[0] > 0

    @property
    def has_outflow(self):
        # True when at least one record shows effluent flow.
        return self.outflow.shape[0] > 0

    @property
    def season(self):
        # Season of the storm start (computed via utils.getSeason in __init__).
        return self._season

    @season.setter
    def season(self, value):
        self._season = value
    # starts and stops
    @property
    def precip_start(self):
        # Timestamp of the first record with precipitation (lazy; stays
        # None when the storm has no precip).
        if self._precip_start is None and self.has_precip:
            self._precip_start = self._get_event_time(self.precipcol, 'start')
        return self._precip_start

    @property
    def precip_end(self):
        # Timestamp of the last record with precipitation.
        if self._precip_end is None and self.has_precip:
            self._precip_end = self._get_event_time(self.precipcol, 'end')
        return self._precip_end

    @property
    def inflow_start(self):
        # Timestamp of the first record with influent flow.
        if self._inflow_start is None and self.has_inflow:
            self._inflow_start = self._get_event_time(self.inflowcol, 'start')
        return self._inflow_start

    @property
    def inflow_end(self):
        # Timestamp of the last record with influent flow.
        if self._inflow_end is None and self.has_inflow:
            self._inflow_end = self._get_event_time(self.inflowcol, 'end')
        return self._inflow_end

    @property
    def outflow_start(self):
        # Timestamp of the first record with effluent flow.
        if self._outflow_start is None and self.has_outflow:
            self._outflow_start = self._get_event_time(self.outflowcol, 'start')
        return self._outflow_start

    @property
    def outflow_end(self):
        # Timestamp of the last record with effluent flow.
        if self._outflow_end is None and self.has_outflow:
            self._outflow_end = self._get_event_time(self.outflowcol, 'end')
        return self._outflow_end
    # peaks
    @property
    def _peak_depth(self):
        # Maximum single-interval precip depth; implicitly None for a
        # storm without precipitation.
        if self.has_precip:
            return self.precip.max()

    @property
    def peak_precip_intensity(self):
        # Peak precip intensity per hour, scaled up from the per-interval
        # peak depth (interval length is freqMinutes).
        if self._peak_precip_intensity is None and self.has_precip:
            self._peak_precip_intensity = self._peak_depth * MIN_PER_HOUR / self.freqMinutes
        return self._peak_precip_intensity

    @property
    def peak_inflow(self):
        # Maximum influent flow rate (lazy).
        if self._peak_inflow is None and self.has_inflow:
            self._peak_inflow = self.inflow.max()
        return self._peak_inflow

    @property
    def peak_outflow(self):
        # Maximum effluent flow rate (lazy).
        if self._peak_outflow is None and self.has_outflow:
            self._peak_outflow = self.outflow.max()
        return self._peak_outflow

    # totals
    @property
    def total_precip_depth(self):
        # Sum of all precip depths over the storm (lazy).
        if self._total_precip_depth is None and self.has_precip:
            self._total_precip_depth = self.data[self.precipcol].sum()
        return self._total_precip_depth

    @property
    def total_inflow_volume(self):
        # Total influent volume: summed rates times volume_conversion
        # (the conversion already includes the record interval; see __init__).
        if self._total_inflow_volume is None and self.has_inflow:
            self._total_inflow_volume = self.data[self.inflowcol].sum() * self.volume_conversion
        return self._total_inflow_volume

    @property
    def total_outflow_volume(self):
        # Total effluent volume (same conversion as total_inflow_volume).
        if self._total_outflow_volume is None and self.has_outflow:
            self._total_outflow_volume = self.data[self.outflowcol].sum() * self.volume_conversion
        return self._total_outflow_volume
    # centroids
    @property
    def centroid_precip_time(self):
        # Precip-weighted mean time of the storm (lazy; None when dry).
        if self._centroid_precip_time is None and self.has_precip:
            self._centroid_precip_time = self._compute_centroid(self.precipcol)
        return self._centroid_precip_time

    @property
    def centroid_inflow_time(self):
        # Inflow-weighted mean time of the storm.
        if self._centroid_inflow_time is None and self.has_inflow:
            self._centroid_inflow_time = self._compute_centroid(self.inflowcol)
        return self._centroid_inflow_time

    @property
    def centroid_outflow_time(self):
        # Outflow-weighted mean time of the storm.
        if self._centroid_outflow_time is None and self.has_outflow:
            self._centroid_outflow_time = self._compute_centroid(self.outflowcol)
        return self._centroid_outflow_time

    @property
    def centroid_lag_hours(self):
        # Hours between the outflow and inflow centroids; computed only
        # when both centroids are available.
        if (self._centroid_lag_hours is None and
                self.centroid_outflow_time is not None and
                self.centroid_inflow_time is not None):
            self._centroid_lag_hours = (
                self.centroid_outflow_time - self.centroid_inflow_time
            ).total_seconds() / SEC_PER_HOUR
        return self._centroid_lag_hours

    # times
    @property
    def peak_precip_intensity_time(self):
        # Timestamp of the first record at the peak precip depth.
        if self._peak_precip_intensity_time is None and self.has_precip:
            PI_selector = self.data[self.precipcol] == self._peak_depth
            self._peak_precip_intensity_time = self.data[PI_selector].index[0]
        return self._peak_precip_intensity_time

    @property
    def peak_inflow_time(self):
        # Timestamp of the first record at the peak influent flow.
        if self._peak_inflow_time is None and self.has_inflow:
            PInf_selector = self.data[self.inflowcol] == self.peak_inflow
            self._peak_inflow_time = self.data[PInf_selector].index[0]
        return self._peak_inflow_time

    @property
    def peak_outflow_time(self):
        # Timestamp of the first record at the peak effluent flow; only
        # set when the selector actually matches a record.
        if self._peak_outflow_time is None and self.has_outflow:
            PEff_selector = self.data[self.outflowcol] == self.peak_outflow
            if PEff_selector.sum() > 0:
                self._peak_outflow_time = self.data[PEff_selector].index[0]
        return self._peak_outflow_time

    @property
    def peak_lag_hours(self):
        # Hours between effluent and influent peaks; requires both times.
        if (self._peak_lag_hours is None and
                self.peak_outflow_time is not None and
                self.peak_inflow_time is not None):
            time_delta = self.peak_outflow_time - self.peak_inflow_time
            self._peak_lag_hours = time_delta.total_seconds() / SEC_PER_HOUR
        return self._peak_lag_hours
    @property
    def summary_dict(self):
        # One-row summary of the storm's statistics (lazy). The keys
        # match the column order used by HydroRecord.storm_stats.
        if self._summary_dict is None:
            self._summary_dict = {
                'Storm Number': self.stormnumber,
                'Antecedent Days': self.antecedent_period_days,
                'Start Date': self.start,
                'End Date': self.end,
                'Duration Hours': self.duration_hours,
                'Peak Precip Intensity': self.peak_precip_intensity,
                'Total Precip Depth': self.total_precip_depth,
                'Total Inflow Volume': self.total_inflow_volume,
                'Peak Inflow': self.peak_inflow,
                'Total Outflow Volume': self.total_outflow_volume,
                'Peak Outflow': self.peak_outflow,
                'Peak Lag Hours': self.peak_lag_hours,
                'Centroid Lag Hours': self.centroid_lag_hours,
                'Season': self.season
            }
        return self._summary_dict
def is_small(self, minprecip=0.0, mininflow=0.0, minoutflow=0.0):
storm_is_small = (
(self.total_precip_depth is not None and self.total_precip_depth < minprecip) or
(self.total_inflow_volume is not None and self.total_inflow_volume < mininflow) or
(self.total_outflow_volume is not None and self.total_outflow_volume < minoutflow)
)
return storm_is_small
def _get_event_time(self, column, bound):
index_map = {'start': 0, 'end': -1}
quantity = self.data[self.data[column] > 0]
if quantity.shape[0] == 0:
warnings.warn("Storm has no {}".format(column), UserWarning)
else:
return quantity.index[index_map[bound]]
def _get_max_quantity(self, column):
return self.data[column].max()
    def _compute_centroid(self, column):
        """Quantity-weighted mean time of the storm for `column`.

        Returns a timezone-naive pandas.Timestamp, or None when the
        column sums to zero (weights collapse to NaN).
        """
        # ordinal time index of storm
        # NOTE(review): `idx.to_datetime()` is an old pandas Timestamp
        # API (removed in modern pandas) -- confirm against the pinned
        # pandas version before upgrading.
        time_idx = [
            mdates.date2num(idx.to_datetime()) for idx in self.data.index.tolist()
        ]
        # weighted average of the ordinal times, weighted by the quantity
        centroid = np.sum(self.data[column] * time_idx) / np.sum(self.data[column])
        if np.isnan(centroid):
            return None
        else:
            return pandas.Timestamp(mdates.num2date(centroid)).tz_convert(None)
def _plot_centroids(self, ax, yfactor=0.5):
artists = []
labels = []
y_val = yfactor*ax.get_ylim()[1]
if self.centroid_precip is not None:
ax.plot([self.centroid_precip], [y_val], color='DarkGreen', marker='o',
linestyle='none', zorder=20, markersize=6)
artists.append(mlines.Line2D([0], [0], marker='.', markersize=6,
linestyle='none', color='DarkGreen'))
labels.append('Precip. centroid')
if self.centroid_flow is not None:
ax.plot([self.centroid_flow], [y_val], color='CornflowerBlue',
marker='s', linestyle='none', zorder=20, markersize=6)
artists.append(mlines.Line2D([0], [0], marker='s',
markersize=6, linestyle='none', color='CornflowerBlue'))
labels.append('Effluent centroid')
if self.centroid_precip is not None and self.centroid_flow is not None:
ax.annotate('', (self.centroid_flow, y_val),
arrowprops=dict(arrowstyle="-|>"),
xytext=(self.centroid_precip, y_val))
return artists, labels
def plot_hydroquantity(self, quantity, ax=None, label=None, otherlabels=None, artists=None):
'''Plots a hydrologic quantity to a matplotlib axes.
Parameters
----------
quantity : string
Column name of the quantity you want to plot.
ax : matplotlib axes object or None, optional
The axes on which the data will be plotted. If None, a new
one will be created.
Returns
-------
proxy : matplotlib artist
A proxy artist for the plotted quantity
'''
# setup the figure
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
if label is None:
label = quantity
# select the plot props based on the column
try:
meta = self.meta[quantity]
except KeyError:
raise KeyError('%s not available'.format(quantity))
# plot the data
self.data[quantity].fillna(0).plot(ax=ax, kind='area', color=meta['color'],
alpha=meta['alpha'], zorder=5)
if artists is not None:
proxy = mpatches.Rectangle(
(0, 0), 1, 1, facecolor=meta['color'], linewidth=0, alpha=meta['alpha']
)
artists.append(proxy)
if otherlabels is not None:
otherlabels.append(label)
return fig, otherlabels, artists
    def summaryPlot(self, axratio=2, filename=None, showLegend=True,
                    precip=True, inflow=True, outflow=True, figopts={},
                    serieslabels={}):
        '''
        Creates a figure showing the hydrologic record (flow and
        precipitation) of the storm

        Input:
            axratio : optional float or int (default = 2)
                Relative height of the flow axis compared to the
                precipitation axis.
            filename : optional string (default = None)
                Filename to which the figure will be saved.
            showLegend : optional bool (default = True)
                Toggles drawing the legend on the precip axis.
            precip, inflow, outflow : optional bools (default = True)
                Toggle plotting of each quantity.
            figopts : optional dict
                Keyword arguments passed on to `plt.figure`.
            serieslabels : optional dict
                Maps column names to legend labels.

        NOTE(review): `figopts={}` and `serieslabels={}` are mutable
        default arguments, and `serieslabels.pop(...)` below mutates the
        caller's dict -- worth confirming/cleaning up separately.

        Writes:
            Figure of flow and precipitation for a storm

        Returns:
            fig, artists, labels
        '''
        fig = plt.figure(**figopts)
        # precip on top (1 part), flow below (axratio parts), shared x-axis
        gs = gridspec.GridSpec(nrows=2, ncols=1, height_ratios=[1, axratio],
                               hspace=0.12)
        rainax = fig.add_subplot(gs[0])
        rainax.yaxis.set_major_locator(plt.MaxNLocator(5))
        flowax = fig.add_subplot(gs[1], sharex=rainax)
        # create the legend proxy artists
        artists = []
        labels = []
        legcols = 0  # NOTE(review): unused local
        # in the label assignment: `serieslabels.pop(item, item)` might
        # seem odd. What it does is looks for a label (value) in the
        # dictionary with the key equal to `item`. If there is no value
        # for that key in the dictionary the `item` itself is returned.
        # so if there's nothing called "test" in mydict,
        # `mydict.pop("test", "test")` returns `"test"`.
        if inflow:
            fig, labels, artists = self.plot_hydroquantity(
                self.inflowcol,
                ax=flowax,
                label=serieslabels.pop(self.inflowcol, self.inflowcol),
                otherlabels=labels,
                artists=artists,
            )
        if outflow:
            # `arti` is unused; plot_hydroquantity appends to `artists`
            # in place, so the return value is redundant here
            fig, labels, arti = self.plot_hydroquantity(
                self.outflowcol,
                ax=flowax,
                label=serieslabels.pop(self.outflowcol, self.outflowcol),
                otherlabels=labels,
                artists=artists
            )
        if precip:
            fig, labels, arti = self.plot_hydroquantity(
                self.precipcol,
                ax=rainax,
                label=serieslabels.pop(self.precipcol, self.precipcol),
                otherlabels=labels,
                artists=artists
            )
        # precip axis reads downward, hyetograph-style
        rainax.invert_yaxis()
        if showLegend:
            leg = rainax.legend(artists, labels, fontsize=7, ncol=1,
                                markerscale=0.75, frameon=False,
                                loc='lower right')
            leg.get_frame().set_zorder(25)
            _leg = [leg]
        else:
            _leg = None
        seaborn.despine(ax=rainax, bottom=True, top=False)
        seaborn.despine(ax=flowax)
        flowax.set_xlabel('')
        rainax.set_xlabel('')
        # grid lines and axis background color and layout
        #fig.tight_layout()
        if filename is not None:
            fig.savefig(filename, dpi=300, transparent=True,
                        bbox_inches='tight', bbox_extra_artists=_leg)
        return fig, artists, labels
class HydroRecord(object):
    '''
    Parses a continuous hydrologic record into discrete storms.

    Parameters
    ----------
    hydrodata : pandas.DataFrame
        DataFrame of hydrologic data of the storm. Should contain
        a unique index of type pandas.DatetimeIndex.
    precipcol : string, optional (default = None)
        Name of column in `hydrodata` containing precipitation data.
    inflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing influent flow data.
    outflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing effluent flow data.
    tempcol : string, optional (default = None)
        Name of column in `hydrodata` containing air temperature data.
    stormcol : string, optional (default = 'storm')
        Name of the column that will hold the storm number of each
        record.
    minprecip, mininflow, minoutflow : floats, optional (default = 0.0)
        Minimum totals below which a storm is considered "small" and
        excluded from the `storms` property.
    outputfreqMinutes : int, optional (default = 10)
        The sampling frequency (minutes) of the record, used to
        convert the inter-event duration into a number of records.
    intereventHours : int, optional (default = 6)
        The number of dry hours (no flow or rain) required to end
        a storm.
    volume_conversion : float, optional (default = 1)
        Unit-conversion factor passed through to each Storm.
    stormclass : class, optional (default = Storm)
        Class used to represent the individual storms.
    lowmem : bool, optional (default = False)
        If True, non-storm records are dropped from `data` to save
        memory.
    '''

    def __init__(self, hydrodata, precipcol=None, inflowcol=None,
                 outflowcol=None, tempcol=None, stormcol='storm',
                 minprecip=0.0, mininflow=0.0, minoutflow=0.0,
                 outputfreqMinutes=10, intereventHours=6,
                 volume_conversion=1, stormclass=None, lowmem=False):
        # validate input
        if precipcol is None and inflowcol is None and outflowcol is None:
            msg = '`hydrodata` must have at least a precip or in/outflow column'
            raise ValueError(msg)
        if stormclass is None:
            self.stormclass = Storm
        else:
            self.stormclass = stormclass
        # static input
        self._raw_data = hydrodata
        self.precipcol = precipcol
        self.inflowcol = inflowcol
        self.outflowcol = outflowcol
        self.stormcol = stormcol
        self.tempcol = tempcol
        self.outputfreq = pandas.offsets.Minute(outputfreqMinutes)
        self.intereventHours = intereventHours
        # number of consecutive dry records that terminates a storm
        self.intereventPeriods = MIN_PER_HOUR / self.outputfreq.n * self.intereventHours
        self.minprecip = minprecip
        self.mininflow = mininflow
        self.minoutflow = minoutflow
        self.volume_conversion = volume_conversion
        self.lowmem = lowmem
        # lazy caches backing the properties below
        self._data = None
        self._all_storms = None
        self._storms = None
        self._storm_stats = None
    @property
    def data(self):
        # Hydrologic record with the storm-number column added (lazy).
        # In low-memory mode, records that belong to no storm are dropped.
        if self._data is None:
            self._data = self._define_storms()
            if self.lowmem:
                self._data = self._data[self._data[self.stormcol] != 0]
        return self._data
    @property
    def all_storms(self):
        # Dict mapping storm number -> stormclass instance for every
        # storm in the record, including "small" ones (lazy).
        if self._all_storms is None:
            self._all_storms = {}
            for storm_number in self.data[self.stormcol].unique():
                # storm number 0 marks records outside of any storm
                if storm_number > 0:
                    this_storm = self.stormclass(
                        self.data, storm_number, precipcol=self.precipcol,
                        inflowcol=self.inflowcol, outflowcol=self.outflowcol,
                        tempcol=self.tempcol, stormcol=self.stormcol,
                        volume_conversion=self.volume_conversion,
                        freqMinutes=self.outputfreq.n,
                    )
                    self._all_storms[storm_number] = this_storm
        return self._all_storms
@property
def storms(self):
if self._storms is None:
self._storms = {}
for snum, storm in self.all_storms.items():
is_small = storm.is_small(
minprecip=self.minprecip,
mininflow=self.mininflow,
minoutflow=self.minoutflow
)
if not is_small:
self._storms[snum] = storm
return self._storms
    @property
    def storm_stats(self):
        # Summary table with one row per (non-small) storm, in a fixed
        # column order, sorted by storm number.
        col_order = [
            'Storm Number', 'Antecedent Days', 'Season', 'Start Date', 'End Date',
            'Duration Hours', 'Peak Precip Intensity', 'Total Precip Depth',
            'Total Inflow Volume', 'Peak Inflow', 'Total Outflow Volume',
            'Peak Outflow', 'Peak Lag Hours', 'Centroid Lag Hours'
        ]
        if self._storm_stats is None:
            storm_stats = pandas.DataFrame([
                self.storms[sn].summary_dict for sn in self.storms
            ])
            self._storm_stats = storm_stats[col_order]
        # NOTE(review): DataFrame.sort(columns=...) is the pre-0.17
        # pandas API (replaced by sort_values); confirm the pinned
        # pandas version before upgrading.
        return self._storm_stats.sort(columns=['Storm Number']).reset_index(drop=True)
    def _define_storms(self, debug=False):
        '''
        Loops through the hydrologic records and parses the data into
        storms. In this context, a storm is defined as starting whenever
        the hydrologic records shows non-zero precipitation or
        [in|out]flow from the BMP after a minimum inter-event dry period
        duration specified in the the function call.

        Parameters
        ----------
        debug : bool (default = False)
            If True, diagnostic columns will not be dropped prior to
            returning the dataframe of parsed_storms.

        Writes
        ------
        None

        Returns
        -------
        parsed_storms : pandas.DataFrame
            Copy of the origin `hydrodata` DataFrame, but resmapled to
            a fixed frequency, columns possibly renamed, and a `storm`
            column added to denote the storm to which each record
            belongs. Records where `storm` == 0 are not a part of any
            storm.
        '''
        data = self._raw_data.copy()
        # pull out the rain and flow data; any missing quantity gets a
        # placeholder NaN column so the logic below is uniform
        if self.precipcol is None:
            precipcol = 'precip'
            data.loc[:, precipcol] = np.nan
        else:
            precipcol = self.precipcol
        if self.inflowcol is None:
            inflowcol = 'inflow'
            data.loc[:, inflowcol] = np.nan
        else:
            inflowcol = self.inflowcol
        if self.outflowcol is None:
            outflowcol = 'outflow'
            data.loc[:, outflowcol] = np.nan
        else:
            outflowcol = self.outflowcol
        # bool column where True means there's rain or flow of some kind
        water_columns = [precipcol, inflowcol, outflowcol]
        data.loc[:, 'wet'] = np.any(data[water_columns] > 0, axis=1)
        # rolling window over the inter-event period: 'windiff' is +1
        # where a wet stretch begins and -1 where the trailing window
        # finally dries out
        # NOTE(review): pandas.rolling_apply is the pre-0.18 API
        # (replaced by Series.rolling().apply); confirm pinned version.
        data.loc[:, 'windiff'] = pandas.rolling_apply(
            data['wet'],
            self.intereventPeriods,
            lambda x: x.any(),
            min_periods=1
        ).diff()
        # a record that starts wet is a storm start even though diff()
        # leaves the first row as NaN
        firstrow = data.iloc[0]
        if firstrow['wet']:
            data.loc[firstrow.name, 'windiff'] = 1
        data.loc[:, 'event_start'] = False
        data.loc[:, 'event_end'] = False
        starts = data['windiff'] == 1
        data.loc[starts, 'event_start'] = True
        # the -1 in 'windiff' appears one inter-event period after the
        # last wet record; shift it back to mark the true storm end
        stops = data['windiff'].shift(-1 * self.intereventPeriods) == -1
        data.loc[stops, 'event_end'] = True
        # initialize the new column as zeros
        data.loc[:, self.stormcol] = 0
        # each time a storm starts, increment the storm number + 1
        data.loc[:, self.stormcol] = data['event_start'].cumsum()
        # periods between storms are where the cumulative number
        # of storms that have ended are equal to the cumulative
        # number of storms that have started.
        # Stack Overflow: http://tinyurl.com/lsjkr9x
        nostorm = data[self.stormcol] == data['event_end'].shift(2).cumsum()
        data.loc[nostorm, self.stormcol] = 0
        if not debug:
            cols_to_drop = ['wet', 'windiff', 'event_end', 'event_start']
            data = data.drop(cols_to_drop, axis=1)
        return data
    def getStormFromTimestamp(self, timestamp, lookback_hours=0, smallstorms=False):
        '''Get the storm associated with a given (sample) date

        Parameters
        ----------
        timestamp : pandas.Timestamp
            The date/time for which to search within the hydrologic
            record.
        lookback_hours : positive int or float, optional (default = 0)
            If no storm is actively occurring at the provided timestamp,
            we can optionally look backwards in the hydrologic record a
            fixed amount of time (specified in hours). Negative values
            are ignored.
        smallstorms : bool, optional (default = False)
            If True, small storms will be included in the search.

        Returns
        -------
        storm_number : int
        storm : wqio.Storm
        '''
        # sanitize date input (project util; original spelling kept)
        timestamp = utils.santizeTimestamp(timestamp)
        # check lookback hours
        if lookback_hours < 0:
            raise ValueError('`lookback_hours` must be greater than 0')
        # initial search: storm number of the last record at/before the timestamp
        storm_number = int(self.data.loc[:timestamp, self.stormcol].iloc[-1])
        # look backwards if we have to
        if (storm_number == 0 or pandas.isnull(storm_number)) and lookback_hours != 0:
            lookback_time = timestamp - pandas.offsets.Hour(lookback_hours)
            storms = self.data.loc[lookback_time:timestamp, [self.stormcol]]
            storms = storms[storms > 0].dropna()
            if storms.shape[0] == 0:
                # no storm within the lookback period either
                storm_number = None
            else:
                # storm w/i the lookback period
                storm_number = int(storms.iloc[-1])
        # return storm_number and the matching storm object (or None)
        if smallstorms:
            return storm_number, self.all_storms.get(storm_number, None)
        else:
            return storm_number, self.storms.get(storm_number, None)
| bsd-3-clause |
rhambach/TEMareels | qcal/momentum_dispersion.py | 1 | 26008 | """
Analysis of the q/x dispersion from the borders of
a small, round aperture in momentum space / real space.
TODO
- polynomial distortion fails if dispersion has no zero
Copyright (c) 2013, rhambach.
This file is part of the TEMareels package and released
under the MIT-Licence. See LICENCE file for details.
"""
import copy
import numpy as np
import scipy.optimize as opt;
import matplotlib.pylab as plt
import TEMareels.tools.tifffile as tif
import TEMareels.tools.transformations as trafo
from TEMareels.tools.img_filter import gaussfilt1D
from TEMareels.gui.wq_stack import WQBrowser, WQStackBrowser
from TEMareels.qcal import fit_border
from TEMareels import Release
class QDispersion:
"""
Determine the distortions of the energy filter from a series
calibration images. To this end, we record a series of E-q maps
of a small round aperture at different positions in the
filter-entrance plane and fit the borders (iso-q-lines). In
diffraction mode, the illumination of the aperture is very
inhomogeneous and should be corrected by a reference.
The q-dispersion is extracted from the change of the appearent
size of the aperture (distance between left and right border).
The following steps have to be performed:
1. normalization of pixel coordinates (x,y) with respect to
spectrum magnification and shift, we use the (x,y)-positions
of the slit borders and the position of the direct beam
-> normalized coordinates (u,v)
2. fitting of the measured iso-q-lines in order to correct
the trapezoidal distortion in the image
-> slit coordinates (s,t)
3. calculate change of appearent aperture size and linearize q-axis
-> linearized q-coordinates (q,r)
"""
def __init__(self,ap_series,illu_ref=None,reverse_y=False,N=4096,verbosity=1):
"""
ap_series ... names of tif-images of the shifted aperture, shape (Nap,)
illu_ref ... (opt) name of reference to correct non-homogeneous illumination
reverse_y ... (opt) True if y-axis should be inversed
N ... (opt) number of pixels of camera
verbosity ... (opt) 0: silent, 1: minimal, 2: verbose, >10: debug
"""
self.Npx = N;
self.ap_names = ap_series;
self.ref_name = illu_ref;
self.verbosity= verbosity
self.history = [];
# load image files
self.ref_img = tif.imread(illu_ref,verbosity=verbosity) \
if illu_ref is not None else None; # Ny,Nx
self.ap_stack = tif.imread(ap_series,verbosity=verbosity); # Nap,Ny,Nx
# reverse images
if reverse_y:
print "WARNING: in QDispersion: reverse_y==True";
self.ref_img= self.ref_img[::-1];
self.ap_stack=self.ap_stack[:,::-1];
# set image parameters
self.Nap, self.Ny, self.Nx = self.ap_stack.shape;
self.ybin,self.xbin = self.Npx/float(self.Ny), self.Npx/float(self.Nx);
self.crop_img();
# transformations (identity by default)
self.u2x = trafo.I(); # normalised coordinates
self.s2u = trafo.I(); # slit coordinates = distortions of iso-q-lines
self.q2s = trafo.I(); # non-linear dispersion on q-axis
# History + DEBUG
self.history = ["momentum_dispersion.py, version %s (%s)" %
(Release.version, Release.version_id)];
self.__dbg_fig=[]; # list of figures
def crop_img(self,xmin=0,xmax=np.inf,ymin=0,ymax=np.inf):
"""
reduce image size for fitting and resampling
xmin,xmax,ymin,ymax are given in image pixels between 0 and N
"""
xmax=min(xmax,self.Npx);
ymax=min(ymax,self.Npx);
self.crop={'xmin':xmin,'xmax':xmax,'ymin':ymin,'ymax':ymax};
self.history.append("Crop");
param = ", ".join([key+": %d"%val for key,val in self.crop.items()])
self.history.append("|- "+param);
assert xmax-xmin > self.xbin
assert ymax-ymin > self.ybin
def fit_aperture_borders(self,order=2,log=False,**kwargs):
"""
Determine the left and right border of the aperture as polynom x(y)
for the series of E-q maps.
order ... (opt) order of the polynomial to fit
log ... (opt) consider aperture images on log-scale
for further options, see fit_border.get_border_points()
RETURNS list of tuples(left,right) containing polynomials x = left(y)
"""
self.ap_order =order;
self.ap_log =log;
# illumination reference (smoothed version)
if self.ref_name is not None:
ref = np.abs(gaussfilt1D(self.ref_img,11)); # gauss-filter
#ref = np.abs(lorentzfit1D(self.ref_img,offset=offset)); # fit lorentz
ref[ref<np.mean(ref)] = np.mean(ref); # constant for low values
# to avoid 0 devision
if self.verbosity>9:
self.__dbg_fig.append(self.plot_reference(ref)); # draw figure and save in list
else:
ref = 1; # no reference given
# iterate over all aperture images
points = []; fit = []; lines=[]; stack=[]; info=[];
for i,image in enumerate(self.ap_stack):
# correct image by illu_ref
filtimg = image/ref;
if log: filtimg = np.log(np.abs(filtimg)+1);
# get aperture border as point list in px (correct for binning!)
c = self.crop;
l,r = fit_border.get_border_points(filtimg, interp_out=True,
xmin=int(c['xmin']/self.xbin), xmax=int(c['xmax']/self.xbin),
ymin=int(c['ymin']/self.ybin), ymax=int(c['ymax']/self.ybin),
verbosity=self.verbosity-10, **kwargs);
l = ((l+0.5).T*(self.xbin,self.ybin)).T; # convert to px, points (x,y)
r = ((r+0.5).T*(self.xbin,self.ybin)).T;
points.append([l,r]);
# fit polynom x=p(y) to left and right border
polyl = np.poly1d(np.polyfit(l[1],l[0],order));
polyr = np.poly1d(np.polyfit(r[1],r[0],order));
fit.append((polyl,polyr));
# DEBUG: drawing fit points and polynoms
if self.verbosity>2:
y = np.arange(self.crop['ymin'],self.crop['ymax']);
stack.append(filtimg);
info.append({'desc': 'DEBUG: '+self.ap_names[i],
'xperchan':self.xbin, 'yperchan':self.ybin});
p1 = plt.Line2D(l[0],l[1],marker='x',ls='',c='b');
p2 = plt.Line2D(r[0],r[1],marker='x',ls='',c='b')
p3 = plt.Line2D(polyl(y),y,color='r');
p4 = plt.Line2D(polyr(y),y,color='r');
lines.append([p1,p2,p3,p4]);
if self.verbosity>2:
self.__dbg_fig.append( self.plot_aperture_images(stack,info,lines) );
# store results
self.ap_points=points; # contains Nap (l,r) tuples; l,r are a lists of (x,y) points
self.ap_poly =fit; # contains Nap (pl,pr) tuples; pl,pr are polynoms y(x)
# comments
self.history.append("Fit aperture borders");
self.history.append("|- order=%d, log=%s"%(order,log));
params = ", ".join([key+": "+str(val) for key,val in kwargs.items()]);
self.history.append("|- " + params);
return fit;
def plot_reference(self,ref=None):
if self.ref_name is None:
print 'WARNING: in QDispersion.plot_reference(): no reference specified.';
return;
if ref is None: ref=self.ref_img;
info = {'desc': 'DEBUG: illumination reference, '+self.ref_name,
'xperchan':self.xbin, 'yperchan':self.ybin};
return WQBrowser(ref,info,aspect='auto');
def plot_aperture_images(self,stack=None,info=None,lines=None):
if stack is None: stack=self.ap_stack;
WQB = WQStackBrowser(self.ap_stack,info,lines,aspect='auto');
c=self.crop;
WQB.axis.add_patch(plt.Rectangle((c['xmin'],c['ymin']), \
c['xmax']-c['xmin'],c['ymax']-c['ymin'],lw=3,ec='red',fc='0.5',alpha=0.2));
return WQB;
def normalize_coordinates(self,x0,y0,xl,xr,aspect=None):
"""
Transfrom from pixels (x,y) to normalised units (u,v):
(x0,y0)->(0,0), (xl,y0)->(ul,0), (xr,y0)->(ur,0), ur-ul=1. I.e.,
to remove the influence of a spectrum shift on the camera, the new
origin is set to the position of the direct beam; and remove an
influence of the spectrum magnification, the length of the slit in
the E-q map is normalized to 1. By default, the y-axis is not
scaled. Otherwise, the aspect dx/dy can be specified explicitly
(use 1 for common magnification of x and y axis).
x0 ... horizontal position of the direct beam [px]
y0 ... vertical slit position [px]
xl,xr ... horizontal left/right slit position [px]
aspect... (opt) change of apect ratio (du/dv)/(dx/dy)
RETURNS Tranformation object transforming (x,y) to (u,v)
"""
self.u2x = trafo.Normalize(x0,y0,xl,xr,aspect); # (u,v) -> (x,y)
# save slit borders in normalized coordinates
self.u2x.ul,_ = self.u2x.inverse(xl,y0);
self.u2x.ur,_ = self.u2x.inverse(xl,y0);
# history
self.history.extend(self.u2x.info(3).split("\n"));
return trafo.Inv(self.u2x);
def __resample_aperture_border(self,N):
" resample aperture border with n points and normalise "
# sample points for aperture in image coordinates
if N is None: # original points
guides=np.reshape(self.ap_points,(2*self.Nap,2,-1));
x = guides[:,0];
y = guides[:,1];
assert np.allclose( y-y[0], 0); # all y-postitions should be the same
else: # resample points from quadratic fit
s = np.linspace(self.crop['ymin'],self.crop['ymax'],N,endpoint=False); # sampling points
x = np.asarray([ q(s) for b in self.ap_poly for q in b ]); # x-position for iso-q-lines
y = np.tile(s,(x.shape[0],1)); # y-position "
# convert to normalised coordinates
try:
u,v = self.u2x.inverse(x,y);
except NameError:
print " ERROR: run fit_aperture_borders() first.";
raise;
return u,v; # shape (2*Nap,n)
def __residuals(self,u,v,T,u0,leastsq=True):
" residuals for fitting transformations to distorted lines "
K,N = u.shape; assert len(u0)==K; assert v.shape==(K,N)
s,t = np.tile(u0,(N,1)).T, v; # undistorted coordinates, shape (K,N)
uu,vv= T.transform(s,t); # perform distortion trafo
if leastsq: return (u-uu).flatten(); # residuals u-u' for leastsq()
return T, u0, u-uu; # return trafo, u0, residuals
def __debug_distortion(self,T,u,v,u0,res,title=""):
" debugging commands which are common for all fitting of distortions "
K,N = u.shape;
if self.verbosity>2: # PRINT INFO
print T.info(3);
if self.verbosity>9:
max_res = np.max(np.abs(res),axis=1);
print ' line u0-values max. deviation ';
for k in range(len(u)):
print ' %2d %8.3g %8.3g'%(k,u0[k],max_res[k]);
if self.verbosity>2: # PLOT POINTS AND FIT
vv = np.linspace(-1000,18000,100);
s,t = np.tile(u0,(len(vv),1)).T, vv; # iso-q-lines
U,V = T.transform(s,t); # perform trafo
fig = plt.figure(); plt.title(title);
plt.plot(u.reshape(K/2,2*N).T,v.reshape(K/2,2*N).T,'x');
# input points (left and right together)
plt.plot(U.T,V.T,'k-'); # fitted lines
plt.xlabel('u');
plt.ylabel('v');
plt.xlim(-0.61,0.5);
plt.ylim(vv.max(), vv.min());
self.__dbg_fig.append(fig);
def fit_trapezoidal_distortions(self, N=None, vp=None, u=None, v=None):
    """
    Least-square fitting of iso-q-lines (u,v) using rays with
    common vanishing point (U,V), passing through points (u0,0)
      N    ... (opt) number of sampling points along y-direction for each image
      vp   ... (opt) initial guess for the vanishing point (U,V)
      u,v  ... (opt) explicit iso-q-line points, shape (K,N); skips resampling
    RETURN (trapz,u0)
      trapz... TrapezodialDistortion object
      u0   ... 1d-array; slit coordinate for each iso-q-line
    """
    # data + initial parameters
    if u is not None and v is not None: # data given explicitely
        u,v = np.atleast_2d(u,v); N=u.shape[1]; # K... number of iso-q-lines
    else:
        u,v = self.__resample_aperture_border(N)# aperture borders, shape (K,N)
    if vp is None: vp = (self.Npx/2,self.Npx);# set reasonable start value
    trapz = trafo.TrapezoidalDistortion(vp); # initialize trafo
    param0= list(vp)+[0]*len(u); # param: [vp, u0]
    # deviation of given trapezoidal distortion from observed values u,v
    def residuals(param,u,v,T,leastsq=True):
        T.vp = param[:2]; u0 = param[2:]; # fit parameters
        return self.__residuals(u,v,T,u0,leastsq);
    # perform fitting
    fit,_ = opt.leastsq(residuals, param0, args=(u,v,trapz));
    # re-evaluate at the optimum to get the fitted trafo + residuals
    trapz,u0,res = residuals(fit,u,v,trapz,False);
    self.__debug_distortion(trapz,u,v,u0,res,
        title="DEBUG: fit_trapezoidal_distortions()");
    # save results, slit borders in slit coordinates
    self.s2u = trapz;
    self.s2u.sl=self.u2x.ul;
    self.s2u.sr=self.u2x.ur; # same as borders in normalized coords
    # history
    self.history.extend(self.s2u.info(3).split("\n"));
    return trapz,u0;
def fit_polynomial_distortions(self,N=None,I=1,J=1,c0=None,const='fixed_slit'):
    """
    Least-square fitting of all iso-q-lines (u,v) by polynomial functions
    of order J along the energy axis t=v with the restriction, that the
    coefficients of different iso-q-lines can be expressed as polynoms in q
    of order I. This corresponds to a transformation T:(s,t)->(u,v)
        u(s,t) = sum_ij C_ij s^i t^j;  v(s,t) = t.
    Note that also the exact position s_k of the k'th iso-q-line at E=0
    (v=0) is not known exactly and included in the fit. As the fitting
    parameters s_k and C_ij are not independent, we add further constraints
    according to the parameter 'const'.
      N   ... (opt) number of sampling points along y-direction for each image
      J   ... (opt) degree of fit u = sum_j c_j v^j for a single aperture border
      I   ... (opt) degree of polynomial c_j = sum_i C_ij s^i for coefficients
      c0  ... (opt) initial guess for the coefficients C_ij, overwrites I,J
      const.. (opt) constraints for fitting parameters:
              'fixed_slit': T(s,0) = (s,0), the trafo will not
                 change the coordinates at the slit position t=0;
              'aperture_calibration': T(0,0) = (0,0) to avoid shifts
                 and constant aperture size s[k/2+2]-s[k/2]=1
    RETURN (poly,s_k)
      poly ... PolynomialDistortion object
      s_k  ... 1d-array; slit coordinate for each iso-q-line
    """
    # data + initial parameters
    u,v = self.__resample_aperture_border(N); # aperture borders, shape (K,N)
    # fit approximate trapezoidal distortions, used to
    # -> calculate initial parameters for polynomial fit
    # -> distinguish between multiple solutions in inverse() of PolynomialDistortion
    self.verbosity-=10;   # temporarily silence debug output of the pre-fit
    # BUGFIX: a stray 'self' was passed as first positional argument, i.e. as
    # the sampling parameter N; harmless only because u,v are given explicitly
    trapz,u0_trapz = self.fit_trapezoidal_distortions(u=u, v=v);
    self.verbosity+=10;
    # initial fit parameters (first-order coeffs reproduce the trapezoidal fit)
    if c0 is not None: c0 = np.asarray(c0,dtype=float);
    else:
        c0 = np.zeros((I+1,J+1),dtype=float); # i=0,...,I; j=0,...;J
        c0[0,:2] = [0, trapz.vp[0]/trapz.vp[1]];
        c0[1,:2] = [1, -1/trapz.vp[1] ];
    I = c0.shape[0]-1; J = c0.shape[1]-1;
    K = u.shape[0]; # K=2Nap (# of left+right aperture borders)
    # 1. FIXED-SLIT CONSTRAINTS
    # restrictions for fitting C_ij and s_k:
    #   T(s,0)=(s,0)  <=>  C_i0 = 1 if i==1 else 0
    # remaining fit parameters:
    #   param[0:(I+1)*J]           ... C_ij for j=1,...,J
    #   param[(I+1)*J:(I+1)*J+K]   ... s_k  for k=0,...,K-1
    if const=='fixed_slit':
        c0[:,0]= 0; c0[1,0] = 1; # => T(s,0) = (s,0)
        poly = trafo.PolynomialDistortion(c0,T=trapz); # initialize trafo
        param0 = np.hstack((c0[:,1:].flatten(),u0_trapz)); # param: [c0, u0]
        def residuals(param,u,v,T,leastsq=True):
            T.coeff[:,1:] = param[:(I+1)*J].reshape(I+1,J);
            s_k = param[(I+1)*J:];
            return self.__residuals(u,v,T,s_k,leastsq);
    # 2. FIXED APERTURE SIZE
    # restrictions for fitting C_ij and s_k:
    #   T(0,0)=(0,0)  <=>  C_00 = 0;
    #   s[k/2+1] - s[k/2] = 1 for all k
    # Note: k=0 mod K/2 for left border, k=1 mod K/2 for right border
    # remaining fit parameters:
    #   param[0:Nc]      ... C_ij for all i,j except C_00, Nc=(I+1)(J+1)-1
    #   param[Nc:Nc+K/2] ... s_k for k=0,2,...,K
    elif const=='aperture_calibration':
        assert K%2==0; # even number of lines required
        poly = trafo.PolynomialDistortion(c0,T=trapz);
        param0 = np.hstack((c0.flatten()[1:], u0_trapz[::2]));
        DS = np.mean(u0_trapz[1::2]-u0_trapz[::2]);
        def residuals(param,u,v,T,leastsq=True):
            T.coeff = np.insert(param[:(I+1)*(J+1)-1],0,0).reshape((I+1,J+1)); # C_00=0
            s_k = np.asarray([[s,s+DS] for s in param[(I+1)*(J+1)-1:]]).flat;
            # set s[k+1]-s[k]=DS instead of 1 such that the total slit width remains close
            # to 1 like in the case of const='fixed slit' (exact value is not important)
            return self.__residuals(u,v,T,s_k,leastsq);
    else: raise ValueError("Parameter const='%s' is not allowed."%const);
    # perform fitting
    fit,_ = opt.leastsq(residuals, param0, args=(u,v,poly));
    #fit = param0
    poly,s_k,res = residuals(fit,u,v,poly,False);
    self.__debug_distortion(poly,u,v,s_k,res,
        title="DEBUG: fit_polynomial_distortions(), %s, I=%d, J=%d"%(const,I,J));
    # save results and slit borders in slit coordinates
    self.s2u = poly;
    self.s2u.sl,_=poly.inverse(self.u2x.ul,0);
    self.s2u.sr,_=poly.inverse(self.u2x.ur,0);
    # history
    self.history.extend(self.s2u.info(3).split("\n"));
    self.history.append("|- I=%d, J=%d, const=%s"%(I,J,const));
    return poly,s_k
def linearize_qaxis(self,N=20,ord=2,dq=1):
    """
    Fit transformation (q,r)->(s,t) that linearizes the q-axis by making
    the apparent aperture size constant.
      N   ... (opt) number of sampling points along y-direction for each image
      ord ... (opt) order of fitting polynomial
      dq  ... (opt) size of the aperture q-coordinates
    RETURNS aperture size and position in px, shape (k, n)
    """
    # 1. get undistorted coordinates of aperture borders
    u,v = self.__resample_aperture_border(N); # aperture borders, shape (K,N)
    s,t = self.s2u.inverse(u,v); # correct distortions
    # 2. calculate apearent aperture size
    s = s.reshape(self.Nap,2,N); # shape (k,2,N)
    size = s[:,1] - s[:,0]; # right-left
    pos = 0.5*(s[:,1]+s[:,0]); # (right+left)/2
    # 3. fit polynomial (common for all v-values)
    size_dispersion = np.poly1d(np.polyfit(pos.flatten(),size.flatten(),ord));
    if self.verbosity>2: # DEBUG: plot aperture size + quadratic fit
        smin,smax,slen = s.min(),s.max(),s.max()-s.min();
        x = np.mgrid[smin-0.1*slen:smax+0.1*slen:100j];
        fig=plt.figure();
        plt.title("DEBUG: Normalized aperture size for different y");
        # NOTE(review): set_color_cycle() is removed in newer matplotlib
        plt.gca().set_color_cycle([plt.cm.winter(1.*i/N) for i in range(N)]); # continous colors
        plt.plot(pos,size,'o',alpha=0.5);
        plt.plot(x,size_dispersion(x),'k-');
        plt.xlabel("slit position s");
        plt.ylabel("appearent aperture size ds");
        self.__dbg_fig.append(fig);
    # 4. create transformation object (q,r) -> (s,t)
    self.q2s=trafo.NonlinearDispersion(size_dispersion,scale=dq);
    # 5. write history
    self.history.extend(self.q2s.info(3).split('\n'));
    # TEST: check positive dispersion within the slit
    if self.q2s.xrange[0]>=self.s2u.sl or self.q2s.xrange[1]<=self.s2u.sr:
        print self.q2s.info(3);
        plt.show();
        raise ValueError("Unexpected xrange in QDispersion.linearize_qaxis().\n"\
            "Check polynomial fit of appearent aperture size using verbosity>2");
    if self.verbosity>2:
        print self.q2s.info(3);
    # TEST: aperture size should be roughly dq in q coordinates
    q,r=self.q2s.inverse(s,t.reshape(self.Nap,2,N));
    qsize = np.mean(q[:,1]-q[:,0],axis=1); # average over energies
    # - deviation of single aperture from dq by >5%
    if not np.allclose(qsize,dq,rtol=0.05) and self.verbosity>0:
        print "WARNING: in QDispersion.linearize_qaxis(): \n"+ \
            " calculated aperture size deviates by more than 5% from scale dq: \n"+ \
            " dq: %8.3f, %8.3f < qsize < %8.3f \n " % (dq,qsize.min(),qsize.max());
    # - variation of aperture size
    if np.std(qsize)/np.mean(qsize)>0.01 and self.verbosity>0: # rel error > 1%
        print "WARNING: in QDispersion.linearize_qaxis(): \n"+ \
            " calculated aperture size varies by more than 1%: \n"+ \
            " mean(dq): %8.3g, std(dq): %8.3g, variation: %5.2f%%\n"\
            %(np.mean(qsize),np.std(qsize),100*np.std(qsize)/np.mean(qsize));
    return size,pos
def get_q2u(self):
    """
    RETURN combined transformation from linearized coordinates
    to normalized slit coordinates (q,r)->(s,t)->(u,v)
    """
    distortion = self.s2u
    dispersion = self.q2s
    return trafo.Seq(distortion, dispersion)
def get_absolute_qs(self,line,verbosity=3):
    """
    OLD!
    determine two points on y-axis with known q-distance
    (low-loss w-q reference with central spot and bragg spot)
      line ... 1D array with N points containing two peaks
    RETURNS (x0_first, x0_second) fitted peak positions in pixels
    """
    # BUGFIX: N was undefined in the original implementation
    N = len(line)
    x = np.arange(N,dtype='float');
    ref=gaussfilt1D(line, 5); peaks=[];
    for i in range(2): # fit 2 peaks
        imax = np.argmax(ref); # initial guess for peak
        p, pconv = \
            opt.curve_fit(models.gauss,x,ref,p0=(imax, np.sum(ref[imax-5:imax+5]), 10));
        peaks.append(p); # gauss fit
        # BUGFIX: slice bounds must be integers (p contains floats)
        imin = int(max(p[0]-5*p[2],0));
        imax = int(min(p[0]+5*p[2],N));
        ref[imin:imax]=0; # remove peak from line (5*fwhm around x0)
    if verbosity>2:
        plt.figure(); plt.title("DEBUG: Fit q-reference");
        plt.plot(x,line,'k');
        plt.plot(x,models.gauss(x,*peaks[0]),'r');
        plt.plot(x,models.gauss(x,*peaks[1]),'g');
    return peaks[0][0], peaks[1][0];
def get_status(self):
    """Return the accumulated processing history as one multi-line string."""
    lines = self.history
    return "\n".join(lines)
def calibrate_qaxis(q2s,sl,sr,G):
    """
    Calibration of q-axis with two symmetric Bragg spots -G,G.
      q2s  ... NonlinearDispersion object mapping linear. q to slit coordinates
      sl,sr ... slit coordinates of -G,G Bragg spot
      G    ... length of G in reciprocal units [1/A]
    Note: we test for consistency of sl and sr, the direct beam is at s=0;
    RETURNS: rescaled trafo q2s (deep copy; the input object is unchanged)
    """
    # calculate linearized coordinates corresponding to u-values
    Q2s = copy.deepcopy(q2s);
    ql,_= q2s.inverse(sl,0);
    qr,_= q2s.inverse(sr,0);
    assert ql < 0 and qr > 0;
    assert np.allclose( (0,0), q2s.inverse(0,0) ); # direct beam at coordinate u=0=q
    # calculate scaling factor from the symmetric average of |ql| and qr
    q =(qr-ql)/2.;
    scale = G/q;
    Q2s.scale_u(scale); # change scale in NonlinearDispersion
    # check consistency (ql vs qr): qr should equal the symmetric average q
    rel_err=np.abs(qr-q)/q;
    if rel_err > 0.01 : # relative error of 1%
        print "WARNING in calibrate_qaxis(): left and right q-vector deviate:"
        print " ql=%.3f, qr=%.3f, rel_err=%.1f%% " %(scale*ql,scale*qr, rel_err*100)
    return Q2s;
def fit_aperture_borders(ap_series,illu_ref=None,reverse_y=False,verbosity=1,offset=0,**kwargs):
    """Wrapper for backward compatibility: build a QDispersion, crop, fit borders."""
    qdisp = QDispersion(ap_series, illu_ref, reverse_y=reverse_y, verbosity=verbosity)
    qdisp.crop_img(ymin=offset)
    return qdisp.fit_aperture_borders(**kwargs)
# -- main ----------------------------------------
# self-test: run the full calibration pipeline on the bundled test images
# (Python 2 only: uses the '<>' operator and old except syntax)
if __name__ == '__main__':
    try:
        # filenames
        aperture_files = ["../tests/qseries%d.tif" % (i) for i in range(1,10) if i<>2];
        ref_illumination = "../tests/qreference.tif";
        # fitting aperture borders + normalization
        QDisp=QDispersion(aperture_files, ref_illumination,verbosity=11);
        QDisp.crop_img(xmin=22, ymin=700);
        QDisp.fit_aperture_borders(rel_threshold=0.2);
        QDisp.normalize_coordinates(2371,1060,900,3850);
        # fit non-linear polynomial distortion + linearize q-axis
        poly,u0 = QDisp.fit_polynomial_distortions(I=3,J=2,const='fixed_slit');
        QDisp.linearize_qaxis(ord=4);
        # ADDITIONAL TESTS
        print 'HISTORY';
        for l in QDisp.get_status().split("\n"): print "| "+l;
        QDisp.verbosity=0;
        # test normalisation to slit coordinates
        T = QDisp.u2x;
        assert np.allclose((2371,1060),T.transform(0,0)); # origin at direct beam
        assert np.allclose(1,T.inverse(3850,1060)[0]-T.inverse(900,1060)[0]); # slit length 1
        # test fitting of trapezoidal distortions on synthetic rays u=k*v
        uv = np.reshape([ (u,k*u) for k in range(1,6) for u in range(100) ], (5,100,2));
        trapz,u0= QDisp.fit_trapezoidal_distortions(vp=(0,1),u=uv[...,0],v=uv[...,1]);
        assert( np.allclose( trapz.vp , (0,0), atol=1e-6 ) );
        # test coherence between trapezoidal and polynomial distortion
        trapz,u0= QDisp.fit_trapezoidal_distortions();
        poly,u0p= QDisp.fit_polynomial_distortions(const='fixed_slit');
        #print poly.coeff, [[ 0, trapz.vp[0]/trapz.vp[1]], [1, -1/trapz.vp[1]]]
        assert np.allclose(poly.coeff, [[ 0, trapz.vp[0]/trapz.vp[1]], [1, -1/trapz.vp[1]]]);
        assert np.allclose(u0,u0p);
        # test position of origin; q=0 should be at pos. of direct beam
        # TODO: polynomial distortion fails if dispersion has no zero
        for const in ('aperture_calibration','fixed_slit'):
            poly,u0 = QDisp.fit_polynomial_distortions(I=3,J=2,const=const);
            QDisp.linearize_qaxis(ord=2);
            T=QDisp.get_q2u();
            assert np.allclose((0,0),T.transform(0,0));
        plt.show();
        # uncomment to raise all figures before closing upon exception
    except Exception, e:
        print e;
        #plt.show();
        raise
| mit |
KIT-MRT/PLCC | SumImages.py | 1 | 2341 | #!/usr/bin/env python
# encoding: utf-8
#
# This file is part of PLCC.
#
# Copyright 2016 Johannes Graeter <johannes.graeter@kit.edu (Karlsruhe Institute of Technology)
#
# PLCC comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
#
# PLCC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PLCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import getopt
import cv2
import glob
from matplotlib import pyplot as plt
def main(argv=None):
#option handling
if argv is None:
argv=sys.argv
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# interpret input data
FolderName=argv[1]
print "Image folder="+FolderName
OutputFile=argv[2]
print "OutputFile="+OutputFile
if (not os.path.isdir(FolderName) ):
print "not a directory"
return
AllFileNames=glob.glob(FolderName+"/*.png")
#read first image to add rest up
MaxImg=cv2.imread(AllFileNames[0],0).astype("double")
del(AllFileNames[0])
for ImName in AllFileNames:
img=cv2.imread(ImName,0).astype("double")
MaxImg+=img
#Threshold image
MaxVal=len(AllFileNames)*8.
Max=0.
Min=999999.
for line in MaxImg:
for pixel in line:
if pixel>MaxVal:
pixel=MaxVal
if pixel>Max:
Max=pixel
if pixel<Min:
Min=pixel
MaxImg=255./(Max-Min)*(MaxImg-Min)
cv2.imwrite(OutputFile,MaxImg.astype("uint8"))
plt.imshow(MaxImg)
plt.show()
# script entry point
if __name__ == '__main__':
    main()
| gpl-3.0 |
gviejo/ThalamusPhysio | python/main_make_AUTOCOR_during_SPINDLES.py | 1 | 2006 | #!/usr/bin/env python
'''
File name: main_ripp_mod.py
Author: Guillaume Viejo
Date created: 16/08/2017
Python Version: 3.5.2
Sharp-waves ripples modulation
Used to make figure 1
'''
import numpy as np
import pandas as pd
import scipy.io
from functions import *
# from pylab import *
# import ipyparallel
from multiprocessing import Pool
import os, sys
import neuroseries as nts
import time
# Compute per-neuron autocorrelograms restricted to hippocampal spindle
# epochs for every session and pickle the result.
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
datatosave = {}
# clients = ipyparallel.Client()
# dview = clients.direct_view()
dview = Pool(8)  # worker pool used to compute autocorrelograms in parallel
for session in datasets:
    # channel structure of the recording: pick the HPC channel index
    generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
    shankStructure = loadShankStructure(generalinfo)
    if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
        hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
    else:
        hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
    spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
    # hippocampal spindle epochs: the .evt file stores start/stop pairs in ms
    spind_ep_hpc = np.genfromtxt(data_directory+session+"/"+session.split("/")[1]+".evt.spd.hpc")[:,0]
    spind_ep_hpc = spind_ep_hpc.reshape(len(spind_ep_hpc)//2,2)
    spind_ep_hpc = nts.IntervalSet(spind_ep_hpc[:,0], spind_ep_hpc[:,1], time_units = 'ms')
    # keep only neurons that fire at least once during the spindle epochs
    spikes_spd = {n:spikes[n].restrict(spind_ep_hpc) for n in spikes.keys() if len(spikes[n].restrict(spind_ep_hpc))}
    spikes_list = [spikes_spd[i].as_units('ms').index.values for i in spikes_spd.keys()]
    # for tsd in spikes_list:
    # 	cross_correlation((tsd, tsd))
    Hcorr = dview.map_async(autocorr, spikes_list).get()
    # store one entry per (session, neuron) under a composite key
    for n,i in zip(spikes_spd.keys(), range(len(spikes_spd.keys()))):
        datatosave[session.split("/")[1]+"_"+str(n)] = Hcorr[i]
    print(session)
import _pickle as cPickle
cPickle.dump(datatosave, open('/mnt/DataGuillaume/MergedData/AUTOCORR_SPD.pickle', 'wb'))
| gpl-3.0 |
eezee-it/addons-yelizariev | sugarcrm_migration/import_sugarcrm.py | 16 | 44410 | # -*- coding: utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
try:
import MySQLdb
import MySQLdb.cursors
from pandas import merge, DataFrame
except ImportError:
pass
from openerp.addons.import_framework.import_base import import_base, create_childs
from openerp.addons.import_framework.mapper import *
import subprocess
def fix_email(text):
    """Replace carriage returns by HTML line breaks in an e-mail body."""
    return '<br>'.join(text.split('\r'))
class import_sugarcrm(import_base):
# Source table names in the SugarCRM MySQL schema.
# Names ending in '_' are xml_id prefixes for generated records
# (tags, companies), not real tables.
TABLE_USER = 'users'
TABLE_ACCOUNT = 'accounts'
TABLE_ACCOUNT_LEAD = 'accounts_leads'
TABLE_ACCOUNT_TAG = 'accounts_tags_'
TABLE_CONTACT = 'contacts'
TABLE_CONTACT_COMPANY = 'contacts_companies_'
TABLE_CONTACT_TAG = 'contacts_tags_'
TABLE_CASE = 'cases'
TABLE_CASE_TAG = 'cases_tags_'
# tables below are not migrated (kept for reference)
#TABLE_EMPLOYEE = 'Employees'
#TABLE_OPPORTUNITY = 'Opportunities'
#TABLE_LEAD = 'Leads'
#TABLE_STAGE = 'crm_stage'
#TABLE_ATTENDEE = 'calendar_attendee'
#TABLE_CALL = 'Calls'
#TABLE_MEETING = 'Meetings'
#TABLE_TASK = 'Tasks'
#TABLE_PROJECT = 'Project'
#TABLE_PROJECT_TASK = 'ProjectTask'
#TABLE_BUG = 'Bugs'
TABLE_NOTE = 'Notes'
TABLE_NOTE_INTERNAL = 'notes_internal'
TABLE_EMAIL = 'emails'
#TABLE_COMPAIGN = 'Campaigns'
#TABLE_DOCUMENT = 'Documents'
#TABLE_HISTORY_ATTACHMNET = 'history_attachment'
def initialize(self):
    """
    Open the MySQL connection to the SugarCRM database and, when
    'db_dump_fies' is present in the context, load each dump file via the
    mysql command line client before the import starts.
    """
    self.db = MySQLdb.connect(host=self.context.get('db_host'),
                              port=int(self.context.get('db_port')),
                              user=self.context.get('db_user'),
                              passwd=self.context.get('db_passwd'),
                              db=self.context.get('db_name'),
                              charset='utf8',
                              # DictCursor: rows come back as dicts keyed by column
                              cursorclass=MySQLdb.cursors.DictCursor
                              )
    db_dump_fies = self.context.get('db_dump_fies')
    if db_dump_fies:
        cur = self.db.cursor()
        for f in db_dump_fies:
            _logger.info('load dump %s' % f)
            # NOTE(review): fd is never closed explicitly
            fd = open(f, 'r')
            # SECURITY NOTE: passing the password via -p on the command line
            # exposes it in the process list; acceptable only for local use
            subprocess.Popen(['mysql',
                              '-u', self.context.get('db_user'),
                              '-p{}'.format(self.context.get('db_passwd')),
                              '-h', self.context.get('db_host'),
                              '-P', self.context.get('db_port'),
                              self.context.get('db_name')], stdin=fd).wait()
        cur.close()
def finalize(self):
    """Hook called after the import; nothing to clean up here."""
    pass
def finalize_note(self):
    """
    Post-processing for notes: attachments were imported with
    res_model_tmp/res_id_tmp pointing at their mail.message; here each
    attachment is linked into the message's attachment_ids.
    """
    mail_message_obj = self.pool['mail.message']
    ids = self.pool['ir.attachment'].search(self.cr, self.uid, [('res_model_tmp','=','mail.message')])
    for a in self.pool['ir.attachment'].read(self.cr, self.uid, ids, ['id', 'res_id_tmp'], context=self.context):
        if not a['res_id_tmp']:
            continue
        # (4, id) = "link existing record" in the one2many/many2many command list
        mail_message_obj.write(self.cr, self.uid, [a['res_id_tmp']],
                               {'attachment_ids':[(4, a['id'])]})
def get_data(self, table):
    """Fetch all rows of *table* as a list of dicts (DictCursor rows)."""
    # NOTE: 'table' only ever comes from our own hard-coded mapping names,
    # so the string interpolation below is not exposed to user input.
    cursor = self.db.cursor()
    cursor.execute("SELECT * FROM %s" % table)
    rows = cursor.fetchall()
    cursor.close()
    return list(rows)
def get_mapping(self):
    """Return the ordered list of table mappings (order = import order)."""
    builders = (
        self.get_mapping_user,
        self.get_mapping_account,
        self.get_mapping_contact,
        self.get_mapping_case,
        self.get_mapping_email,
        self.get_mapping_note_internal,
        self.get_mapping_note,
    )
    return [build() for build in builders]
def merge_table_email(self, df, id_on='id'):
    """
    Left-join e-mail addresses onto df:
    df[id_on] -> email_addr_bean_rel.bean_id -> email_addresses.id,
    adding the 'email_address' column (and relation columns with suffixes).
    """
    #mysql> select bean_module, count(*) from email_addr_bean_rel group by bean_module;
    #+-------------+----------+
    #| bean_module | count(*) |
    #+-------------+----------+
    #| Contacts | 1048 |
    #| Leads | 31 |
    #| Prospects | 20391 |
    #| Users | 33 |
    #+-------------+----------+
    #4 rows in set (0.21 sec)
    t1 = merge(df,
               DataFrame(self.get_data('email_addr_bean_rel')),
               how='left',
               left_on=id_on,
               suffixes=('', '_email_addr_bean_rel'),
               right_on='bean_id')
    t2 = merge(t1,
               DataFrame(self.get_data('email_addresses')),
               how='left',
               left_on = 'email_address_id',
               suffixes=('', '_email_addresses'),
               right_on = 'id')
    return t2
def table_user(self):
    """Users table joined with their e-mail addresses."""
    users = DataFrame(self.get_data('users'))
    return self.merge_table_email(users)
def get_mapping_user(self):
    """Mapping for SugarCRM users -> res.users."""
    return {
        'name': self.TABLE_USER,
        'table': self.table_user,
        'models':[{
            'model' : 'res.users',
            'fields': {
                'id': xml_id(self.TABLE_USER, 'id'),
                'active': lambda record: not record['deleted'], # status == 'Active'
                'name': concat('first_name', 'last_name'),
                'login': value('user_name', fallback='last_name'),
                'password' : 'user_hash',
                'company_id/id': const('base.main_company'),
                'alias_name': value('user_name', fallback='last_name', lower=True),
                'email': 'email_address',
            }
        }]
    }
def table_account(self):
    """Accounts joined with their custom-field table (accounts_cstm)."""
    base = DataFrame(self.get_data('accounts'))
    custom = DataFrame(self.get_data('accounts_cstm'))
    return merge(base, custom, left_on='id', right_on='id_c')
def get_hook_tag(self, field_name):
    """Build an import hook splitting a comma-separated tag field into records."""
    def hook(external_values):
        raw = external_values.get(field_name) or ''
        if not isinstance(raw, basestring):
            raw = str(raw)
        cleaned = (do_clean_sugar(part) for part in raw.split(','))
        return [{field_name: tag} for tag in cleaned if tag]
    return hook
def tag(self, model, xml_id_prefix, field_name):
    """One mapping entry creating tag records below a fixed parent category."""
    parent = xml_id_prefix + field_name
    fields = {
        'id': xml_id(parent, field_name),
        'name': field_name,
        'parent_id/id': const('sugarcrm_migration.' + parent),
    }
    return {
        'model': model,
        'hook': self.get_hook_tag(field_name),
        'fields': fields,
    }
def context_partner(self):
    # see module description: disable address synchronisation on partner writes
    return dict(skip_addr_sync=True)
def get_mapping_account(self):
    """
    Mapping for SugarCRM accounts: tag categories, the company partner,
    a related crm.lead and the contact persons embedded in account columns.
    """
    def partner(prefix, suffix):
        # contact person stored inline in columns '<prefix>first_name<suffix>' etc.
        return {'model' : 'res.partner',
                'hook': self.get_hook_ignore_empty('%sfirst_name%s'%(prefix, suffix),
                                                   '%slast_name%s'%(prefix, suffix)),
                'context':self.context_partner,
                'fields': {
                    'id': xml_id(self.TABLE_ACCOUNT + '_%s%s'%(prefix, suffix), 'id'),
                    'name': concat('%sfirst_name%s'%(prefix, suffix), '%slast_name%s'%(prefix, suffix)),
                    'phone': '%sphone%s'%(prefix, suffix),
                    'mobile': '%smobile%s'%(prefix, suffix),
                    'fax': '%sfax%s'%(prefix, suffix),
                    'email': '%semail%s'%(prefix, suffix),
                    'parent_id/id': xml_id(self.TABLE_ACCOUNT, 'id'),
                    'function': '%sjob_title%s'%(prefix, suffix),
                    'customer': const('1'),
                    'supplier': const('0'),
                },
                }
    partner_list = [
        partner('finance_', ''),
        partner('pa_', '_primary_c'),
        partner('pa_', '_secondary_c'),
        partner('', '_primary_c'),
        partner('', '_secondary_c'),
        partner('', '_quantenary_c'),
        partner('', '_other_c'),
    ]
    tag_list = [
        self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'initial_source_of_referral_c'),
        self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'private_sector_new_c'),
        self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'rtw_organisation_type_c'),
        self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'sales_funnel_c'),
        self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'shenley_holdings_company_new_c'),
        self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'source_of_referral_c'),
        self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'status_c'),
        self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'introduced_by_c'),
        self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'introduced_by_customer_c'),
        self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'sister_company_c'),
    ]
    return {
        'name': self.TABLE_ACCOUNT,
        'table': self.table_account,
        'dependencies' : [self.TABLE_USER],
        'models': tag_list + [
            # company
            {
                'model' : 'res.partner',
                'context':self.context_partner,
                'fields' :
                {
                    'id': xml_id(self.TABLE_ACCOUNT, 'id'),
                    'name': concat('name', 'first_name_c', 'last_name_c'),
                    'is_company': const('1'),
                    'date': fixdate('date_entered'),
                    'active': lambda record: not record['deleted'],
                    'user_id/.id': user_by_login('account_manager_2_c'),
                    'website': first('website', 'website_c'),
                    'phone':'company_phone_c',
                    'email':first('email_address', 'email_c', lower=True),
                    'fax': first('phone_fax', 'fax_c', 'fax_primary_c'),
                    'city': 'company_city_c',
                    'zip': 'company_post_code_c',
                    #'state_id': 'company_region_c',
                    'street': 'company_street_c',
                    'street2': concat('company_street_2_c','company_street_3_c'),
                    'country_id/.id': country_by_name('europe_c'),
                    'opt_out': mapper_int('unsubscribe_c'),
                    'customer': const('1'),
                    'supplier': const('0'),
                    'category_id/id': tags_from_fields(self.TABLE_ACCOUNT_TAG, ['initial_source_of_referral_c', 'private_sector_new_c', 'rtw_organisation_type_c', 'sales_funnel_c', 'shenley_holdings_company_new_c', 'source_of_referral_c', 'status_c', 'introduced_by_c', 'introduced_by_customer_c', 'sister_company_c',]),
                    'comment': ppconcat('website_c'),
                }},
            # related lead
            {
                'model' : 'crm.lead',
                'fields': {
                    'id': xml_id(self.TABLE_ACCOUNT_LEAD, 'id'),
                    'partner_id/id': xml_id(self.TABLE_ACCOUNT, 'id'),
                    'name': concat('name', 'first_name_c', 'last_name_c'),
                    'active': lambda record: not record['deleted'],
                    #'user_id/id': xml_id(self.TABLE_USER, 'assigned_user_id'),
                    'phone':first('phone_office', 'telephone_c', 'company_phone_c'),
                    'email_from':first('email_address', 'email_c', lower=True),
                    'fax': first('phone_fax', 'fax_c', 'fax_primary_c'),
                    'probability': map_val('sales_funnel_c', self.map_lead_probability, 0),
                    'stage_id/id': map_val('status_c', self.map_lead_stage, 'crm.stage_lead1'),
                    'type': map_val('status_c', self.map_lead_type, 'lead'),
                    'section_id/id': const('sales_team.section_sales_department'),
                }
            }
        ] + partner_list # related contacts
    }
# sales funnel stage -> crm.lead probability in percent
# ('Scheduled' was 150 in sugarcrm; capped at 100 here)
map_lead_probability = {
    'Lost': 0,
    'Proposal Sent': 50,
    'Prospect Identified': 1,
    'Prospect Qualified': 20,
    'Sales Won': 100,
    'Scheduled': 100, #in sugarcrm: 150,
    'Suspect': 0,
}
#mysql> select sales_funnel_c, count(*) from accounts_cstm group by sales_funnel_c;
#+---------------------+----------+
#| sales_funnel_c | count(*) |
#+---------------------+----------+
#| NULL | 4322 |
#| | 144 |
#| Lost | 1 |
#| Proposal Sent | 3 |
#| Prospect Identified | 5 |
#| Prospect Qualified | 20 |
#| Sales Won | 2 |
#| Scheduled | 1 |
#| Suspect | 62 |
# account status -> crm.lead stage xml_id
map_lead_stage = {
    '': 'crm.stage_lead7', # Lost
    'Archived': 'crm.stage_lead2', # Dead
    'Dorment': 'crm.stage_lead4', # Proposition
    'Live Contact': 'crm.stage_lead6', # Won
    'Pipeline': 'crm.stage_lead5', # Negotiation
    'Prospect': 'crm.stage_lead1', # New
}
# account statuses that become opportunities (everything else stays a lead)
map_lead_type = {
    'Dorment': 'opportunity',
    'Live Contact': 'opportunity',
    'Pipeline': 'opportunity',
}
#mysql> select status_c, count(*) from accounts_cstm group by status_c;
#+---------------+----------+
#| status_c | count(*) |
#+---------------+----------+
#| NULL | 210 |
#| | 655 |
#| Archived | 84 |
#| Dorment | 101 |
#| Live Contract | 73 |
#| Pipeline | 390 |
#| Prospect | 3047 |
#+---------------+----------+
def table_contact(self):
    """Contacts joined with their custom fields and e-mail addresses."""
    base = DataFrame(self.get_data('contacts'))
    custom = DataFrame(self.get_data('contacts_cstm'))
    joined = merge(base, custom, left_on='id', right_on='id_c')
    return self.merge_table_email(joined)
def get_mapping_contact(self):
    """
    Mapping for SugarCRM contacts: tag categories, the employer company
    (from company_name_c) and the contact partner itself (as supplier).
    """
    tag_list = [
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'agreed_commission_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'agreed_introducer_commission_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'ambassador_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'consultant_type_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'consultant_type_other_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'england_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'ethnicity_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'europe_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'first_language_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'gender_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'other_languages_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'religion_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'role_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'role_type_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'specialism_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'status_live_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'status_live_new_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'trainer_type_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'training_experience_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'willing_to_travel_c'),
        self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'skill_set_c'),
    ]
    def company(field_name):
        # employer company created from the contact's company-name column
        return {'model':'res.partner',
                'context':self.context_partner,
                'hook':self.get_hook_ignore_empty(field_name),
                'fields': {
                    'id': xml_id(self.TABLE_CONTACT_COMPANY, field_name),
                    'name': field_name,
                    'is_company': const('1'),
                    'customer': const('0'),
                    'supplier': const('1'),
                }
                }
    return {
        'name': self.TABLE_CONTACT,
        'table': self.table_contact,
        'dependencies' : [self.TABLE_USER],
        'models':tag_list + [company('company_name_c')] + [{
            'model' : 'res.partner',
            'context':self.context_partner,
            'fields': {
                'id': xml_id(self.TABLE_CONTACT, 'id'),
                'name': concat('title', 'first_name', 'last_name'),
                'parent_id/id': xml_id(self.TABLE_CONTACT_COMPANY, 'company_name_c'),
                'create_date': 'date_entered',
                'write_date': 'date_modified',
                'active': lambda record: not record['deleted'],
                #'user_id/id': xml_id(self.TABLE_USER, 'assigned_user_id'),
                'city': 'city_c',
                'street': 'company_street_c',
                'street2': concat('company_street_2_c','company_street_3_c'),
                'zip': 'company_post_code_c',
                'phone':first('company_phone_c', 'home_phone_c', 'phone_home', 'phone_work', 'phone_other', 'home_telephone_c', 'business_telephone_c'),
                'mobile':first('phone_mobile', 'personal_mobile_phone_c'),
                'email':first('email_c', 'email_address', 'personal_email_c', 'business_email_c', 'other_email_c', 'email_2_c'),
                'website': first('website', 'website_c'),
                'fax': first('phone_fax', 'company_fax_c'),
                'customer': const('0'),
                'supplier': const('1'),
                'category_id/id': tags_from_fields(self.TABLE_CONTACT_TAG, ['agreed_commission_c', 'agreed_introducer_commission_c', 'ambassador_c', 'consultant_type_c', 'consultant_type_other_c', 'england_c', 'ethnicity_c', 'europe_c', 'first_language_c', 'gender_c', 'other_languages_c', 'religion_c', 'role_c', 'role_type_c', 'skill_set_c', 'specialism_c', 'status_live_c', 'status_live_new_c', 'trainer_type_c', 'training_experience_c', 'willing_to_travel_c', ]),
                # all remaining phone/e-mail variants are preserved in the
                # free-text comment field so no data is lost on import
                'comment': ppconcat(
                    'description',
                    'phone_home',
                    'phone_mobile',
                    'phone_work',
                    'phone_other',
                    'phone_fax',
                    'personal_email_c',
                    'business_email_c',
                    'other_email_c',
                    'home_telephone_c',
                    'business_telephone_c',
                    'personal_mobile_phone_c',
                    'personal_telephone_c',
                    'home_phone_c',
                    'mobile_phone_c',
                    'other_phone_c',
                    'email_c',
                    'email_2_c',
                    'company_phone_c',
                    'company_mobile_phone_c',
                    'company_fax_c',
                    'company_phone_other_c',
                    'company_email_c',
                    'prg_email_issued_c',
                    'email_address_permanent_c',
                    'prg_email_c',
                    'cjsm_email_address_c',
                )
            }
        }]
    }
def table_case(self):
    """Cases joined with their custom-field table (cases_cstm)."""
    base = DataFrame(self.get_data('cases'))
    custom = DataFrame(self.get_data('cases_cstm'))
    return merge(base, custom, left_on='id', right_on='id_c')
# SugarCRM case priority -> OpenERP priority selection value
case_priority_mapping = {
    'P1': '0',
    'P2': '1',
    'P3': '2'
}
# SugarCRM case status -> target state used by the case mapping
case_state_mapping = {
    'Awaiting Payment':'awaiting_payment',
    'Cancelled':'cancelled',
    'Completed':'close',
    'Deferred':'pending',
    'Live':'open',
    'Lost':'lost',
    'Pipeline':'pipeline_reactive',
    'Pipeline - Proactive':'pipeline_proactive',
    'Provisional':'draft',
    'To be Invoiced':'to_be_invoiced',
}
def field_estimated_close_date_c(self, external_values):
    # NOTE(review): this looks unfinished — both locals are computed but
    # unused and the function always returns an empty string; confirm intent.
    estimated_close_date_c = external_values.get('estimated_close_date_c')
    date = external_values.get('end_date_c')
    return ''
def finalize_case(self):
    """
    Post-processing for cases: copy the temporary user_id_tmp stored on the
    analytic account onto the project that wraps that account (user_id).
    """
    ids = self.pool['account.analytic.account'].search(self.cr, self.uid, [('user_id_tmp', '!=', False)])
    for r in self.pool['account.analytic.account'].read(self.cr, self.uid, ids, ['id', 'user_id_tmp']):
        project_id = self.pool['project.project'].search(self.cr, self.uid, [('analytic_account_id','=', int(r['id']))], context=self.context)
        # user_id_tmp is a many2one tuple (id, name); write the raw id
        self.pool['project.project'].write(self.cr, self.uid, project_id, {'user_id':r['user_id_tmp'][0]}, context=self.context)
    def get_mapping_case(self):
        """Import mapping for SugarCRM Cases -> ``account.analytic.account``.

        Besides the contract itself, this registers:
        * tag vocabularies built from the case custom columns (``tag_list``),
        * partner records parsed from primary/secondary contact columns,
        * participant partners parsed from the case_participant columns.
        """
        # Status distribution in the source data, for reference:
        #mysql> select case_status_c, count(*) from cases_cstm group by case_status_c;
        #+----------------------+----------+
        #| case_status_c | count(*) |
        #+----------------------+----------+
        #| NULL | 2 |
        #| | 40 |
        #| Awaiting Payment | 10 |
        #| Cancelled | 182 |
        #| Completed | 339 |
        #| Deferred | 125 |
        #| Live | 25 |
        #| Lost | 419 |
        #| Pipeline | 60 |
        #| Pipeline - Proactive | 73 |
        #| Provisional | 2 |
        #| To be Invoiced | 7 |
        #+----------------------+----------+
        def partner_participant(prefix, suffix):
            # Sub-mapping for one '<prefix>case_participant<suffix>' column
            # group; skipped entirely when the name column is empty.
            return {'model' : 'res.partner',
                'hook': self.get_hook_ignore_empty('%scase_participant%s'%(prefix, suffix)),
                'context':self.context_partner,
                'fields': {
                    'id': xml_id(self.TABLE_CASE + '_%s%s'%(prefix, suffix), 'id'),
                    'name': '%scase_participant%s'%(prefix, suffix),
                    'phone': '%sparticipant_phone%s'%(prefix, suffix),
                    'function': '%sparticipant_role%s'%(prefix, suffix),
                    'participate_in_contract_ids/id': xml_id(self.TABLE_CASE, 'id'),
                    'customer': const('0'),
                    'supplier': const('0'),
                },
            }
        def partner(prefix, suffix):
            # Sub-mapping for one '<prefix>contact<suffix>' column group.
            return {'model' : 'res.partner',
                'hook': self.get_hook_ignore_empty('%scontact%s'%(prefix, suffix)),
                'context':self.context_partner,
                'fields': {
                    'id': xml_id(self.TABLE_CASE + '_%s%s'%(prefix, suffix), 'id'),
                    'name': '%scontact%s'%(prefix, suffix),
                    'phone': '%sphone%s'%(prefix, suffix),
                    'mobile': '%smobile%s'%(prefix, suffix),
                    'function': '%srole%s'%(prefix, suffix),
                    'customer': const('0'),
                    'supplier': const('0'),
                },
            }
        partner_participant_list = [
            partner_participant('', '_c'),
            partner_participant('', '_2_c'),
            partner_participant('', '_3_c'),
        ]
        partner_list = [
            partner('primary_', '_c'),
            partner('secondary_', '_c'),
        ]
        # One tag vocabulary per case custom-category column.
        tag_list = [
            self.tag('contract.category', self.TABLE_CASE_TAG, 'business_type_c'),
            self.tag('contract.category', self.TABLE_CASE_TAG, 'probability_of_closing_c'),
            self.tag('contract.category', self.TABLE_CASE_TAG, 'production_funnel_c'),
            self.tag('contract.category', self.TABLE_CASE_TAG, 'product_area_c'),
            self.tag('contract.category', self.TABLE_CASE_TAG, 'product_type_c'),
            self.tag('contract.category', self.TABLE_CASE_TAG, 'reason_lost_c'),
            self.tag('contract.category', self.TABLE_CASE_TAG, 'source_of_referral_c'),
        ]
        return {
            'name': self.TABLE_CASE,
            'table': self.table_case,
            'dependencies' : [
                self.TABLE_USER,
                self.TABLE_ACCOUNT,
                self.TABLE_CONTACT,
                #self.TABLE_LEAD
            ],
            # Tags and partners must be imported before the contract that
            # references them; participants reference the contract and
            # therefore come after it.
            'models': []+
            tag_list+
            partner_list+
            [{
                'model' : 'account.analytic.account',
                'context': lambda : {'active_test':False},
                'finalize': self.finalize_case,
                'fields': {
                    'id': xml_id(self.TABLE_CASE, 'id'),
                    'name': concat('case_number_c', 'case_number', 'name', delimiter=' * '),
                    'type': const('contract'),
                    'use_tasks': const('1'),
                    # parked here; copied to the project by finalize_case
                    'user_id_tmp/.id': user_by_login('case_manager_c'),
                    'support_manager_id/.id': user_by_login('support_case_manager_c'),
                    'notetaker_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id4_c', default=None),
                    'proof_reader_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id2_c', default=None),
                    'consultant_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id_c', default=None),
                    'business_manager_id/.id': res_id(const(self.TABLE_CASE + '_%s%s'%('secondary_', '_c')), 'id', default=None),
                    'commissioning_manager_id/.id': res_id(const(self.TABLE_CASE + '_%s%s'%('primary_', '_c')), 'id', default=None),
                    'category_id/id': tags_from_fields(self.TABLE_CASE_TAG, ['business_type_c', 'probability_of_closing_c', 'production_funnel_c', 'product_area_c', 'product_type_c', 'reason_lost_c', 'source_of_referral_c',]),
                    'create_date': 'date_entered',
                    'state': map_val('case_status_c', self.case_state_mapping, 'draft'),
                    'partner_id/id': xml_id(self.TABLE_ACCOUNT, 'account_id'),
                    'date_start':'end_date_c',
                    'date':call(self.field_estimated_close_date_c),
                    'description': ppconcat(
                        'invoiced_value_of_case_c',
                    ),
                }
            }] +
            partner_participant_list
        }
def table_filter_modules(self, t, field_name='bean_module'):
newt = t[(t[field_name] == 'Accounts')|
(t[field_name] == 'Cases')|
(t[field_name] == 'Contacts')|
(t[field_name] == 'Notes')|
(t[field_name] == 'Emails')
]
return newt
    def table_email(self):
        """Build the Emails source table.

        ``emails`` is left-joined with ``emails_text`` (message bodies) and
        ``emails_beans`` (links to the records an email belongs to), then
        restricted to the bean modules this importer handles.
        """
        t1 = merge(DataFrame(self.get_data('emails')),
                   DataFrame(self.get_data('emails_text')),
                   how='left',
                   left_on='id',
                   right_on='email_id'
                   )
        t2 = merge(t1,
                   DataFrame(self.get_data('emails_beans')),
                   how='left',
                   left_on='id',
                   right_on='email_id',
                   suffixes = ('', '_emails_beans')
                   )
        t3 = self.table_filter_modules(t2)
        #t3 = t3[:100] # for debug
        return t3
map_to_model = {
'Accounts': 'res.partner',
'Cases': 'project.project',
'Contacts': 'res.partner',
'Prospects': 'TODO',
'Emails': 'mail.message',
#'Notes': 'ir.attachment',
}
map_to_table = {
'Accounts': TABLE_ACCOUNT,
'Cases': TABLE_CASE,
'Contacts': TABLE_CONTACT,
'Prospects': 'TODO',
'Emails': TABLE_EMAIL,
#'Notes': TABLE_NOTE,
}
#mysql> select parent_type, count(*) from notes group by parent_type;
#+-------------+----------+
#| parent_type | count(*) |
#+-------------+----------+
#| NULL | 604 |
#| Accounts | 6385 |
#| Cases | 12149 |
#| Contacts | 41 |
#| Emails | 12445 |
#| Leads | 355 |
#| Meetings | 2 |
#+-------------+----------+
#7 rows in set (0.30 sec)
#
    def get_mapping_email(self):
        """Import mapping for SugarCRM Emails -> ``mail.message``.

        ``hook_email`` resolves each email's bean link into the
        ``model``/``res_id`` pair read by the field mapping below.
        """
        # bean_module distribution in the source data, for reference:
        # mysql> select bean_module, count(*) from emails_beans group by bean_module;
        # +---------------+----------+
        # | bean_module | count(*) |
        # +---------------+----------+
        # | Accounts | 182 |
        # | Cases | 1746 |
        # | Contacts | 493 |
        # | Leads | 102 |
        # | Opportunities | 1 |
        # | Prospects | 16819 |
        # +---------------+----------+
        # 6 rows in set (0.56 sec)
        return {
            'name': self.TABLE_EMAIL,
            'table': self.table_email,
            'dependencies' : [
                self.TABLE_USER,
                self.TABLE_ACCOUNT,
                self.TABLE_CONTACT,
                self.TABLE_CASE,
                #self.TABLE_LEAD,
                #self.TABLE_OPPORTUNITY,
                #self.TABLE_MEETING,
                #self.TABLE_CALL
            ],
            'models':[{
                'model' : 'mail.message',
                'hook': self.hook_email,
                'fields': {
                    'id': xml_id(self.TABLE_EMAIL, 'id'),
                    'type':const('email'),
                    # email type distribution in the source, for reference:
                    #mysql> select type, count(*) from emails group by type;
                    #+----------+----------+
                    #| type | count(*) |
                    #+----------+----------+
                    #| archived | 17119 |
                    #| draft | 8 |
                    #| inbound | 3004 |
                    #| out | 75 |
                    #+----------+----------+
                    #4 rows in set (0.76 sec)
                    'email_from': 'from_addr_name',
                    'reply_to': 'reply_to_addr',
                    #'same_thread': 'TODO',
                    'author_id/id': user2partner(self.TABLE_USER, 'created_by'),
                    #'partner_ids' #many2many
                    #attachment_ids' #many2many
                    #'parent_id': 'TODO',
                    # model/res_id are filled in by hook_email
                    'model': 'model',
                    'res_id': 'res_id',
                    #record_name
                    'subject':'name',
                    'date':'date_sent',
                    'message_id': 'message_id',
                    # prefer the html body, fall back to plain text
                    'body': call(lambda vals, html, txt: fix_email(html or txt or ''),
                                 value('description_html'), value('description')),
                    'subtype_id/id':const('mail.mt_comment'),
                    'notified_partner_ids/.id': emails2partners('to_addrs'),
                    #'state' : const('received'),
                    #'email_to': 'to_addrs_names',
                    #'email_cc': 'cc_addrs_names',
                    #'email_bcc': 'bcc_addrs_names',
                    #'partner_id/.id': 'partner_id/.id',
                    #'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
                }
            }]
        }
    def table_note(self):
        """Build the Notes source table for attachment import: only rows
        with a filename (i.e. actual attachments) in supported modules."""
        t = DataFrame(self.get_data('notes'))
        t = self.table_filter_modules(t, 'parent_type')
        t = t.dropna(subset=['filename'])
        #t = t[:10] # for debug
        return t
    def table_note_internal(self):
        """Build the Notes source table for message import: supported
        modules except Emails (email-attached notes are handled as
        attachments of the imported mail.message instead)."""
        t = DataFrame(self.get_data('notes'))
        t = self.table_filter_modules(t, 'parent_type')
        t = t[(t['parent_type'] != 'Emails')]
        #t = t[:100] # for debug
        return t
    def get_id_model(self, external_values, field_name='parent_id', parent_field_name='parent_type'):
        """Resolve a SugarCRM polymorphic reference to (res_id, model).

        ``parent_field_name`` picks the import table / target model via
        ``map_to_table`` / ``map_to_model``; ``field_name`` holds the
        source record id.  Cases are imported as analytic accounts, so for
        'project.project' the analytic-account id is translated into the
        wrapping project's id.
        """
        id = res_id(map_val(parent_field_name, self.map_to_table), field_name)
        id.set_parent(self)
        id = id(external_values)
        model = map_val(parent_field_name, self.map_to_model)
        model = model(external_values)
        if model=='project.project':
            # look up the project that wraps this analytic account
            id = self.pool['project.project'].search(self.cr, self.uid, [('analytic_account_id','=', int(id))], context=self.context)
        if isinstance(id, list):
            # search() returns a list; keep the first match
            id=id[0]
        return str(id),model
    def hook_email(self, external_values):
        """Row hook for emails: fill ``res_id``/``model`` from the
        emails_beans link columns so the mapping can attach the message
        to its related record."""
        id,model = self.get_id_model(external_values, field_name='bean_id', parent_field_name='bean_module')
        external_values['res_id']=id
        external_values['model']=model
        return external_values
    def hook_note(self, external_values):
        """Row hook for notes: fill ``res_id``/``res_model``.

        Account notes carrying a contact_id are re-targeted at the contact
        when it can be resolved.  Returns ``None`` (row skipped) when the
        parent record cannot be found.
        """
        parent_type = external_values.get('parent_type')
        contact_id = external_values.get('contact_id')
        if parent_type == 'Accounts' and contact_id:
            # prefer the more specific Contacts target when available
            external_values['parent_type'] = 'Contacts'
            id,model = self.get_id_model(external_values, field_name='contact_id')
            if id:
                #print 'note Accounts fixed to Contacts'
                external_values['res_id'] = id
                external_values['res_model'] = model
                return external_values
        # restore the original parent_type (may have been overwritten above)
        external_values['parent_type'] = parent_type
        id,model = self.get_id_model(external_values)
        if not id:
            #print 'Note not found', parent_type, external_values.get('parent_id')
            return None
        else:
            #print 'Note FOUND', parent_type, external_values.get('parent_id')
            pass
        external_values['res_id'] = id
        external_values['res_model'] = model
        return external_values
map_note_to_table = {
'Emails': TABLE_EMAIL
}
    def get_mapping_note(self):
        """Import mapping for note attachments -> ``ir.attachment``.

        ``res_model_tmp``/``res_id_tmp`` additionally link the attachment
        to the imported mail.message it came with (finalized by
        ``finalize_note``).
        """
        return {
            'name': self.TABLE_NOTE,
            'table': self.table_note,
            'dependencies' : [self.TABLE_EMAIL,
                              self.TABLE_NOTE_INTERNAL,
                              ],
            'models':[{
                'model': 'ir.attachment',
                'context': lambda : {'active_test':False, 'quick_import':True},
                'hook': self.hook_note,
                'finalize': self.finalize_note,
                'fields': {
                    'id': xml_id(self.TABLE_NOTE, 'id'),
                    'name':'filename',
                    'datas_fname':'filename',
                    'res_model': 'res_model',
                    'res_id': 'res_id',
                    'res_model_tmp': const('mail.message'),
                    'res_id_tmp': res_id(map_val('parent_type', self.map_note_to_table, default=self.TABLE_NOTE_INTERNAL), 'id'),
                    # file content lives on disk under sugarcrm_files/<id>
                    'store_fname': call(lambda external_values, id_value: 'sugarcrm_files/' + id_value,
                                        value('id')),
                    'type':const('binary'),
                    #'description': 'description',
                    'description': const(''),
                    'create_date': 'date_entered',
                    'create_uid/id': xml_id(self.TABLE_USER, 'create_by'),
                    'company_id/id': const('base.main_company'),
                }
            }]
        }
    def get_mapping_note_internal(self):
        """Import mapping for non-email notes -> ``mail.message``
        (the note text becomes a message on the parent record)."""
        return {
            'name': self.TABLE_NOTE_INTERNAL,
            'table': self.table_note_internal,
            'dependencies' : [self.TABLE_EMAIL,
                              ],
            'models':[{
                'model': 'mail.message',
                'hook': self.hook_note,
                'fields': {
                    'id': xml_id(self.TABLE_NOTE_INTERNAL, 'id'),
                    'subject':concat('name', 'filename', 'date_entered', delimiter=' * '),
                    'body': call(lambda vals, body: fix_email(body or ''),
                                 value('description')),
                    # model/res_id are filled in by hook_note
                    'model': 'res_model',
                    'res_id': 'res_id',
                    'type':const('email'),
                    'date': 'date_entered',
                    'author_id/id': user2partner(self.TABLE_USER, 'created_by'),
                    #'subtype_id/id':const('mail.mt_comment'),
                }
            }]
        }
def get_mapping_history_attachment(self):
# is not used
res.append({
'name': self.TABLE_HISTORY_ATTACHMNET,
'model' : 'ir.attachment',
'dependencies' : [self.TABLE_USER, self.TABLE_ACCOUNT, self.TABLE_CONTACT, self.TABLE_LEAD, self.TABLE_OPPORTUNITY, self.TABLE_MEETING, self.TABLE_CALL, self.TABLE_EMAIL],
'hook' : import_history,
'models':[{
'fields': {
'name':'name',
'user_id/id': ref(self.TABLE_USER, 'created_by'),
'description': ppconcat('description', 'description_html'),
'res_id': 'res_id',
'res_model': 'model',
'partner_id/.id' : 'partner_id/.id',
'datas' : 'datas',
'datas_fname' : 'datas_fname'
}
}]
})
def get_mapping_bug():
# is not used
return {
'name': self.TABLE_BUG,
'model' : 'project.issue',
'dependencies' : [self.TABLE_USER],
'models':[{
'fields': {
'name': concat('bug_number', 'name', delimiter='-'),
'project_id/id': call(get_bug_project_id, 'sugarcrm_bugs'),
'categ_id/id': call(get_category, 'project.issue', value('type')),
'description': ppconcat('description', 'source', 'resolution', 'work_log', 'found_in_release', 'release_name', 'fixed_in_release_name', 'fixed_in_release'),
'priority': get_project_issue_priority,
'state': map_val('status', project_issue_state),
'assigned_to/id' : ref(self.TABLE_USER, 'assigned_user_id'),
}
}]
}
    def get_mapping_project(self):
        """Import mapping for SugarCRM Projects -> ``project.project``
        (currently unused)."""
        # is not used
        return {
            'name': self.TABLE_PROJECT,
            'model' : 'project.project',
            'dependencies' : [self.TABLE_CONTACT, self.TABLE_ACCOUNT, self.TABLE_USER],
            'hook' : import_project,
            'models':[{
                'fields': {
                    'name': 'name',
                    'date_start': 'estimated_start_date',
                    'date': 'estimated_end_date',
                    'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
                    'partner_id/.id': 'partner_id/.id',
                    'contact_id/.id': 'contact_id/.id',
                    'state': map_val('status', project_state)
                }
            }]
        }
    def get_mapping_project_task(self):
        """Import mapping for SugarCRM Project Tasks -> ``project.task``
        (currently unused)."""
        # is not used
        return {
            'name': self.TABLE_PROJECT_TASK,
            'model' : 'project.task',
            'dependencies' : [self.TABLE_USER, self.TABLE_PROJECT],
            'models':[{
                'fields': {
                    'name': 'name',
                    'date_start': 'date_start',
                    'date_end': 'date_finish',
                    'project_id/id': ref(self.TABLE_PROJECT, 'project_id'),
                    'planned_hours': 'estimated_effort',
                    'priority': get_project_task_priority,
                    'description': ppconcat('description','milestone_flag', 'project_task_id', 'task_number', 'percent_complete'),
                    'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
                    'partner_id/id': 'partner_id/id',
                    'contact_id/id': 'contact_id/id',
                    'state': map_val('status', project_task_state)
                }
            }]
        }
    def get_mapping_task(self):
        """Import mapping for SugarCRM Tasks -> ``crm.meeting``
        (currently unused)."""
        # is not used
        return {
            'name': self.TABLE_TASK,
            'model' : 'crm.meeting',
            'dependencies' : [self.TABLE_CONTACT, self.TABLE_ACCOUNT, self.TABLE_USER],
            'hook' : import_task,
            'models':[{
                'fields': {
                    'name': 'name',
                    'date': 'date',
                    'date_deadline': 'date_deadline',
                    'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
                    'categ_id/id': call(get_category, 'crm.meeting', const('Tasks')),
                    'partner_id/id': related_ref(self.TABLE_ACCOUNT),
                    'partner_address_id/id': ref(self.TABLE_CONTACT,'contact_id'),
                    'state': map_val('status', task_state)
                }
            }]
        }
    def get_mapping_call(self):
        """Import mapping for SugarCRM Calls -> ``crm.phonecall``
        (currently unused)."""
        # is not used
        return {
            'name': self.TABLE_CALL,
            'model' : 'crm.phonecall',
            'dependencies' : [self.TABLE_ACCOUNT, self.TABLE_CONTACT, self.TABLE_OPPORTUNITY, self.TABLE_LEAD],
            'models':[{
                'fields': {
                    'name': 'name',
                    'date': 'date_start',
                    'duration': call(get_float_time, value('duration_hours'), value('duration_minutes')),
                    'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
                    'partner_id/id': related_ref(self.TABLE_ACCOUNT),
                    'partner_address_id/id': related_ref(self.TABLE_CONTACT),
                    'categ_id/id': call(get_category, 'crm.phonecall', value('direction')),
                    'opportunity_id/id': related_ref(self.TABLE_OPPORTUNITY),
                    'description': ppconcat('description'),
                    'state': map_val('status', call_state)
                }
            }]
        }
    def get_mapping_meeting(self):
        """Import mapping for SugarCRM Meetings -> ``crm.meeting``
        (currently unused)."""
        # is not used
        return {
            'name': self.TABLE_MEETING,
            'model' : 'crm.meeting',
            'dependencies' : [self.TABLE_CONTACT, self.TABLE_OPPORTUNITY, self.TABLE_LEAD, self.TABLE_TASK],
            'hook': import_meeting,
            'models':[{
                'fields': {
                    'name': 'name',
                    'date': 'date_start',
                    'duration': call(get_float_time, value('duration_hours'), value('duration_minutes')),
                    'location': 'location',
                    'attendee_ids/id':'attendee_ids/id',
                    'alarm_id/id': call(get_alarm_id, value('reminder_time')),
                    'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
                    'partner_id/id': related_ref(self.TABLE_ACCOUNT),
                    'partner_address_id/id': related_ref(self.TABLE_CONTACT),
                    'state': map_val('status', meeting_state)
                }
            }]
        }
    def get_mapping_opportunity(self):
        """Import mapping for SugarCRM Opportunities -> ``crm.lead``
        (currently unused)."""
        # is not used
        return {
            'name': self.TABLE_OPPORTUNITY,
            'model' : 'crm.lead',
            'dependencies' : [self.TABLE_USER, self.TABLE_ACCOUNT, self.TABLE_CONTACT,self.TABLE_COMPAIGN],
            'hook' : import_opp,
            'models':[{
                'fields': {
                    'name': 'name',
                    'probability': 'probability',
                    'partner_id/id': refbyname(self.TABLE_ACCOUNT, 'account_name', 'res.partner'),
                    'title_action': 'next_step',
                    'partner_address_id/id': 'partner_address_id/id',
                    'planned_revenue': 'amount',
                    'date_deadline': 'date_closed',
                    'user_id/id' : ref(self.TABLE_USER, 'assigned_user_id'),
                    'stage_id/id' : get_opportunity_status,
                    'type' : const('opportunity'),
                    'categ_id/id': call(get_category, 'crm.lead', value('opportunity_type')),
                    'email_from': 'email_from',
                    'state': map_val('status', opp_state),
                    'description' : 'description',
                }
            }]
        }
    def get_mapping_compaign(self):
        """Import mapping for SugarCRM Campaigns (sic) ->
        ``crm.case.resource.type`` (currently unused)."""
        # is not used
        return {
            'name': self.TABLE_COMPAIGN,
            'model' : 'crm.case.resource.type',
            'models':[{
                'fields': {
                    'name': 'name',
                }
            }]
        }
    def get_mapping_employee(self):
        """Import mapping for SugarCRM Employees -> ``hr.employee``
        (currently unused)."""
        # is not used
        return {
            'name': self.TABLE_EMPLOYEE,
            'model' : 'hr.employee',
            'dependencies' : [self.TABLE_USER],
            'models':[{
                'fields': {
                    'resource_id/id': get_ressource,
                    'name': concat('first_name', 'last_name'),
                    'work_phone': 'phone_work',
                    'mobile_phone': 'phone_mobile',
                    'user_id/id': ref(self.TABLE_USER, 'id'),
                    'address_home_id/id': get_user_address,
                    'notes': ppconcat('messenger_type', 'messenger_id', 'description'),
                    'job_id/id': get_job_id,
                    'work_email' : 'email1',
                    'coach_id/id_parent' : 'reports_to_id',
                }
            }]
        }
| lgpl-3.0 |
jburos/stancache | stancache/stancache.py | 1 | 11994 | import os
import pickle
import dill
import pystan
import hashlib
import base64
import logging
from fnmatch import fnmatch
import ntpath
from . import seed
from time import time
from datetime import timedelta
import pandas as pd
import re
import Cython
from . import config
import types
import numpy as np
import xxhash
import sys
logger = logging.getLogger(__name__)
def _mkdir_if_not_exists(path):
try:
os.mkdir(path)
except:
pass
def _make_digest_dataframe(item):
    """Digest a pandas DataFrame by hashing its index, columns and cell
    values together as one nested tuple."""
    frame_key = (
        tuple(item.index),
        tuple(item.columns),
        tuple(tuple(row) for row in item.values),
    )
    return _pickle_dumps_digest(frame_key)
def _xxhash_item(item):
    """Return the 64-bit xxHash of *item* as an unsigned integer."""
    return xxhash.xxh64(item).intdigest()
def _pickle_dumps_digest(item):
    """Pickle *item* and return the integer digest of the pickled bytes."""
    return _digest(pickle.dumps(item))
def _digest(s):
h = int(hashlib.sha1(s).hexdigest(), 16) % (10 ** 11)
return h
def _make_digest_dict(k, prefix=''):
    """Hash every value of dict *k* into a {prefixed_key: token} dict.

    Each value is hashed with a strategy chosen by its type (short strings
    and ints are embedded literally; DataFrames, Series, arrays, nested
    dicts, functions and everything else get a numeric digest).  Returns
    ``None`` for an empty dict.

    :param k: dict of values to digest
    :param prefix: string prepended to every key (used for nested dicts)
    """
    result = dict()
    if len(k) == 0:
        return None
    for (key, item) in sorted(k.items()):
        pre_key = '{}{}'.format(prefix, key)
        if isinstance(item, str) and len(item) <= 11:
            # short strings are kept readable, only sanitized
            logger.debug('processing item ({}) as str'.format(pre_key))
            s = re.sub(string=item, pattern='[\.\-]', repl='_')
            result.update({pre_key: s})
        elif isinstance(item, int) and len(str(item)) <= 11:
            # short ints likewise embedded as-is
            logger.debug('processing item ({}) as int'.format(pre_key))
            s = re.sub(string=str(item), pattern='[\.\-]', repl='_')
            result.update({pre_key: s})
        elif isinstance(item, dict):
            # nested dicts are digested recursively, then re-hashed
            logger.debug('processing item ({}) as dict'.format(pre_key))
            item = dict(sorted(item.items()))
            s = _make_digest(item, prefix=key+'-')
            result.update({pre_key: _digest(s.encode())})
        elif isinstance(item, pd.DataFrame):
            logger.debug('processing item ({}) as dataframe'.format(pre_key))
            s = _make_digest_dataframe(item)
            result.update({pre_key: s})
        elif isinstance(item, pd.Series):
            logger.debug('processing item ({}) as pd.Series'.format(pre_key))
            s = _xxhash_item(item.values)
            result.update({pre_key: s})
        elif isinstance(item, np.matrixlib.defmatrix.matrix):
            # np.matrix does not expose a buffer xxhash accepts; pickle it
            logger.debug('processing item ({}) as np.matrixlib.defmatrix.matrix'.format(pre_key))
            s = _pickle_dumps_digest(item)
            result.update({pre_key: s})
        elif isinstance(item, np.ndarray):
            logger.debug('processing item ({}) as np.ndarray'.format(pre_key))
            s = _xxhash_item(item)
            result.update({pre_key: s})
        elif isinstance(item, types.FunctionType):
            # hash the function's source text so edits invalidate the cache
            logger.debug('processing item ({}) as function'.format(pre_key))
            try:
                s = _pickle_dumps_digest(str(dill.source.getsource(item)))
            except:
                s = 'unhashable'
            result.update({pre_key: s})
        else:
            # fall back: try the fast buffer hash, then pickle
            try:
                logger.debug('processing item ({}) as other (using xxhash)'.format(pre_key))
                s = _xxhash_item(item)
            except:
                logger.debug('processing item ({}) as other (using pickle)'.format(pre_key))
                s = _pickle_dumps_digest(item)
                logger.debug('note: item ({}) is of type: {}'.format(pre_key, item.__class__))
            result.update({pre_key: s})
    return result
def _make_digest(k, **kwargs):
    """Build a filename-friendly digest string from the dict *k*.

    Each key/value pair is hashed individually (see ``_make_digest_dict``)
    and the pieces are joined as ``key_hash.key_hash...`` in sorted key
    order.  Returns ``'default'`` for an empty dict.

    :param k: dict of values to digest
    :param kwargs: passed through to ``_make_digest_dict`` (e.g. prefix)
    """
    result = list()
    result_dict = _make_digest_dict(k, **kwargs)
    if result_dict is None:
        return 'default'
    else:
        for (key, h) in sorted(result_dict.items()):
            result.append('{}_{}'.format(key, h))
    return '.'.join(result)
def _get_cache_dir(cache_dir=None):
    """Resolve the cache directory (falling back to the configured
    CACHE_DIR setting) and make sure it exists on disk."""
    resolved = config.get_setting_value('CACHE_DIR') if cache_dir is None else cache_dir
    logger.debug('cache_dir set to {}'.format(resolved))
    _mkdir_if_not_exists(resolved)
    return resolved
def cached_model_file(model_name='anon_model', file=None, model_code=None, cache_dir=None,
                      fit_cachefile=None, include_prefix=False):
    ''' Given model name & stan model code/file, compute path to cached stan fit
        if include_prefix, returns (model_prefix, model_cachefile)

        The prefix hashes the model code together with the pystan and
        Cython versions, so upgrading either invalidates the cache.  When
        only fit_cachefile is given, the prefix is parsed back out of that
        file name instead.
    '''
    cache_dir = _get_cache_dir(cache_dir)
    model_name = _sanitize_model_name(model_name)
    ## compute model prefix
    if file:
        model_code = _read_file(file)
    if model_code:
        model_prefix = '.'.join([model_name, _make_digest(dict(model_code=model_code,
                                                               pystan=pystan.__version__,
                                                               cython=Cython.__version__))])
    else: ## handle case where no model code given
        if file is not None:
            logger.info('Note - no model code detected from given file: {}'.format(file))
        else:
            logger.info('Note - no model code detected (neither file nor model_code given)')
        ## parse model_prefix from fit_cachefile if given
        if fit_cachefile:
            # if necessary, impute cache_dir from filepath
            if fit_cachefile != os.path.basename(fit_cachefile):
                cache_dir, fit_cachefile = os.path.split(os.path.abspath(fit_cachefile))
            # if fit_cachefile given, parse to get fit_model_prefix
            fit_model_prefix = re.sub(string=os.path.basename(fit_cachefile), pattern='(.*).stanfit.*', repl='\\1')
    if model_code:
        if fit_cachefile:
            if fit_model_prefix != model_prefix:
                # the given fit file wins over the recomputed prefix
                logger.warning('Computed model prefix does not match that used to estimate model. Using prefix matching fit_cachefile')
                model_prefix = fit_model_prefix
    # compute path to model cachefile
    model_cachefile = '.'.join([model_prefix, 'stanmodel', 'pkl'])
    if include_prefix:
        return model_prefix, model_cachefile
    return model_cachefile
def cached_stan_file(model_name='anon_model', file=None, model_code=None,
                     cache_dir=None, fit_cachefile=None, cache_only=None, force=False,
                     include_modelfile=False, prefix_only=False,
                     **kwargs
                     ):
    ''' Given inputs to cached_stan_fit, compute pickle file containing cached fit

        The fit file name is ``<model_prefix>.stanfit.<kwargs-digest>.pkl``.
        ``cache_only`` and ``force`` are accepted for signature
        compatibility with cached_stan_fit but are not used here.
    '''
    model_prefix, model_cachefile = cached_model_file(model_name=model_name, file=file, model_code=model_code,
                                                      cache_dir=cache_dir, fit_cachefile=fit_cachefile, include_prefix=True)
    if not fit_cachefile:
        fit_cachefile = '.'.join([model_prefix, 'stanfit', _make_digest(dict(**kwargs)), 'pkl'])
    if include_modelfile:
        return model_cachefile, fit_cachefile
    if prefix_only:
        # strip the trailing '.pkl' to get the fit-file prefix
        fit_cachefile = re.sub(string=fit_cachefile, pattern='.pkl$', repl='')
    return fit_cachefile
def _sanitize_model_name(model_name):
if model_name:
model_name = re.sub(string=model_name, pattern='[\.\-]', repl='_')
return model_name
def _get_model_code(model_code=None, file=None):
## compute model prefix
if file:
model_code = _read_file(file)
if not model_code:
if file is not None:
logger.info('Note - no model code detected from given file: {}'.format(file))
else:
logger.info('Note - no model code detected (neither file nor model_code given)')
return model_code
def _cached_stan_fit(model_name='anon_model', file=None, model_code=None,
                     force=False, cache_dir=None, cache_only=None,
                     fit_cachefile=None, **kwargs):
    ''' Cache fit stan model, by storing pickled objects in filesystem

        Two-step caching: (1) the compiled StanModel, (2) the sampled fit.
        The fit pickle only loads correctly if the model pickle is loaded
        first, per following warning:

        07: UserWarning: Pickling fit objects is an experimental feature!
        The relevant StanModel instance must be pickled along with this fit object.
        When unpickling the StanModel must be unpickled first.
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
    '''
    # an explicit fit_cachefile implies we must not re-fit
    if fit_cachefile and cache_only is None:
        cache_only = True
    model_cachefile, fit_cachefile = cached_stan_file(model_name=model_name, file=file, model_code=model_code,
                                                      cache_dir=cache_dir, fit_cachefile=fit_cachefile,
                                                      include_modelfile=True, **kwargs)
    cache_dir = _get_cache_dir(cache_dir)
    model_name = _sanitize_model_name(model_name)
    model_code = _get_model_code(model_code=model_code, file=file)
    logger.info('Step 1: Get compiled model code, possibly from cache')
    stan_model = cached(func=pystan.StanModel,
                        cache_filename=model_cachefile,
                        model_code=model_code,
                        cache_dir=cache_dir,
                        model_name=model_name,
                        cache_only=cache_only,
                        force=force)
    ## either pull fitted model from cache, or fit model
    logger.info('Step 2: Get posterior draws from model, possibly from cache')
    fit = cached(func=stan_model.sampling,
                 cache_filename=fit_cachefile,
                 cache_dir=cache_dir,
                 force=force,
                 cache_only=cache_only,
                 **kwargs)
    return fit
def _read_file(filepath):
with open(filepath, 'r') as myfile:
data = myfile.read()
return data
def cached_stan_fit(iter=2000, chains=4, seed=None, *args, **kwargs):
    """Fit (or load from cache) a Stan model via ``_cached_stan_fit``.

    :param iter: number of sampling iterations per chain
    :param chains: number of MCMC chains
    :param seed: RNG seed; defaults to the configured SEED setting
    :raises ValueError: if extra positional arguments are supplied
    """
    # Bug fix: the original used ``list(*args)``, which unpacks the tuple
    # into ``list()`` and raises TypeError whenever positional args are
    # actually passed — the intended ValueError below was unreachable.
    arglist = list(args)
    if len(arglist) > 0:
        raise ValueError('unnamed args not permitted')
    if seed is None:
        seed = config.get_setting_value('SEED')
    return _cached_stan_fit(seed=seed, iter=iter, chains=chains, **kwargs)
def cached(func, file_prefix='cached', cache_filename=None,
           cache_dir=None, force=False, cache_only=False,
           compute_hash=True, *args, **kwargs):
    """Disk-memoize *func*: pickle its result under a digest-based file
    name and reuse the pickle on later calls with the same kwargs.

    :param func: callable to execute; called with **kwargs only
    :param file_prefix: middle component of the auto-generated file name
    :param cache_filename: explicit cache file name (skips digest naming)
    :param cache_dir: cache directory; defaults to the configured CACHE_DIR
    :param force: recompute even if a cache file exists
    :param cache_only: raise instead of computing when no cache file exists
    :param compute_hash: unused; kept for backward compatibility
    :raises ValueError: on extra positional args, or when cache_only is set
        and no cache file exists
    :return: the (possibly cached) result of ``func(**kwargs)``
    """
    cache_dir = _get_cache_dir(cache_dir)
    if not cache_filename:
        # Bug fix: ``list(args)`` not ``list(*args)`` (see cached_stan_fit).
        arglist = list(args)
        if len(arglist) > 0:
            raise ValueError('unnamed args not permitted')
        cache_filename = '.'.join([func.__name__, file_prefix, _make_digest(dict(**kwargs)), 'pkl'])
    logger.info('{}: cache_filename set to {}'.format(func.__name__, cache_filename))
    cache_filepath = os.path.join(cache_dir, cache_filename)
    logger.debug('{}: cache_filepath set to {}'.format(func.__name__, cache_filepath))
    if not force and os.path.exists(cache_filepath):
        try:
            logger.info('{}: Loading result from cache'.format(func.__name__))
            # ``with`` closes the handle even when unpickling fails
            # (the original leaked the file object on error)
            with open(cache_filepath, 'rb') as fh:
                res = pickle.load(fh)
        except ImportError as e:
            # log through the module logger instead of bare print()
            logger.warning('{}: Error loading from cache: {}'.format(func.__name__, str(e)))
        else:
            return res
    if cache_only:
        raise ValueError('{}: Cachefile does not exist and cache_only == True. Exiting with failure.'.format(func.__name__))
    logger.info('{}: Starting execution'.format(func.__name__))
    start = time()
    res = func(**kwargs)
    end = time()
    elapsed = str(timedelta(seconds=end - start))
    logger.info('{}: Execution completed ({} elapsed)'.format(func.__name__, elapsed))
    try:
        _mkdir_if_not_exists(cache_dir)
        logger.info('{}: Saving results to cache'.format(func.__name__))
        with open(cache_filepath, 'wb') as fh:
            pickle.dump(res, fh, pickle.HIGHEST_PROTOCOL)
    except IOError as e:
        logger.warning("{}: I/O error saving to cache ({}): {}".format(func.__name__, e.errno, e.strerror))
    except Exception:
        # narrowed from a bare ``except``: caching stays best-effort, but
        # KeyboardInterrupt/SystemExit now propagate
        logger.warning('{}: Unexpected error saving to cache: {}'.format(func.__name__, sys.exc_info()[0]))
    return res
| apache-2.0 |
PascalSteger/twiddle | analysis/plot_isobartest.py | 1 | 1061 | #!/usr/bin/env python3
## \file
# Plot species abundances produced by the isobar test.
# The input column file is generated with:
#   cat rectest.log|grep "Y:"|cut -d":" -f2|pr -s -t -l9|tee rectest.col
# Usage: plot_isobartest.py <infile> <outfile>
import sys
infile = sys.argv[1]; outfile = sys.argv[2]
from matplotlib import pyplot as PLT
fig = PLT.figure()
ax1 = fig.add_subplot(111)
import numpy as NP
with open(infile) as f:
    # unpack=True: v[i] is the i-th column of the input file
    v = NP.loadtxt(f, dtype='float', comments="#", skiprows=0, unpack=True)#delimiter=",", usecols=[col]
print(v)
import math
import scipy
# v[0] is presumably the scale factor a, giving redshift z = 1/a - 1
# -- TODO confirm against the rectest.log column layout
z = 1/v[0]-1
# one curve per species abundance column
PLT.plot(z,v[4],c='black',label='e')
PLT.plot(z,v[5],c='red',label='HI')
PLT.plot(z,v[6],c='orange',label='HII')
PLT.plot(z,v[7],c='green',label='HeI')
PLT.plot(z,v[8],c='cyan',label='HeII')
PLT.plot(z,v[9],c='violet',label='HeIII')
PLT.plot(z,v[10],c='black',label='H-')
PLT.plot(z,v[11],c='blue',label='H2')
PLT.plot(z,v[12],c='brown',label='H2*')
PLT.xscale('log'); PLT.yscale('log')
#PLT.xlim(0.5,512)
#PLT.ylim(10**-24,10**0)
PLT.xlabel(r'z')
#PLT.xticks(NP.logspace(0,2,3),['1','10','100'])
PLT.ylabel(r'$Y_i$')
#PLT.yticks(NP.logspace(-7,3,6))
PLT.legend(loc=2)
PLT.savefig(outfile)
| gpl-2.0 |
kartikp1995/gnuradio | gr-fec/python/fec/polar/decoder.py | 24 | 10396 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy as np
from common import PolarCommon
# for dev
from encoder import PolarEncoder
from matplotlib import pyplot as plt
class PolarDecoder(PolarCommon):
def __init__(self, n, k, frozen_bit_position, frozenbits=None):
PolarCommon.__init__(self, n, k, frozen_bit_position, frozenbits)
self.error_probability = 0.1 # this is kind of a dummy value. usually chosen individually.
self.lrs = ((1 - self.error_probability) / self.error_probability, self.error_probability / (1 - self.error_probability))
self.llrs = np.log(self.lrs)
def _llr_bit(self, bit):
return self.llrs[bit]
def _llr_odd(self, la, lb):
# this functions uses the min-sum approximation
# exact formula: np.log((np.exp(la + lb) + 1) / (np.exp(la) + np.exp(lb)))
return np.sign(la) * np.sign(lb) * np.minimum(np.abs(la), np.abs(lb))
_f_vals = np.array((1.0, -1.0), dtype=float)
def _llr_even(self, la, lb, f):
return (la * self._f_vals[f]) + lb
def _llr_bit_decision(self, llr):
if llr < 0.0:
ui = int(1)
else:
ui = int(0)
return ui
def _retrieve_bit_from_llr(self, lr, pos):
f_index = np.where(self.frozen_bit_position == pos)[0]
if not f_index.size == 0:
ui = self.frozenbits[f_index][0]
else:
ui = self._llr_bit_decision(lr)
return ui
def _lr_bit(self, bit):
return self.lrs[bit]
def _lr_odd(self, la, lb):
# la is upper branch and lb is lower branch
return (la * lb + 1) / (la + lb)
def _lr_even(self, la, lb, f):
# la is upper branch and lb is lower branch, f is last decoded bit.
return (la ** (1 - (2 * f))) * lb
def _lr_bit_decision(self, lr):
if lr < 1:
return int(1)
return int(0)
def _get_even_indices_values(self, u_hat):
# looks like overkill for some indexing, but zero and one based indexing mix-up gives you haedaches.
return u_hat[1::2]
def _get_odd_indices_values(self, u_hat):
return u_hat[0::2]
def _calculate_lrs(self, y, u):
ue = self._get_even_indices_values(u)
uo = self._get_odd_indices_values(u)
ya = y[0:y.size//2]
yb = y[(y.size//2):]
la = self._lr_decision_element(ya, (ue + uo) % 2)
lb = self._lr_decision_element(yb, ue)
return la, lb
def _lr_decision_element(self, y, u):
if y.size == 1:
return self._llr_bit(y[0])
if u.size % 2 == 0: # use odd branch formula
la, lb = self._calculate_lrs(y, u)
return self._llr_odd(la, lb)
else:
ui = u[-1]
la, lb = self._calculate_lrs(y, u[0:-1])
return self._llr_even(la, lb, ui)
def _retrieve_bit_from_lr(self, lr, pos):
f_index = np.where(self.frozen_bit_position == pos)[0]
if not f_index.size == 0:
ui = self.frozenbits[f_index][0]
else:
ui = self._lr_bit_decision(lr)
return ui
def _lr_sc_decoder(self, y):
# this is the standard SC decoder as derived from the formulas. It sticks to natural bit order.
u = np.array([], dtype=int)
for i in range(y.size):
lr = self._lr_decision_element(y, u)
ui = self._retrieve_bit_from_llr(lr, i)
u = np.append(u, ui)
return u
def _llr_retrieve_bit(self, llr, pos):
f_index = np.where(self.frozen_bit_position == pos)[0]
if not f_index.size == 0:
ui = self.frozenbits[f_index][0]
else:
ui = self._llr_bit_decision(llr)
return ui
def _butterfly_decode_bits(self, pos, graph, u):
bit_num = u.size
llr = graph[pos][0]
ui = self._llr_retrieve_bit(llr, bit_num)
# ui = self._llr_bit_decision(llr)
u = np.append(u, ui)
lower_right = pos + (self.N // 2)
la = graph[pos][1]
lb = graph[lower_right][1]
graph[lower_right][0] = self._llr_even(la, lb, ui)
llr = graph[lower_right][0]
# ui = self._llr_bit_decision(llr)
ui = self._llr_retrieve_bit(llr, u.size)
u = np.append(u, ui)
return graph, u
def _lr_sc_decoder_efficient(self, y):
graph = np.full((self.N, self.power + 1), np.NaN, dtype=float)
for i in range(self.N):
graph[i][self.power] = self._llr_bit(y[i])
decode_order = self._vector_bit_reversed(np.arange(self.N), self.power)
decode_order = np.delete(decode_order, np.where(decode_order >= self.N // 2))
u = np.array([], dtype=int)
for pos in decode_order:
graph = self._butterfly(pos, 0, graph, u)
graph, u = self._butterfly_decode_bits(pos, graph, u)
return u
def _stop_propagation(self, bf_entry_row, stage):
# calculate break condition
modulus = 2 ** (self.power - stage)
# stage_size = self.N // (2 ** stage)
# half_stage_size = stage_size // 2
half_stage_size = self.N // (2 ** (stage + 1))
stage_pos = bf_entry_row % modulus
return stage_pos >= half_stage_size
def _butterfly(self, bf_entry_row, stage, graph, u):
    # Recursively fill the LLR graph to the right of (bf_entry_row, stage)
    # and then combine the two stage+1 neighbours into this node.
    if not self.power > stage:
        # Past the last stage: the channel-LLR column is already filled.
        return graph
    if self._stop_propagation(bf_entry_row, stage):
        # Lower half of a stage block: only the "even" combination with
        # the most recently decided bit is needed; no further recursion.
        upper_right = bf_entry_row - self.N // (2 ** (stage + 1))
        la = graph[upper_right][stage + 1]
        lb = graph[bf_entry_row][stage + 1]
        ui = u[-1]
        graph[bf_entry_row][stage] = self._llr_even(la, lb, ui)
        return graph
    # activate right side butterflies
    # Split the decided bits into the partial sums each branch needs.
    u_even = self._get_even_indices_values(u)
    u_odd = self._get_odd_indices_values(u)
    graph = self._butterfly(bf_entry_row, stage + 1, graph, (u_even + u_odd) % 2)
    lower_right = bf_entry_row + self.N // (2 ** (stage + 1))
    graph = self._butterfly(lower_right, stage + 1, graph, u_even)
    # Combine both right-hand neighbours with the "odd" rule.
    la = graph[bf_entry_row][stage + 1]
    lb = graph[lower_right][stage + 1]
    graph[bf_entry_row][stage] = self._llr_odd(la, lb)
    return graph
def decode(self, data, is_packed=False):
    """Decode one received block and return its information bits.

    Raises ValueError when the block length does not match N. With
    ``is_packed`` the input is unpacked first and the result repacked.
    """
    if len(data) != self.N:
        raise ValueError("len(data)={0} is not equal to n={1}!".format(len(data), self.N))
    codeword = np.unpackbits(data) if is_packed else data
    info = self._extract_info_bits(self._lr_sc_decoder_efficient(codeword))
    return np.packbits(info) if is_packed else info
def _extract_info_bits_reversed(self, y):
info_bit_positions_reversed = self._vector_bit_reversed(self.info_bit_position, self.power)
return y[info_bit_positions_reversed]
def decode_systematic(self, data):
    """Decode a systematically encoded block and return the info bits.

    Raises ValueError when the block length does not match N.
    """
    if len(data) != self.N:
        raise ValueError("len(data)={0} is not equal to n={1}!".format(len(data), self.N))
    u_hat = self._lr_sc_decoder_efficient(data)
    re_encoded = self._encode_natural_order(u_hat)
    return self._extract_info_bits_reversed(re_encoded)
def test_systematic_decoder():
    """Round-trip random payloads through the systematic encoder/decoder."""
    n_trials = 1000
    n, k = 16, 8
    frozen_positions = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
    encoder = PolarEncoder(n, k, frozen_positions)
    decoder = PolarDecoder(n, k, frozen_positions)
    for _ in range(n_trials):
        payload = np.random.randint(2, size=k)
        received = encoder.encode_systematic(payload)
        recovered = decoder.decode_systematic(received)
        assert (payload == recovered).all()
def test_reverse_enc_dec():
    """Encode one random payload, decode it, and print the comparison.

    Converted the Python 2 print statements to print() calls — the old
    form is a SyntaxError on Python 3.
    """
    n, k = 16, 8
    frozenbits = np.zeros(n - k)
    frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
    bits = np.random.randint(2, size=k)
    encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
    decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
    encoded = encoder.encode(bits)
    print('encoded:', encoded)
    rx = decoder.decode(encoded)
    print('bits:', bits)
    print('rx :', rx)
    print((bits == rx).all())
def compare_decoder_impls():
    """Check the naive and butterfly SC decoders agree on one block.

    Converted the Python 2 print statements to print() calls — the old
    form is a SyntaxError on Python 3.
    """
    print('\nthis is decoder test')
    n, k = 8, 4
    frozenbits = np.zeros(n - k)
    # frozenbitposition16 = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
    frozenbitposition = np.array((0, 1, 2, 4), dtype=int)
    bits = np.random.randint(2, size=k)
    print('bits:', bits)
    encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
    decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
    encoded = encoder.encode(bits)
    print('encoded:', encoded)
    rx_st = decoder._lr_sc_decoder(encoded)
    rx_eff = decoder._lr_sc_decoder_efficient(encoded)
    print('standard :', rx_st)
    print('efficient:', rx_eff)
    print((rx_st == rx_eff).all())
def main():
    # Earlier manual experiments (single-block encode/decode round trips
    # and the encoder/decoder comparisons above) are intentionally not
    # run; only the systematic decoder self-test is exercised.
    test_systematic_decoder()


if __name__ == '__main__':
    main()
| gpl-3.0 |
xju2/HZZ_llvv_ws | HZZ_llvv_ws/interpolate_acceptance.py | 1 | 1820 | # -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))), '..')))
from HZZ_llvv_ws import helper
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
from scipy import interpolate
import numpy as np
def my_interpolate(file_name, prod='ggH'):
    r"""Interpolate per-category acceptances over mass and write a table.

    Parameters
    ----------
    file_name : str
        Path of the yields text file understood by ``helper.read_yield_input``.
    prod : str
        Production-mode tag used to build the ``Ae_<prod><mass>`` keys.

    Writes ``yields_<prod>.txt`` with one row per 10 GeV mass point.
    """
    yields_dic, categories = helper.read_yield_input(file_name)
    # print() call — the original Python 2 print statement is a
    # SyntaxError on Python 3.
    print(categories)
    masses = [300, 400, 500, 600, 700, 800, 900, 1000, 1200]
    # Collect the acceptance values per category at the simulated masses.
    all_yields_dic = {}
    for ich, category in enumerate(categories):
        all_yields_dic[category] = list(map((lambda x: yields_dic['Ae_{}{}'.format(prod, x)][ich]), masses))
    # Build one 1-D spline interpolator per category.
    # (.items() — dict.iteritems() was removed in Python 3.)
    interp_list = {}
    for category, yields in all_yields_dic.items():
        f = interpolate.interp1d(masses, yields, 'slinear')
        interp_list[category] = f
        # Plotting below is deliberately disabled ("don't make plots").
        continue
        new_masses = np.arange(300, 1200, 10)
        new_yields = f(new_masses)
        plt.plot(masses, yields, 'o')
        plt.plot(new_masses, new_yields, '-')
        plt.xlabel('$m_{H}$ [GeV]')
        plt.ylabel('Yields for 1 $fb^{-1}$')
        plt.savefig('plots/acceptance/acceptance_{}_{}.png'.format(prod, category))
        plt.close()
    # Tabulate interpolated values on a 10 GeV grid, one row per mass.
    out_text = ""
    for mass in range(300, 1210, 10):
        out_text += "Ae_{}{} & ".format(prod, mass)
        out_text += ' & '.join([np.array_str(interp_list[x](mass)) for x in categories])
        out_text += '\n'
    with open('yields_{}.txt'.format(prod), 'w') as f:
        f.write(out_text)
if __name__ == "__main__":
    # Produce interpolated acceptance tables for both production modes.
    for production_mode in ('ggH', "VBFH"):
        my_interpolate('inputs/Yields_13TeV.txt', production_mode)
| mit |
rs2/pandas | pandas/tests/plotting/test_boxplot_method.py | 2 | 18166 | import itertools
import string
import numpy as np
from numpy import random
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.plotting as plotting
""" Test cases for .boxplot method """
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
    """Tests for ``DataFrame.boxplot`` on plain (non-grouped) frames:
    legacy call forms, return types, axis limits, colors and props."""

    @pytest.mark.slow
    def test_boxplot_legacy1(self):
        # Exercise the legacy call signatures (column=, by=, notch=).
        df = DataFrame(
            np.random.randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        df["indic"] = ["foo", "bar"] * 3
        df["indic2"] = ["foo", "bar", "foo"] * 2
        _check_plot_works(df.boxplot, return_type="dict")
        _check_plot_works(df.boxplot, column=["one", "two"], return_type="dict")
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, column=["one", "two"], by="indic")
        _check_plot_works(df.boxplot, column="one", by=["indic", "indic2"])
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="indic")
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by=["indic", "indic2"])
        _check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict")
        _check_plot_works(df.boxplot, notch=1, return_type="dict")
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="indic", notch=1)

    @pytest.mark.slow
    def test_boxplot_legacy2(self):
        df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
        df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
        df["Y"] = Series(["A"] * 10)
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="X")
        # When ax is supplied and required number of axes is 1,
        # passed ax should be used:
        fig, ax = self.plt.subplots()
        axes = df.boxplot("Col1", by="X", ax=ax)
        ax_axes = ax.axes
        assert ax_axes is axes
        fig, ax = self.plt.subplots()
        axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
        ax_axes = ax.axes
        assert ax_axes is axes["A"]
        # Multiple columns with an ax argument should use same figure
        fig, ax = self.plt.subplots()
        with tm.assert_produces_warning(UserWarning):
            axes = df.boxplot(
                column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
            )
        assert axes["Col1"].get_figure() is fig
        # When by is None, check that all relevant lines are present in the
        # dict
        fig, ax = self.plt.subplots()
        d = df.boxplot(ax=ax, return_type="dict")
        lines = list(itertools.chain.from_iterable(d.values()))
        assert len(ax.get_lines()) == len(lines)

    @pytest.mark.slow
    def test_boxplot_return_type_none(self):
        # GH 12216; return_type=None & by=None -> axes
        result = self.hist_df.boxplot()
        assert isinstance(result, self.plt.Axes)

    @pytest.mark.slow
    def test_boxplot_return_type_legacy(self):
        # API change in https://github.com/pandas-dev/pandas/pull/7096
        import matplotlib as mpl  # noqa

        df = DataFrame(
            np.random.randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        with pytest.raises(ValueError):
            df.boxplot(return_type="NOTATYPE")
        result = df.boxplot()
        self._check_box_return_type(result, "axes")
        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="dict")
        self._check_box_return_type(result, "dict")
        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="axes")
        self._check_box_return_type(result, "axes")
        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="both")
        self._check_box_return_type(result, "both")

    @pytest.mark.slow
    def test_boxplot_axis_limits(self):
        # Each column's box must fit inside its axis' y-limits.
        def _check_ax_limits(col, ax):
            y_min, y_max = ax.get_ylim()
            assert y_min <= col.min()
            assert y_max >= col.max()

        df = self.hist_df.copy()
        df["age"] = np.random.randint(1, 20, df.shape[0])
        # One full row
        height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
        _check_ax_limits(df["height"], height_ax)
        _check_ax_limits(df["weight"], weight_ax)
        assert weight_ax._sharey == height_ax
        # Two rows, one partial
        p = df.boxplot(["height", "weight", "age"], by="category")
        height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
        dummy_ax = p[1, 1]
        _check_ax_limits(df["height"], height_ax)
        _check_ax_limits(df["weight"], weight_ax)
        _check_ax_limits(df["age"], age_ax)
        assert weight_ax._sharey == height_ax
        assert age_ax._sharey == height_ax
        assert dummy_ax._sharey is None

    @pytest.mark.slow
    def test_boxplot_empty_column(self):
        # An all-NaN column must not break the plot.
        df = DataFrame(np.random.randn(20, 4))
        df.loc[:, 0] = np.nan
        _check_plot_works(df.boxplot, return_type="axes")

    @pytest.mark.slow
    def test_figsize(self):
        df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
        result = df.boxplot(return_type="axes", figsize=(12, 8))
        assert result.figure.bbox_inches.width == 12
        assert result.figure.bbox_inches.height == 8

    def test_fontsize(self):
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
        self._check_ticks_props(
            df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
        )

    def test_boxplot_numeric_data(self):
        # GH 22799
        df = DataFrame(
            {
                "a": date_range("2012-01-01", periods=100),
                "b": np.random.randn(100),
                "c": np.random.randn(100) + 2,
                "d": date_range("2012-01-01", periods=100).astype(str),
                "e": date_range("2012-01-01", periods=100, tz="UTC"),
                "f": timedelta_range("1 days", periods=100),
            }
        )
        ax = df.plot(kind="box")
        assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]

    @pytest.mark.parametrize(
        "colors_kwd, expected",
        [
            (
                dict(boxes="r", whiskers="b", medians="g", caps="c"),
                dict(boxes="r", whiskers="b", medians="g", caps="c"),
            ),
            (dict(boxes="r"), dict(boxes="r")),
            ("r", dict(boxes="r", whiskers="r", medians="r", caps="r")),
        ],
    )
    def test_color_kwd(self, colors_kwd, expected):
        # GH: 26214
        df = DataFrame(random.rand(10, 2))
        result = df.boxplot(color=colors_kwd, return_type="dict")
        for k, v in expected.items():
            assert result[k][0].get_color() == v

    @pytest.mark.parametrize(
        "dict_colors, msg",
        [(dict(boxes="r", invalid_key="r"), "invalid key 'invalid_key'")],
    )
    def test_color_kwd_errors(self, dict_colors, msg):
        # GH: 26214
        df = DataFrame(random.rand(10, 2))
        with pytest.raises(ValueError, match=msg):
            df.boxplot(color=dict_colors, return_type="dict")

    @pytest.mark.parametrize(
        "props, expected",
        [
            ("boxprops", "boxes"),
            ("whiskerprops", "whiskers"),
            ("capprops", "caps"),
            ("medianprops", "medians"),
        ],
    )
    def test_specified_props_kwd(self, props, expected):
        # GH 30346
        df = DataFrame({k: np.random.random(100) for k in "ABC"})
        kwd = {props: dict(color="C1")}
        result = df.boxplot(return_type="dict", **kwd)
        assert result[expected][0].get_color() == "C1"
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
    """Tests for ``GroupBy.boxplot`` and ``DataFrame.boxplot(by=...)``:
    return types, layouts, and sharing of user-supplied axes."""

    @pytest.mark.slow
    def test_boxplot_legacy1(self):
        grouped = self.hist_df.groupby(by="gender")
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_boxplot_legacy2(self):
        # Grouping on one MultiIndex level.
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        grouped = df.groupby(level=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_boxplot_legacy3(self):
        # Grouping along the column axis after unstacking.
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        grouped = df.unstack(level=1).groupby(level=0, axis=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_grouped_plot_fignums(self):
        n = 10
        weight = Series(np.random.normal(166, 20, size=n))
        height = Series(np.random.normal(60, 10, size=n))
        with tm.RNGContext(42):
            gender = np.random.choice(["male", "female"], size=n)
        df = DataFrame({"height": height, "weight": weight, "gender": gender})
        gb = df.groupby("gender")
        res = gb.plot()
        assert len(self.plt.get_fignums()) == 2
        assert len(res) == 2
        tm.close()
        res = gb.boxplot(return_type="axes")
        assert len(self.plt.get_fignums()) == 1
        assert len(res) == 2
        tm.close()
        # now works with GH 5610 as gender is excluded
        res = df.groupby("gender").hist()
        tm.close()

    @pytest.mark.slow
    def test_grouped_box_return_type(self):
        df = self.hist_df
        # old style: return_type=None
        result = df.boxplot(by="gender")
        assert isinstance(result, np.ndarray)
        self._check_box_return_type(
            result, None, expected_keys=["height", "weight", "category"]
        )
        # now for groupby
        result = df.groupby("gender").boxplot(return_type="dict")
        self._check_box_return_type(result, "dict", expected_keys=["Male", "Female"])
        columns2 = "X B C D A G Y N Q O".split()
        df2 = DataFrame(random.randn(50, 10), columns=columns2)
        categories2 = "A B C D E F G H I J".split()
        df2["category"] = categories2 * 5
        for t in ["dict", "axes", "both"]:
            returned = df.groupby("classroom").boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=["A", "B", "C"])
            returned = df.boxplot(by="classroom", return_type=t)
            self._check_box_return_type(
                returned, t, expected_keys=["height", "weight", "category"]
            )
            returned = df2.groupby("category").boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=categories2)
            returned = df2.boxplot(by="category", return_type=t)
            self._check_box_return_type(returned, t, expected_keys=columns2)

    @pytest.mark.slow
    def test_grouped_box_layout(self):
        df = self.hist_df
        msg = "Layout of 1x1 must be larger than required size 2"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))
        msg = "The 'layout' keyword is not supported when 'by' is None"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(
                column=["height", "weight", "category"],
                layout=(2, 1),
                return_type="dict",
            )
        msg = "At least one dimension of layout must be positive"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("gender").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
        # GH 6769
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("classroom").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        # GH 5897
        axes = df.boxplot(
            column=["height", "weight", "category"], by="gender", return_type="axes"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        for ax in [axes["height"]]:
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible([ax.xaxis.get_label()], visible=False)
        for ax in [axes["weight"], axes["category"]]:
            self._check_visible(ax.get_xticklabels())
            self._check_visible([ax.xaxis.get_label()])
        box = df.groupby("classroom").boxplot(
            column=["height", "weight", "category"], return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot,
                column="height",
                layout=(3, 2),
                return_type="dict",
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot,
                column="height",
                layout=(3, -1),
                return_type="dict",
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
        box = df.boxplot(
            column=["height", "weight", "category"], by="gender", layout=(4, 1)
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))
        box = df.boxplot(
            column=["height", "weight", "category"], by="gender", layout=(-1, 1)
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))
        box = df.groupby("classroom").boxplot(
            column=["height", "weight", "category"], layout=(1, 4), return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
        box = df.groupby("classroom").boxplot(  # noqa
            column=["height", "weight", "category"], layout=(1, -1), return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))

    @pytest.mark.slow
    def test_grouped_box_multiple_axes(self):
        # GH 6970, GH 7069
        df = self.hist_df
        # check warning to ignore sharex / sharey
        # this check should be done in the first function which
        # passes multiple axes to plot, hist or boxplot
        # location should be changed if other test is added
        # which has earlier alphabetical order
        with tm.assert_produces_warning(UserWarning):
            fig, axes = self.plt.subplots(2, 2)
            df.groupby("category").boxplot(column="height", return_type="axes", ax=axes)
            self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
        fig, axes = self.plt.subplots(2, 3)
        with tm.assert_produces_warning(UserWarning):
            returned = df.boxplot(
                column=["height", "weight", "category"],
                by="gender",
                return_type="axes",
                ax=axes[0],
            )
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[0])
        assert returned[0].figure is fig
        # draw on second row
        with tm.assert_produces_warning(UserWarning):
            returned = df.groupby("classroom").boxplot(
                column=["height", "weight", "category"], return_type="axes", ax=axes[1]
            )
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[1])
        assert returned[0].figure is fig
        with pytest.raises(ValueError):
            fig, axes = self.plt.subplots(2, 3)
            # pass different number of axes from required
            with tm.assert_produces_warning(UserWarning):
                axes = df.groupby("classroom").boxplot(ax=axes)

    def test_fontsize(self):
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
        self._check_ticks_props(
            df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16
        )
| bsd-3-clause |
Mogeng/IOHMM | tests/test_OLS.py | 2 | 33960 | from __future__ import print_function
from __future__ import division
# import json
from past.utils import old_div
import unittest
import numpy as np
import statsmodels.api as sm
from IOHMM import OLS
# //TODO sample weight all zero
# Corner cases
# General
# 1. sample_weight is all zero
# 2. sample_weight is all one
# 3. sample_weight is a scale of all one
# 4. sample_weight is mixed of 0 and 1
# 6. when the number of data points is 1, or very small (less than the number of features)
# 7. standard dataset compare with sklearn/statsmodels
# 8. output dimensions
# 9. collinearity in X
# 10. to/from json
# MultivariateOLS
# 1. Y is not column/row independent
# Discrete/CrossEntropyMNL
# 1. number of class is 1
# 2. number of class is 2
class UnivariateOLSTests(unittest.TestCase):
    """OLS with a scalar response, validated against the published
    Longley regression results (coefficients, std. errors, dispersion,
    residuals, log-likelihood) plus regularization, sample-weight and
    degenerate-input behavior.

    Fix: ``np.Infinity`` replaced with ``np.inf`` — the alias was
    removed in NumPy 2.0.
    """

    @classmethod
    def setUpClass(cls):
        # Longley macroeconomic dataset — the classic OLS benchmark.
        cls.data_longley = sm.datasets.longley.load()

    def test_ols(self):
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.data_longley.exog, self.data_longley.endog)
        # coefficient
        self.assertEqual(self.model.coef.shape, (1, 7))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                      -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                      1829.15146461355]).reshape(1, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (1, 7))
        np.testing.assert_array_almost_equal(
            old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
            np.array([890420.383607373, 84.9149257747669, 0.03349,
                      0.488399681651699, 0.214274163161675, 0.226073200069370,
                      455.478499142212]).reshape(1, -1),
            decimal=2)
        # scale
        self.assertEqual(self.model.dispersion.shape, (1, 1))
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array([[92936.0061673238]]),
            decimal=3)
        # predict
        np.testing.assert_array_almost_equal(
            self.data_longley.endog.reshape(-1, 1) - self.model.predict(self.data_longley.exog),
            np.array([267.34003, -94.01394, 46.28717, -410.11462,
                      309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
                      -17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
                      -206.75783]).reshape(-1, 1),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.data_longley.exog, self.data_longley.endog),
            -109.61743480849013,
            places=3)
        # to_json
        json_dict = self.model.to_json('./tests/linear_models/OLS/UnivariateOLS/')
        self.assertEqual(json_dict['properties']['solver'], 'pinv')
        # from_json
        self.model_from_json = OLS.from_json(json_dict)
        np.testing.assert_array_almost_equal(
            self.model.coef,
            self.model_from_json.coef,
            decimal=3)
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            self.model_from_json.stderr,
            decimal=3)
        self.assertEqual(
            self.model.dispersion,
            self.model_from_json.dispersion)

    def test_ols_l1_regularized(self):
        # sklearn elastic net and l1 does not take sample_weights, will not test
        pass

    def test_ols_l2_regularized(self):
        # there is a bug in sklearn with weights, it can only use list right now
        self.model = OLS(
            solver='auto', fit_intercept=True, est_stderr=True,
            reg_method='l2', alpha=0.1, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.data_longley.exog, self.data_longley.endog, sample_weight=0.5)
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([-2.0172203, -52.14364269, 0.07089677, -0.42552125,
                      -0.57305292, -0.41272483, 48.32484052]).reshape(1, -1),
            decimal=3)
        # std.err is not estimated for regularized fits
        self.assertTrue(self.model.stderr is None)
        # scale
        self.assertEqual(self.model.dispersion.shape, (1, 1))
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array([[250870.081]]),
            decimal=3)
        # predict
        np.testing.assert_array_almost_equal(
            self.data_longley.endog.reshape(-1, 1) - self.model.predict(self.data_longley.exog),
            np.array([[280.31871146],
                      [-131.6981265],
                      [90.64414685],
                      [-400.10244445],
                      [-440.59604167],
                      [-543.88595187],
                      [200.70483416],
                      [215.88629903],
                      [74.9456573],
                      [913.85128645],
                      [424.15996133],
                      [-9.5797488],
                      [-360.96841852],
                      [27.214226],
                      [150.87705909],
                      [-492.17489392]]),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.data_longley.exog, self.data_longley.endog),
            -117.561627187,
            places=3)
        self.assertEqual(
            self.model.loglike_per_sample(self.data_longley.exog, self.data_longley.endog).shape,
            (16, ))

    def test_ols_elastic_net_regularized(self):
        # sklearn elastic net and l1 does not take sample_weights, will not test
        pass

    def test_ols_sample_weight_all_half(self):
        # A constant weight must reproduce the unweighted fit exactly.
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.data_longley.exog, self.data_longley.endog, sample_weight=0.5)
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array((-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                      -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                      1829.15146461355)).reshape(1, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        np.testing.assert_array_almost_equal(
            old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
            np.array((890420.383607373, 84.9149257747669, 0.334910077722432E-01,
                      0.488399681651699, 0.214274163161675, 0.226073200069370,
                      455.478499142212)).reshape(1, -1),
            decimal=1)
        # scale
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array((92936.0061673238)))
        # predict
        np.testing.assert_array_almost_equal(
            self.data_longley.endog.reshape(-1, 1) - self.model.predict(self.data_longley.exog),
            np.array((267.34003, -94.01394, 46.28717, -410.11462,
                      309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
                      -17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
                      -206.75783)).reshape(-1, 1),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.data_longley.exog, self.data_longley.endog),
            -109.61743480849013,
            places=3)
        self.assertEqual(
            self.model.loglike_per_sample(self.data_longley.exog, self.data_longley.endog).shape,
            (16, ))

    def test_ols_sample_weight_all_zero(self):
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.assertRaises(ValueError, self.model.fit,
                          self.data_longley.exog, self.data_longley.endog, 0)

    def test_ols_sample_weight_half_zero_half_one(self):
        # Zero-weighted rows must be equivalent to dropping those rows.
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        len_half = 8
        self.model.fit(self.data_longley.exog, self.data_longley.endog,
                       sample_weight=np.array([1] * len_half +
                                              [0] * (self.data_longley.exog.shape[0] - len_half)))
        # NOTE(review): `tol` is left at its constructor default here,
        # unlike the other fixtures — confirm this is intentional.
        self.model_half = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model_half.fit(self.data_longley.exog[:len_half], self.data_longley.endog[:len_half])
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            self.model_half.coef,
            decimal=3)
        # std.err
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            self.model_half.stderr,
            decimal=3)
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            self.model_half.dispersion,
            decimal=3)

    # corner cases
    def test_ols_one_data_point(self):
        # A single observation fits perfectly: zero dispersion, loglike 0.
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.data_longley.exog[0:1, :],
                       self.data_longley.endog[0:1, ], sample_weight=0.5)
        # coef
        self.assertEqual(self.model.coef.shape, (1, 7))
        # scale
        np.testing.assert_array_almost_equal(self.model.dispersion, np.array([[0]]))
        # loglike_per_sample
        np.testing.assert_array_equal(self.model.loglike_per_sample(
            self.data_longley.exog[0:1, :], self.data_longley.endog[0:1, ]), np.array([0]))
        # np.inf: the np.Infinity alias was removed in NumPy 2.0
        np.testing.assert_array_almost_equal(self.model.loglike_per_sample(
            np.array(self.data_longley.exog[0:1, :].tolist() * 6),
            np.array([60323, 0, 60323, 60322, 60322, 60323])),
            np.array([0, -np.inf, 0, -np.inf, -np.inf, 0]), decimal=3)

    def test_ols_multicolinearty(self):
        # Duplicated regressors: predictions and dispersion must match the
        # single-column fit; stderr is not defined.
        self.model_col = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        X = np.hstack([self.data_longley.exog[:, 0:1], self.data_longley.exog[:, 0:1]])
        self.model_col.fit(X,
                           self.data_longley.endog, sample_weight=0.8)
        self.model = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.data_longley.exog[:, 0:1],
                       self.data_longley.endog, sample_weight=0.8)
        # coef
        np.testing.assert_array_almost_equal(
            self.model_col.coef, np.array([319.47969664, 319.47969664]).reshape(1, -1), decimal=3)
        # stderr
        self.assertEqual(self.model_col.stderr, None)
        # scale
        np.testing.assert_array_almost_equal(
            self.model_col.dispersion, self.model.dispersion, decimal=3)
        # loglike_per_sample
        np.testing.assert_array_almost_equal(
            self.model_col.loglike_per_sample(X, self.data_longley.endog),
            self.model.loglike_per_sample(self.data_longley.exog[:, 0:1],
                                          self.data_longley.endog), decimal=3)
        np.testing.assert_array_almost_equal(
            self.model_col.predict(X),
            self.model.predict(self.data_longley.exog[:, 0:1]), decimal=3)
class IndependentMultivariateOLSTests(unittest.TestCase):
    """OLS fits on synthetic data with two response columns that are
    independent standard-normal draws, unrelated to the single regressor.

    All expected numbers below are hard-coded regression values obtained
    from a fixed RNG seed (see setUpClass).
    """

    @classmethod
    def setUpClass(cls):
        # Fixed seed so the hard-coded expected values in the tests reproduce.
        np.random.seed(0)
        cls.X = np.random.normal(size=(1000, 1))
        cls.Y = np.random.normal(size=(cls.X.shape[0], 2))

    def test_ols(self):
        """Unregularised pinv fit: coef/stderr/dispersion/loglike and JSON round-trip."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y)
        # coefficient
        self.assertEqual(self.model.coef.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([[-0.02924966, -0.03484827],
                      [-0.00978688, 0.00336316]]).reshape(2, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            np.array([[0.03083908, 0.03121143],
                      [0.03002101, 0.03038348]]).reshape(2, -1),
            decimal=2)
        # scale
        self.assertEqual(self.model.dispersion.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            np.array([[0.94905363, 0.0164185],
                      [0.0164185, 0.89937019]]),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y),
            -2758.54387369,
            places=3)
        # to_json
        json_dict = self.model.to_json('./tests/linear_models/OLS/MultivariateOLS/')
        self.assertEqual(json_dict['properties']['solver'], 'pinv')
        # from_json: the round-tripped model must reproduce the fitted state
        self.model_from_json = OLS.from_json(json_dict)
        np.testing.assert_array_almost_equal(
            self.model.coef,
            self.model_from_json.coef,
            decimal=3)
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            self.model_from_json.stderr,
            decimal=3)
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            self.model_from_json.dispersion,
            decimal=3)

    def test_ols_l2_regularized(self):
        """Ridge fit: coefficients near the OLS ones; stderr is not estimated."""
        self.model = OLS(
            solver='auto', fit_intercept=True, est_stderr=True,
            reg_method='l2', alpha=0.1, l1_ratio=1, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y)
        # coefficient
        self.assertEqual(self.model.coef.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([[-0.0292465, -0.03484456],
                      [-0.00978591, 0.00336286]]).reshape(2, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertTrue(self.model.stderr is None)
        # scale
        self.assertEqual(self.model.dispersion.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            np.array([[0.94905363, 0.0164185],
                      [0.0164185, 0.89937019]]),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y),
            -2758.5438737,
            places=3)

    def test_ols_l1_regularized(self):
        # sklearn l1 and elstic net does not support sample weight
        pass

    def test_ols_sample_weight_all_half(self):
        """A uniform weight of 0.5 must not change the fit; loglike halves."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y, sample_weight=0.5)
        # coefficient
        self.assertEqual(self.model.coef.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([[-0.02924966, -0.03484827],
                      [-0.00978688, 0.00336316]]).reshape(2, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            np.array([[0.03083908, 0.03121143],
                      [0.03002101, 0.03038348]]).reshape(2, -1),
            decimal=2)
        # scale
        self.assertEqual(self.model.dispersion.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            np.array([[0.94905363, 0.0164185],
                      [0.0164185, 0.89937019]]),
            decimal=3)
        # loglike/_per_sample: weights of 0.5 scale the log-likelihood by 1/2
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y, 0.5),
            old_div(-2758.54387369, 2.),
            places=3)
        self.assertEqual(
            self.model.loglike_per_sample(self.X, self.Y).shape,
            (1000, ))

    def test_ols_sample_weight_all_zero(self):
        """All-zero sample weights cannot define a fit and must raise."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0)

    def test_ols_sample_weight_half_zero_half_one(self):
        """Zero-weighting the second half must equal fitting on the first half only."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        len_half = 500
        self.model.fit(self.X, self.Y,
                       sample_weight=np.array([1] * len_half +
                                              [0] * (self.X.shape[0] - len_half)))
        self.model_half = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model_half.fit(self.X[:len_half], self.Y[:len_half])
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            self.model_half.coef,
            decimal=3)
        # std.err
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            self.model_half.stderr,
            decimal=3)
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            self.model_half.dispersion,
            decimal=3)

    # corner cases
    def test_ols_one_data_point(self):
        """A single observation fits exactly: zero dispersion, loglike 0 at the
        fitted point and -inf anywhere else."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[0:1, :],
                       self.Y[0:1, ], sample_weight=0.5)
        # coef
        self.assertEqual(self.model.coef.shape, (2, 2))
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion, np.array([[0, 0], [0, 0]]), decimal=6)
        # loglike_per_sample
        np.testing.assert_array_equal(self.model.loglike_per_sample(
            self.X[0:1, :], self.Y[0:1, ]), np.array([0]))
        np.testing.assert_array_almost_equal(self.model.loglike_per_sample(
            np.array(self.X[0:1, :].tolist() * 6),
            np.array([self.Y[0, ], self.Y[1, ], self.Y[0, ],
                      self.Y[1, ], self.Y[1, ], self.Y[0, ]])),
            np.array([0, -np.Infinity, 0, -np.Infinity, -np.Infinity, 0]), decimal=3)

    def test_ols_multicolinearty(self):
        """Duplicated regressor columns: stderr undefined, but predictions and
        likelihoods must match the single-column model."""
        self.model_col = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]])
        self.model_col.fit(X,
                           self.Y, sample_weight=0.5)
        self.model = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[:, 0:1],
                       self.Y, sample_weight=0.5)
        # stderr: not estimable under perfect collinearity
        self.assertEqual(self.model_col.stderr, None)
        # scale
        np.testing.assert_array_almost_equal(
            self.model_col.dispersion, self.model.dispersion, decimal=3)
        # loglike_per_sample
        np.testing.assert_array_almost_equal(
            self.model_col.loglike_per_sample(X, self.Y),
            self.model.loglike_per_sample(self.X[:, 0:1],
                                          self.Y), decimal=0)
        np.testing.assert_array_almost_equal(
            self.model_col.predict(X),
            self.model.predict(self.X[:, 0:1]), decimal=1)
class PerfectCorrelationMultivariateOLSTests(unittest.TestCase):
    """OLS fits on the Longley data where the two response columns are the
    SAME series (perfectly correlated), so the residual covariance is
    singular and the multivariate log-likelihood must raise.

    Expected coefficients/stderr come from the published Longley certified
    values; stderr and dispersion are rescaled by df_resid via ``old_div``.
    """

    @classmethod
    def setUpClass(cls):
        np.random.seed(0)
        cls.data_longley = sm.datasets.longley.load()
        cls.X = cls.data_longley.exog
        # Duplicate the endog column so Y has two perfectly correlated responses.
        cls.Y = np.hstack((cls.data_longley.endog.reshape(-1, 1),
                           cls.data_longley.endog.reshape(-1, 1)))

    def test_ols(self):
        """Unregularised fit reproduces the certified Longley coefficients in
        both output columns; singular dispersion makes loglike raise."""
        self.model = OLS(
            solver='auto', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y)
        # coefficient
        self.assertEqual(self.model.coef.shape, (2, 7))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([[-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                       -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                       1829.15146461355],
                      [-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                       -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                       1829.15146461355]]).reshape(2, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (2, 7))
        np.testing.assert_array_almost_equal(
            old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
            np.array([[890420.383607373, 84.9149257747669, 0.03349,
                       0.488399681651699, 0.214274163161675, 0.226073200069370,
                       455.478499142212],
                      [890420.383607373, 84.9149257747669, 0.03349,
                       0.488399681651699, 0.214274163161675, 0.226073200069370,
                       455.478499142212]]).reshape(2, -1),
            decimal=2)
        # scale: identical in all four cells because the responses are identical
        self.assertEqual(self.model.dispersion.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array([[92936.0061673238, 92936.0061673238],
                      [92936.0061673238, 92936.0061673238]]),
            decimal=3)
        # predict: residuals match the certified Longley residuals in both columns
        np.testing.assert_array_almost_equal(
            self.Y - self.model.predict(self.X),
            np.hstack((np.array([267.34003, -94.01394, 46.28717, -410.11462,
                                 309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
                                 -17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
                                 -206.75783]).reshape(-1, 1),
                       np.array([267.34003, -94.01394, 46.28717, -410.11462,
                                 309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
                                 -17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
                                 -206.75783]).reshape(-1, 1))),
            decimal=3)
        # loglike/_per_sample: singular dispersion matrix -> ValueError
        self.assertRaises(ValueError,
                          self.model.loglike_per_sample, self.X, self.Y)

    def test_ols_l1_regularized(self):
        # sklearn elastic net and l1 does not take sample_weights, will not test
        pass

    def test_ols_l2_regularized(self):
        """Ridge fit with uniform 0.5 weights; stderr not estimated."""
        # there is a bug in sklearn with weights, it can only use list right now
        self.model = OLS(
            solver='auto', fit_intercept=True, est_stderr=True,
            reg_method='l2', alpha=0.1, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y, sample_weight=0.5)
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([[-2.0172203, -52.14364269, 0.07089677, -0.42552125,
                       -0.57305292, -0.41272483, 48.32484052],
                      [-2.0172203, -52.14364269, 0.07089677, -0.42552125,
                       -0.57305292, -0.41272483, 48.32484052]]).reshape(2, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertTrue(self.model.stderr is None)
        # scale
        self.assertEqual(self.model.dispersion.shape, (2, 2))
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array([[250870.081, 250870.081],
                      [250870.081, 250870.081]]),
            decimal=3)
        # predict
        res = np.array([[280.31871146],
                        [-131.6981265],
                        [90.64414685],
                        [-400.10244445],
                        [-440.59604167],
                        [-543.88595187],
                        [200.70483416],
                        [215.88629903],
                        [74.9456573],
                        [913.85128645],
                        [424.15996133],
                        [-9.5797488],
                        [-360.96841852],
                        [27.214226],
                        [150.87705909],
                        [-492.17489392]])
        np.testing.assert_array_almost_equal(
            self.Y - self.model.predict(self.X),
            np.hstack((res, res)),
            decimal=3)
        # loglike/_per_sample: singular dispersion -> ValueError
        self.assertRaises(ValueError,
                          self.model.loglike, self.X, self.Y)

    def test_ols_elastic_net_regularized(self):
        # sklearn elastic net and l1 does not take sample_weights, will not test
        pass

    def test_ols_sample_weight_all_half(self):
        """Uniform 0.5 weights must reproduce the unweighted certified fit."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y, sample_weight=0.5)
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array(((-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                       -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                       1829.15146461355),
                      (-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
                       -2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
                       1829.15146461355))).reshape(2, -1),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        np.testing.assert_array_almost_equal(
            old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
            np.array(((890420.383607373, 84.9149257747669, 0.334910077722432E-01,
                       0.488399681651699, 0.214274163161675, 0.226073200069370,
                       455.478499142212),
                      (890420.383607373, 84.9149257747669, 0.334910077722432E-01,
                       0.488399681651699, 0.214274163161675, 0.226073200069370,
                       455.478499142212))).reshape(2, -1),
            decimal=1)
        # scale
        np.testing.assert_array_almost_equal(
            old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
            np.array(((92936.0061673238, 92936.0061673238),
                      (92936.0061673238, 92936.0061673238))),
            decimal=3)
        # predict
        res = np.array((267.34003, -94.01394, 46.28717, -410.11462,
                        309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
                        -17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
                        -206.75783)).reshape(-1, 1)
        np.testing.assert_array_almost_equal(
            self.Y - self.model.predict(self.X),
            np.hstack((res, res)),
            decimal=3)
        # loglike/_per_sample: singular dispersion -> ValueError
        self.assertRaises(ValueError,
                          self.model.loglike, self.X, self.Y)

    def test_ols_sample_weight_all_zero(self):
        """All-zero sample weights cannot define a fit and must raise."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0)

    def test_ols_sample_weight_half_zero_half_one(self):
        """Zero-weighting all but 8 rows must equal fitting on those 8 rows."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        len_half = 8
        self.model.fit(self.X, self.Y,
                       sample_weight=np.array([1] * len_half +
                                              [0] * (self.data_longley.exog.shape[0] - len_half)))
        self.model_half = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model_half.fit(self.X[:len_half], self.Y[:len_half])
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef,
            self.model_half.coef,
            decimal=3)
        # std.err
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            self.model_half.stderr,
            decimal=3)
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion,
            self.model_half.dispersion,
            decimal=3)

    # corner cases
    def test_ols_one_data_point(self):
        """A single observation fits exactly: zero dispersion, loglike 0 at the
        observed response and -inf anywhere else."""
        self.model = OLS(
            solver='pinv', fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[0:1, :],
                       self.Y[0:1, ], sample_weight=0.5)
        # coef
        self.assertEqual(self.model.coef.shape, (2, 7))
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion, np.array([[0, 0], [0, 0]]), decimal=6)
        # loglike_per_sample
        np.testing.assert_array_equal(self.model.loglike_per_sample(
            self.X[0:1, :], self.Y[0:1, ]), np.array([0]))
        np.testing.assert_array_almost_equal(self.model.loglike_per_sample(
            np.array(self.X[0:1, :].tolist() * 6),
            np.array([[60323, 60323], [0, 60323], [60323, 60323],
                      [60322, 60323], [60322, 60322], [60323, 60323]])),
            np.array([0, -np.Infinity, 0, -np.Infinity, -np.Infinity, 0]), decimal=3)

    def test_ols_multicolinearty(self):
        """Duplicated regressor columns: stderr undefined, loglike raises on the
        singular dispersion, predictions match the single-column model."""
        self.model_col = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]])
        self.model_col.fit(X,
                           self.Y, sample_weight=0.8)
        self.model = OLS(
            solver='pinv', fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[:, 0:1],
                       self.Y, sample_weight=0.8)
        # coef: the pinv solution splits the weight evenly between the twin columns
        np.testing.assert_array_almost_equal(
            self.model_col.coef, np.array([[319.47969664, 319.47969664],
                                           [319.47969664, 319.47969664]]).reshape(2, -1), decimal=3)
        # stderr
        self.assertEqual(self.model_col.stderr, None)
        # scale
        np.testing.assert_array_almost_equal(
            self.model_col.dispersion, self.model.dispersion, decimal=3)
        # loglike_per_sample
        self.assertRaises(ValueError,
                          self.model_col.loglike, X, self.Y)
        np.testing.assert_array_almost_equal(
            self.model_col.predict(X),
            self.model.predict(self.X[:, 0:1]), decimal=3)
| bsd-3-clause |
astronomeara/xastropy-old | xastropy/spec/analysis.py | 1 | 4788 | """
#;+
#; NAME:
#; analysis
#; Version 1.0
#;
#; PURPOSE:
#; Module for Analysis of Spectra
#; 07-Sep-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import barak
import xastropy
import numpy as np
import matplotlib.pyplot as plt
import pdb
from astropy import constants as const
import xastropy.atomic as xatom
from xastropy.xutils import xdebug as xdb
#class Spectral_Line(object):
#def pixminmax(spec, zabs, wrest, vmnx):
#def x_contifit(specfil, outfil=None, savfil=None, redshift=0., divmult=1, forest_divmult=1):
# Class for Ionic columns of a given line
class Spectral_Line(object):
    """Container for the analysis of a single spectral line.

    Bundles the atomic data, the analysis inputs and the measured
    quantities for one transition, identified by its rest wavelength.

    Attributes:
      wrest: float
        Rest wavelength of the spectral feature
    """

    def __init__(self, wrest, clm_file=None):
        # Rest wavelength identifies the transition
        self.wrest = wrest
        self.atomic = {}     # Atomic Data
        self.analy = {}      # Analysis inputs (from .clm file or AbsID)
        self.measure = {}    # Measured quantities (e.g. column, EW, centroid)
        # Populate atomic data and default analysis settings immediately
        self.fill()

    def fill(self):
        """Look up the atomic data for self.wrest and seed default analysis inputs."""
        import xastropy.spec.abs_line as xspa
        # Atomic data for this transition
        self.atomic = xspa.abs_line_data(self.wrest)
        # Default analysis settings
        self.analy.update({
            'VLIM': [0., 0.],   # km/s
            'FLG_ANLY': 1,      # Analyze
            'FLG_EYE': 0,
            'FLG_LIMIT': 0,     # No limit
            'DATFIL': '',
            'IONNM': self.atomic['name'],
        })

    def __repr__(self):
        line = '[{:s}: wrest={:g}]'.format(
            self.__class__.__name__, self.wrest)
        return line
#### ###############################
def pixminmax(*args):
    ''' Soon to be deprecated..
    Use Spectrum1D.pix_minmax()
    '''
    # Deliberately drops into the debugger so any remaining caller of this
    # old API is flagged interactively rather than silently doing nothing.
    xdb.set_trace()
#### ###############################
# Calls plotvel (Crighton)
# Adapted from N. Tejos scripts
#
def velplt(specfil):
    ''' Soon to be deprecated..

    Launch the plotspec velocity-plot tool on *specfil* with a dummy
    (empty) f26 file and the package's default transition list.

    Parameters
    ----------
    specfil : str
      Spectrum file passed straight through to plotspec.
    '''
    # Imports
    from plotspec import plotvel_util as pspv
    reload(pspv)
    import xastropy as xa
    from subprocess import Popen

    # Bug fix: the old ``if 'f26_fil' not in locals()`` guards were always
    # True (the names are never bound before the test runs), so both blocks
    # executed unconditionally -- assign directly instead of testing locals().
    f26_fil = 'tmp.f26'
    command = ['touch',f26_fil]
    print(Popen(command))
    print('xa.spec.analysis.velplt: Generated a dummy f26 file -- ', f26_fil)

    # Default transition list shipped with the package
    path = xa.__path__
    transfil = path[0]+'/spec/Data/initial_search.lines'

    # Call
    pspv.main([specfil, 'f26='+f26_fil, 'transitions='+transfil])
#### ###############################
# Calls Barak routines to fit the continuum
# Stolen from N. Tejos by JXP
#
def x_contifit(specfil, outfil=None, savfil=None, redshift=0., divmult=1, forest_divmult=1):
    '''Interactively fit (or adjust) a QSO continuum with Barak's spline fitter.

    Parameters
    ----------
    specfil : str
      Input spectrum file.
    outfil : str, optional
      FITS file for the continuum-fitted spectrum (default 'conti.fits').
    savfil : str, optional
      Pickle of (continuum, knots) to resume from (default 'conti.sav').
    redshift : float, optional
      Emission redshift passed to the fitter.
    divmult, forest_divmult : int, optional
      Knot-spacing multipliers for the fitter (forest_divmult applies blueward
      of Lya).

    Side effects: prompts the user, writes savfil and a FITS file, and plots.
    '''
    import os
    import barak.fitcont as bf
    # Bug fix: ``fits`` was used below (fits.writeto) but never imported.
    from astropy.io import fits
    from barak.spec import read
    from barak.io import saveobj, loadobj
    import xastropy.spec.readwrite as xsr
    reload(xsr)
    reload(bf)

    # Initialize defaults
    if savfil is None:
        savfil = 'conti.sav'
    if outfil is None:
        outfil = 'conti.fits'

    # Read spectrum + convert to Barak format
    sp = xsr.readspec(specfil)

    # Fit spline continuum, resuming from a previous save if one exists:
    if os.path.lexists(savfil):  # 'contfit_' + name + '.sav'
        option = raw_input('Adjust old continuum? (y)/n: ')
        if option.lower() != 'n':
            co_old, knots_old = loadobj(savfil)  # 'contfit_' + name + '.sav'
            co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
                                      oldco=co_old, knots=knots_old,
                                      divmult=divmult,
                                      forest_divmult=forest_divmult)
        else:
            co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
                                      divmult=divmult,
                                      forest_divmult=forest_divmult)
    else:
        co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
                                  divmult=divmult,
                                  forest_divmult=forest_divmult)
        # Remove the scratch file the fitter leaves behind on a fresh fit
        os.remove('_knots.sav')

    # Save continuum:
    saveobj(savfil, (co, knots), overwrite=1)

    # Check continuum:
    print('Plotting new continuum')
    plt.clf()
    plt.plot(sp.wa, sp.fl, drawstyle='steps-mid')
    plt.plot(sp.wa, sp.co, color='r')
    plt.show()

    # Repeat?
    confirm = raw_input('Keep continuum? (y)/n: ')
    if confirm == 'y':
        fits.writeto(outfil, sp, clobber=True)
    else:
        print('Writing to tmp.fits anyhow!')
        fits.writeto('tmp.fits', sp, clobber=True)
| bsd-3-clause |
soneoed/naowalkoptimiser | server/MCLLocalisation.py | 2 | 26832 | """ An SIR Particle Filter based localisation system for tracking a robot with ambiguous bearing
Jason Kulk
"""
from NAO import NAO
import numpy, time
class Localisation:
    # Indices into a particle's state vector.
    # Units per the control-model comments below: positions in cm,
    # angles in rad, rates per second -- TODO confirm against NAO.py.
    X = 0            # x position
    Y = 1            # y position
    THETA = 2        # heading
    XDOT = 3         # x velocity
    YDOT = 4         # y velocity
    THETADOT = 5     # angular velocity
    STATE_LENGTH = 6       # number of state variables per particle
    VEL_PAST_LENGTH = 13   # length of the velocity history buffers (PastVX/PastVY)
def __init__(self, M = 1000):
""" """
Localisation.NUM_PARTICLES = M
self.reset = True
self.time = time.time()
self.previoustime = self.time
self.control = numpy.zeros(3) # the current control
self.previouscontrol = self.control # the previous control
self.measurement = numpy.zeros(Localisation.STATE_LENGTH) # the current measurement of the state
self.previousmeasurement = self.measurement # the previous measurement of the state
self.previousmeasurementsigma = numpy.zeros(Localisation.STATE_LENGTH)
self.States = numpy.zeros((Localisation.NUM_PARTICLES, Localisation.STATE_LENGTH)) # the (states) particles
self.PreviousStates = self.States # the previous state of each particle (used for derivative calculations)
self.Weights = (1.0/Localisation.NUM_PARTICLES)*numpy.ones(Localisation.NUM_PARTICLES) # the weights of each particle
self.GUIWeights = (1.0/Localisation.NUM_PARTICLES)*numpy.ones(Localisation.NUM_PARTICLES) # the weights of each particle before resampling
self.State = self.States[0]
# Variables for the control model:
self.accelerationduration = numpy.array([1.5, 1.5, 0.5]) # the duration an acceleration is applied (s)
self.accelerationmagnitudes = numpy.array([15, 15, 0.7])/self.accelerationduration # the magnitude of the accelerations [forward, sideward, turn] (cm/s/s, rad/s)
self.accelerations = numpy.zeros((Localisation.NUM_PARTICLES, 3)) # the current acceleration (cm/s/s) for each particle
self.accelendtimes = numpy.zeros((Localisation.NUM_PARTICLES, 3)) # the times the accelerations will be set to zero given no change in control (s)
self.startedcontrol = False # a small hack that will prevent resampling until the control has started
# Variables for additional velocity filtering!
self.PastVX = list(numpy.zeros(Localisation.VEL_PAST_LENGTH))
self.PastVY = list(numpy.zeros(Localisation.VEL_PAST_LENGTH))
    def update(self, control, nao):
        """Run one filter iteration for the given control and NAO measurement.

        control -- commanded walk values for this step (3-vector)
        nao     -- measurement source; converted to a state vector / sigma
                   via the private __naoToState / __naoToSigma helpers
        """
        self.time = time.time()
        self.control = control
        self.measurement = self.__naoToState(nao)
        self.measurementsigma = self.__naoToSigma(nao)
        if self.reset:
            # First update after a reset: (re)initialise the particle set
            # -- presumably seeded from this first measurement; helper body
            # not visible here.
            self.__initParticles()
            self.reset = False
        else:
            # Standard SIR cycle: predict, weight, estimate, resample
            self.predict()
            self.updateWeights()
            self.estimateState()
            self.resample()

        # Store current values as 'previous' for the next iteration;
        # note States is deep-copied so PreviousStates does not alias it.
        self.previoustime = self.time
        self.previousmeasurement = self.measurement
        self.previousmeasurementsigma = self.measurementsigma
        self.PreviousStates = numpy.copy(self.States)
    def predict(self):
        """ Updates each of the particles based on system and control model """
        # Propagate particle states, then apply control-induced accelerations
        self.modelSystem()
        self.modelControl()
def updateWeights(self):
""" """
if not self.startedcontrol: ## this hack prevents particles from disappearing before the robot starts moving
return
# calculate variances for the measurement
# the variance in the velocity is the sum of the current and previous variance in the position measurements
self.measurementsigma[Localisation.XDOT] = max(4.0, (self.measurementsigma[Localisation.X] + self.previousmeasurementsigma[Localisation.X]))
self.measurementsigma[Localisation.YDOT] = max(4.0, (self.measurementsigma[Localisation.Y] + self.previousmeasurementsigma[Localisation.Y]))
self.measurementsigma[Localisation.THETADOT] = max(1.0, 1.5*(self.measurementsigma[Localisation.THETA] + self.previousmeasurementsigma[Localisation.THETA]))
# calculate the weights based on a measurement model
self.Weights *= self._gauss(self.States[:,Localisation.X] - self.measurement[Localisation.X], self.measurementsigma[Localisation.X]) # 1.73
self.Weights *= self._gauss(self.States[:,Localisation.Y] - self.measurement[Localisation.Y], self.measurementsigma[Localisation.Y]) # 1.73
# I need a little outlier rejection here:
anglediff = numpy.fabs(self.measurement[Localisation.THETA] - self.previousmeasurement[Localisation.THETA])
if anglediff > 5*numpy.pi/12 and anglediff < 7*numpy.pi/12:
self.measurementsigma[Localisation.THETA] += 1.0
self.measurementsigma[Localisation.THETADOT] += 15
elif anglediff > numpy.pi/3 and anglediff < 2*numpy.pi/3:
self.measurementsigma[Localisation.THETA] += 0.4
self.measurementsigma[Localisation.THETADOT] += 5
self.Weights *= self._gauss(self.States[:,Localisation.THETA] - self.measurement[Localisation.THETA], self.measurementsigma[Localisation.THETA]) + self._gauss(self.States[:,Localisation.THETA] + numpy.pi - self.measurement[Localisation.THETA], self.measurementsigma[Localisation.THETA]) + self._gauss(self.States[:,Localisation.THETA] - numpy.pi - self.measurement[Localisation.THETA], self.measurementsigma[Localisation.THETA]) + self._gauss(self.States[:,Localisation.THETA] + 2*numpy.pi - self.measurement[Localisation.THETA], self.measurementsigma[Localisation.THETA]) # 0.02 + 0.07
self.Weights *= self._gauss(self.States[:,Localisation.THETADOT] - self.measurement[Localisation.THETADOT], self.measurementsigma[Localisation.THETADOT])
self.Weights *= self._gauss(self.States[:,Localisation.XDOT] - self.measurement[Localisation.XDOT], self.measurementsigma[Localisation.XDOT]) # 2.95 + 1.5
self.Weights *= self._gauss(self.States[:,Localisation.YDOT] - self.measurement[Localisation.YDOT], self.measurementsigma[Localisation.YDOT])
controlvector = self.__controlToVelocityVector()
if controlvector != None:
diffs = numpy.arctan2(self.States[:,Localisation.YDOT], self.States[:,Localisation.XDOT]) - self.States[:,Localisation.THETA]
diffs = numpy.arctan2(numpy.sin(diffs), numpy.cos(diffs)) ## I need to normalise the diffs
self.Weights *= self._gauss(diffs - self.__controlToVelocityVector(), 0.707)
# normalise the weights so that their sum is one
sum = numpy.sum(self.Weights)
if sum != 0:
self.Weights /= sum
else:
print "Oh Noes: All of the weights are zero!"
print "Measurements:", self.measurement, self.previousmeasurement
print "State:", self.State
tempweights = (1.0/Localisation.NUM_PARTICLES)*numpy.ones(Localisation.NUM_PARTICLES)
tempweights *= self._gauss(self.States[:,Localisation.X] - self.measurement[Localisation.X], 3.73) # 1.73
print "X:", numpy.average(tempweights)
tempweights *= self._gauss(self.States[:,Localisation.Y] - self.measurement[Localisation.Y], 3.73) # 1.73
print "Y:", numpy.average(tempweights)
anglediff = numpy.fabs(self.measurement[Localisation.THETA] - self.previousmeasurement[Localisation.THETA])
if anglediff < 5*numpy.pi/12 or anglediff > 7*numpy.pi/12:
tempweights *= self._gauss(self.States[:,Localisation.THETA] - self.measurement[Localisation.THETA], 0.09) + self._gauss(self.States[:,Localisation.THETA] + numpy.pi - self.measurement[Localisation.THETA], 0.09) + self._gauss(self.States[:,Localisation.THETA] - numpy.pi - self.measurement[Localisation.THETA], 0.09) + self._gauss(self.States[:,Localisation.THETA] + 2*numpy.pi - self.measurement[Localisation.THETA], 0.09) # 0.02 + 0.07
print "THETA:", numpy.average(tempweights)
self.Weights *= self._gauss(self.States[:,Localisation.THETADOT] - self.measurement[Localisation.THETADOT], 0.6)
print "THETADOT:", numpy.average(tempweights)
tempweights *= self._gauss(self.States[:,Localisation.XDOT] - self.measurement[Localisation.XDOT], 4.45) # 2.95 + 1.5
print "XDOT:", numpy.average(tempweights)
tempweights *= self._gauss(self.States[:,Localisation.YDOT] - self.measurement[Localisation.YDOT], 4.45)
print "YDOT:", numpy.average(tempweights)
if controlvector != None:
diffs = numpy.arctan2(self.States[:,Localisation.YDOT], self.States[:,Localisation.XDOT]) - self.States[:,Localisation.THETA]
diffs = numpy.arctan2(numpy.sin(diffs), numpy.cos(diffs)) ## I need to normalise the diffs
tempweights *= self._gauss(diffs - self.__controlToVelocityVector(), 0.707)
print "CTRL:", numpy.average(tempweights)
self.__initParticles()
self.Weights = (1.0/Localisation.NUM_PARTICLES)*numpy.ones(Localisation.NUM_PARTICLES)
self.GUIWeights = numpy.copy(self.Weights)
def __calculateWeight(self, state):
""" Only use this function for debug purposes. """
weight = self._gauss(state[Localisation.X] - self.measurement[Localisation.X], 1.73) # 1.73
weight *= self._gauss(state[Localisation.Y] - self.measurement[Localisation.Y], 1.73) # 1.73
weight *= self._gauss(state[Localisation.THETA] - self.measurement[Localisation.THETA], 0.09) + self._gauss(state[Localisation.THETA] - (self.measurement[Localisation.THETA] - numpy.pi), 0.09) + self._gauss(state[Localisation.THETA] - numpy.pi - self.measurement[Localisation.THETA], 0.09) # 0.02 + 0.07
weight *= self._gauss(state[Localisation.XDOT] - self.measurement[Localisation.XDOT], 4.45) # 2.95 + 1.5
weight *= self._gauss(state[Localisation.YDOT] - self.measurement[Localisation.YDOT], 4.45)
weight *= numpy.where(numpy.fabs(state[Localisation.THETADOT] - self.measurement[Localisation.THETADOT]) < 2, self._gauss(state[Localisation.THETADOT] - self.measurement[Localisation.THETADOT], 0.7), 1)
controlvector = self.__controlToVelocityVector()
if controlvector != None:
diff = numpy.arctan2(state[Localisation.YDOT], state[Localisation.XDOT]) - state[Localisation.THETA]
diff = numpy.arctan2(numpy.sin(diff), numpy.cos(diff)) ## I need to normalise the diffs
weight *= self._gauss(diff - self.__controlToVelocityVector(), 0.707)
return weight
def resample(self):
""" """
# An SIS filter resamples only when necessary
Neff = 1.0/numpy.sum(self.Weights**2)
Ns = Localisation.NUM_PARTICLES
if self.startedcontrol:# and Neff < 0.3*Ns:
#print "Resample:", Neff, "<", Ns
NsInv = 1.0/Ns
c = numpy.cumsum(self.Weights)
u = NsInv*numpy.arange(Ns) + numpy.random.uniform(0, NsInv)
# I want to put in a fancy velocity check in the resample that spins particles around that are moving in the opposite direction
controlvector = self.__controlToVelocityVector()
if controlvector != None:
diffs = numpy.arctan2(self.States[:,Localisation.YDOT], self.States[:,Localisation.XDOT]) - self.States[:,Localisation.THETA]
diffs = numpy.arctan2(numpy.sin(diffs), numpy.cos(diffs)) ## I need to normalise the diffs
diffs = numpy.fabs(diffs - self.__controlToVelocityVector())
vc = numpy.fabs(diffs - numpy.pi) < 0.15
i = 0
#print "Pre resample:"
#print self.States[:,0:3]
for j in range(Ns):
while u[j] > c[i]:
i = i + 1
self.States[j] = numpy.copy(self.States[i])
self.PreviousStates[j] = numpy.copy(self.PreviousStates[i])
self.accelerations[j] = numpy.copy(self.accelerations[i])
self.accelendtimes[j] = numpy.copy(self.accelendtimes[i])
if controlvector != None and vc[i]:
print "Flipping Particle:", j, self.States[j, Localisation.THETA]
#print self.__calculateWeight(self.States[j]), self.__calculateWeight(self.States[i])
self.States[j, Localisation.THETA] = numpy.arctan2(numpy.sin(self.States[j,Localisation.THETA] - numpy.pi), numpy.cos(self.States[j,Localisation.THETA] - numpy.pi))
self.PreviousStates[j, Localisation.THETA] = numpy.arctan2(numpy.sin(self.PreviousStates[j,Localisation.THETA] - numpy.pi), numpy.cos(self.PreviousStates[j,Localisation.THETA] - numpy.pi))
#print "Post resample:"
#print self.States[:,0:3]
self.Weights = NsInv*numpy.ones(Ns)
    def modelSystem(self):
        """ Updates each particle based on the system model """
        dt = self.time - self.previoustime
        # Process-noise sigmas: tight while the robot is stationary, loose once
        # a control has been issued and the robot may actually be moving.
        if not self.startedcontrol:
            sdxdot = 0.05
            sdydot = 0.05
            sdthetadot = 0.01
        else:
            sdxdot = 1.5 # these weights can probably be a bit lower!
            sdydot = 1.5
            sdthetadot = 0.2
        # Propose new velocities by perturbing the previous ones with
        # zero-mean Gaussian noise (one sample per particle).
        xdot = self.PreviousStates[:,Localisation.XDOT] + numpy.random.normal(0, sdxdot, size=self.PreviousStates.shape[0])
        ydot = self.PreviousStates[:,Localisation.YDOT] + numpy.random.normal(0, sdydot, size=self.PreviousStates.shape[0])
        thetadot = self.PreviousStates[:,Localisation.THETADOT] + numpy.random.normal(0, sdthetadot/2.0, size=self.PreviousStates.shape[0])
        # Rotate the velocity vector by the angle turned over this step
        self.States[:,Localisation.XDOT] = xdot*numpy.cos(thetadot*dt) - ydot*numpy.sin(thetadot*dt)
        self.States[:,Localisation.YDOT] = ydot*numpy.cos(thetadot*dt) + xdot*numpy.sin(thetadot*dt)
        # Second half of the turn-rate noise is added here
        self.States[:,Localisation.THETADOT] = thetadot + numpy.random.normal(0, sdthetadot/2.0, size=self.PreviousStates.shape[0])
        self.States[:,Localisation.THETA] = self.PreviousStates[:,Localisation.THETA] + thetadot*dt
        # Integrate position using the rotated displacement over dt
        self.States[:,Localisation.X] = self.PreviousStates[:,Localisation.X] + xdot*dt*numpy.cos(self.States[:,Localisation.THETADOT]*dt) - ydot*dt*numpy.sin(self.States[:,Localisation.THETADOT]*dt)
        self.States[:,Localisation.Y] = self.PreviousStates[:,Localisation.Y] + ydot*dt*numpy.cos(self.States[:,Localisation.THETADOT]*dt) + xdot*dt*numpy.sin(self.States[:,Localisation.THETADOT]*dt)
        # make sure that theta is between +/- pi
        self.States[:,Localisation.THETA] = numpy.arctan2(numpy.sin(self.States[:,Localisation.THETA]), numpy.cos(self.States[:,Localisation.THETA]))
    def modelControl(self):
        """ Updates each particle based on the control model """
        # my model for control, is that a change in control will effect the state by
        # introducing a constant acceleration over the next 1 second (2 steps)
        deltacontrol = self.control - self.previouscontrol
        sdx = 1 # noise on estimate of acceleration magnitude (in cm/s/s)
        sdy = 1 # noise on estimate of acceleration magnitude (in cm/s/s)
        sdtheta = 0.2 # noise on estimate of acceleration magnitude (in rad/s/s)
        if self.control[0] == 0 and self.previouscontrol[0] != 0: # if I was previously walking and now I want to stop, deaccelerate
            # Decelerate each particle's current velocity to rest over the
            # configured acceleration duration.
            self.accelerations[:,0] = (-self.PreviousStates[:,Localisation.XDOT]/self.accelerationduration[0]) + numpy.random.normal(0, sdx, size=self.PreviousStates.shape[0])
            self.accelendtimes[:,0] = self.time + self.accelerationduration[0]
            self.accelerations[:,1] = (-self.PreviousStates[:,Localisation.YDOT]/self.accelerationduration[0]) + numpy.random.normal(0, sdy, size=self.PreviousStates.shape[0])
            self.accelendtimes[:,1] = self.time + self.accelerationduration[0]
            # NOTE(review): the theta deceleration divides by
            # accelerationduration[2], but its end time below uses
            # accelerationduration[0]; the other theta branches use [2] for
            # both -- looks like a typo, confirm before fixing.
            self.accelerations[:,2] = (-self.PreviousStates[:,Localisation.THETADOT]/self.accelerationduration[2]) + numpy.random.normal(0, sdtheta, size=self.PreviousStates.shape[0])
            self.accelendtimes[:,2] = self.time + self.accelerationduration[0]
        elif self.control[0] !=0 and self.previouscontrol[0] == 0: # if I was previously stopped and now I want to start, accelerate
            self.startedcontrol = True
            # Accelerate along each particle's own heading.
            self.accelerations[:,0] = self.accelerationmagnitudes[0]*numpy.cos(self.PreviousStates[:,Localisation.THETA]) + numpy.random.normal(0, sdx, size=self.PreviousStates.shape[0])
            self.accelendtimes[:,0] = self.time + self.accelerationduration[0]
            self.accelerations[:,1] = self.accelerationmagnitudes[0]*numpy.sin(self.PreviousStates[:,Localisation.THETA]) + numpy.random.normal(0, sdy, size=self.PreviousStates.shape[0])
            self.accelendtimes[:,1] = self.time + self.accelerationduration[0]
        # put a bit of spin on the robot if the desired bearing changes
        if abs(deltacontrol[1]) > 0 and abs(self.control[1]) > 0.1:
            self.accelerations[:,2] += deltacontrol[1] + numpy.random.normal(0, sdtheta, size=self.PreviousStates.shape[0])
            self.accelendtimes[:,2] = self.time + self.accelerationduration[2]
        # put a bit of spin on the robot if the final orientation changes
        if self.control[2] < 1000 and abs(self.control[0]) < 10 and abs(deltacontrol[2]) > 0:
            # Values above 1000 are presumably a "no final orientation set"
            # sentinel -- TODO confirm against the code that fills control[2].
            if self.previouscontrol[2] > 1000:
                self.accelerations[:,2] += self.control[2] + numpy.random.normal(0, sdtheta, size=self.PreviousStates.shape[0])
            else:
                self.accelerations[:,2] += deltacontrol[2] + numpy.random.normal(0, sdtheta, size=self.PreviousStates.shape[0])
            self.accelendtimes[:,2] = self.time + self.accelerationduration[2]
        # Zero every acceleration whose activity window has expired.
        self.accelerations = numpy.where(self.accelendtimes > self.time, self.accelerations, 0)
        # Limit the spin acceleration (clipped in place via the out argument).
        numpy.clip(self.accelerations[:,2], -0.7, 0.7, self.accelerations[:,2])
        #numpy.clip(self.accelerations[:,1], -20, 20, self.accelerations[:,1])
        #numpy.clip(self.accelerations[:,0], -20, 20, self.accelerations[:,0])
        # calculate the controls contribution to the state velocity
        self.States[:,Localisation.XDOT:] += self.accelerations*(self.time - self.previoustime)
        self.previouscontrol = self.control
    def estimateState(self):
        """ Updates the estimate of the state.

        Anchors on the single highest-weighted particle, then averages all
        particles within an L1 distance of 1 of it, weighted by their
        (re-normalised) particle weights, and mirrors the result into the
        convenience attributes.
        """
        best = numpy.argmax(self.Weights)
        beststate = self.States[best,:]
        #print "Best State:", beststate
        # Boolean mask of particles close (L1 < 1) to the best particle.
        cond = (numpy.sum(numpy.fabs(self.States - beststate), axis=1) < 1)
        beststates = numpy.compress(cond, self.States, axis=0)
        bestweights = numpy.compress(cond, self.Weights)
        #print "States", self.States
        #print "States within window:", cond
        #print "States close to best", len(beststates), beststates
        #print "Weights close to best", bestweights
        #print "Product:", (bestweights*beststates.T).T
        # Re-normalise the selected weights and take the weighted mean.
        bestweights /= numpy.sum(bestweights)
        self.State = numpy.sum((bestweights*beststates.T).T, axis=0)
        #print "Estimate:", self.State
        #print numpy.fabs(numpy.arctan2(self.State[Localisation.YDOT], self.State[Localisation.XDOT]) - self.State[Localisation.THETA]) - self.__controlToVelocityVector()
        if numpy.isnan(self.State[0]):
            print "FAIL"
        # Keep the legacy attributes (X, Y, VX, ...) in sync with the estimate.
        self.__updateAttributesFromState()
    def __initParticles(self):
        """ Initialises self.Particles to contain Localisation.NUM_PARTICLES particles around the current measurement """
        print "Initialising Particles around", self.measurement
        # Start every particle exactly at the measurement, ...
        self.States = numpy.zeros((Localisation.NUM_PARTICLES, Localisation.STATE_LENGTH))
        self.States += self.measurement
        # I know for certain that at the beginning the robot is not moving, so all of the velocities should be zero. The Position however should get some noise
        self.States[:,Localisation.X] += numpy.random.normal(0, 1.73, size=self.States.shape[0])
        self.States[:,Localisation.Y] += numpy.random.normal(0, 1.73, size=self.States.shape[0])
        self.States[:,Localisation.THETA] += numpy.random.normal(0, 0.09, size=self.States.shape[0])
        # now swap half of the orientations: each particle keeps its sampled
        # heading with probability 0.5, otherwise it is shifted by -pi
        # (presumably to cover an orientation ambiguity in the measurement).
        self.States[:, Localisation.THETA] = numpy.where(numpy.random.uniform(0,1, size=self.States.shape[0]) < 0.5, self.States[:, Localisation.THETA], self.States[:, Localisation.THETA] - numpy.pi)
        #print self.States
def __getStateNearMeasurement(self):
""" """
state = self.measurement + numpy.random.normal(0, 0.15, len(self.measurement))
if numpy.random.uniform(0,1) < 0.5:
state[Localisation.THETA] -= numpy.pi
return state
def __naoToState(self, nao):
state = numpy.zeros(Localisation.STATE_LENGTH)
if nao != None:
state[Localisation.X] = nao.X
state[Localisation.Y] = nao.Y
state[Localisation.THETA] = nao.Orientation
state[Localisation.XDOT] = nao.VX
state[Localisation.YDOT] = nao.VY
state[Localisation.THETADOT] = nao.VOrientation
else:
state[Localisation.X] = self.previousmeasurement[Localisation.X]
state[Localisation.Y] = self.previousmeasurement[Localisation.Y]
state[Localisation.THETA] = self.previousmeasurement[Localisation.THETA]
return state
def __naoToSigma(self, nao):
""" """
state = numpy.zeros(Localisation.STATE_LENGTH)
if nao != None:
state[Localisation.X] = nao.sigmaX
state[Localisation.Y] = nao.sigmaY
state[Localisation.THETA] = nao.sigmaOrientation
state[Localisation.XDOT] = nao.sigmaVX
state[Localisation.YDOT] = nao.sigmaVY
state[Localisation.THETADOT] = nao.sigmaVOrientation
else:
state[Localisation.X] = 3.73
state[Localisation.Y] = 3.73
state[Localisation.THETA] = 0.09
state[Localisation.XDOT] = 4.45
state[Localisation.YDOT] = 4.45
state[Localisation.THETADOT] = 0.6
return state
def __updateAttributesFromState(self):
""" I have a bunch of convienent attributes for accessing the state. I need to keep them for backward compatiblity purposes. """
self.X = self.State[Localisation.X]
self.Y = self.State[Localisation.Y]
self.Orientation = self.State[Localisation.THETA]
# I want the velocities to be robot relative
vx = self.State[Localisation.XDOT]
vy = self.State[Localisation.YDOT]
vx = self.measurement[Localisation.XDOT] # I am usually happier using the actual velocity measurements
vy = self.measurement[Localisation.YDOT]
relativevx = vx*numpy.cos(self.Orientation) + vy*numpy.sin(self.Orientation)
relativevy = -vx*numpy.sin(self.Orientation) + vy*numpy.cos(self.Orientation)
self.VX = self._filter(self.PastVX, relativevx)
self.VY = self._filter(self.PastVY, relativevy)
self.VOrientation = self.State[Localisation.THETADOT]
self.V = numpy.sqrt(self.VX**2 + self.VY**2)
def __controlToVelocityVector(self):
""" Returns the velocity vector expected given the control.
Returns None when I am not sure what to expect """
if self.control[0] < 0:
absdirection = abs(self.control[1])
if absdirection < 0.19:
return 0
elif absdirection < 0.5:
return self.control[1]
else:
return None
else:
return None
def _gauss(self, x, sigma):
return (1.0/numpy.sqrt(2*numpy.pi*sigma))*numpy.exp(-(x**2)/(2*sigma**2))
def _filter(self, pastvalues, measurement):
""" """
pastvalues.append(measurement)
pastvalues.pop(0)
return numpy.average(pastvalues, weights=numpy.arange(len(pastvalues)))
if __name__ == '__main__':
    # Demo: run the particle filter with a scripted control sequence and
    # animate the particle cloud in a wx/matplotlib window.
    import matplotlib
    matplotlib.use('WXAgg')
    matplotlib.rcParams['toolbar'] = 'None'
    import pylab, psyco, wx
    psyco.full()
    # Trace of the estimated state over time (x, y, orientation).
    x = list()
    y = list()
    o = list()
    localisation = Localisation(1000)
    #pylab.figure()
    #p = numpy.arange(-numpy.pi, numpy.pi, 0.01)
    #pylab.plot(p, localisation._gauss(p - 1, 0.02) + localisation._gauss(p + 2.14, 0.02))
    #pylab.figure()
    loopcount = 0
    control = numpy.zeros(3)
    # Animated scatter of the particle cloud, redrawn via background blitting.
    ax = pylab.subplot(111)
    canvas = ax.figure.canvas
    particleplot, = pylab.plot([0,0],[0,0], marker='o', color='k', linewidth=0, markersize=2, animated=True)
    estimateplot, = pylab.plot([0,0],[0,0], marker='o', animated=True)
    ax.set_xlim(-200, 200)
    ax.set_ylim(-200, 200)
    canvas.draw()
    canvas.gui_repaint()
    def update_plot(*args):
        """ Idle-event callback: advance the filter one step, log the
        estimate, and redraw the particle cloud over the cached background. """
        global control, loopcount, localisation
        if update_plot.background is None:
            update_plot.background = canvas.copy_from_bbox(ax.bbox)
        # NOTE(review): `time` is not imported in this chunk -- it must be
        # imported earlier in the file for time.time()/time.sleep() to work.
        starttime = time.time()
        localisation.update(control, None)
        x.append(localisation.State[0])
        y.append(localisation.State[1])
        o.append(localisation.State[2])
        loopcount += 1
        # Scripted control: start walking on step 2, stop on step 50.
        if loopcount == 2:
            print "Starting"
            control = numpy.array([-1,0.3,0])
        elif loopcount == 50:
            control = numpy.array([0,0,0])
        canvas.restore_region(update_plot.background)
        estimateplot.set_data(x,y)
        particleplot.set_data(numpy.array(localisation.States[:,Localisation.Y]), numpy.array(localisation.States[:,Localisation.X]))
        ax.draw_artist(particleplot)
        #ax.draw_artist(estimateplot)
        canvas.blit(ax.bbox)
        # Throttle to roughly 10 frames per second.
        time.sleep(max(0,0.1 - (time.time() - starttime)))
        wx.WakeUpIdle()
    update_plot.background = None
    wx.EVT_IDLE(wx.GetApp(), update_plot)
    pylab.show()
| gpl-3.0 |
TaikiGoto/master | ch06/overfit_weight_decay.py | 3 | 2080 | # coding: utf-8
import os
import sys
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.multi_layer_net import MultiLayerNet
from common.optimizer import SGD
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
# Shrink the training set to 300 samples so the network overfits.
x_train = x_train[:300]
t_train = t_train[:300]
# Weight decay (L2 regularisation) setting =======================
#weight_decay_lambda = 0  # use 0 to disable weight decay
weight_decay_lambda = 0.1
# ====================================================
network = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100], output_size=10,
                        weight_decay_lambda=weight_decay_lambda)
optimizer = SGD(lr=0.01)
max_epochs = 201
train_size = x_train.shape[0]
batch_size = 100
train_loss_list = []  # NOTE(review): never appended to below -- appears unused
train_acc_list = []
test_acc_list = []
# Minibatch iterations per epoch (at least 1).
iter_per_epoch = max(train_size / batch_size, 1)
epoch_cnt = 0
# Effectively an infinite loop; it exits via the break below once
# max_epochs epochs have been evaluated.
for i in range(1000000000):
    # Sample a random minibatch and take one SGD step.
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]
    grads = network.gradient(x_batch, t_batch)
    optimizer.update(network.params, grads)
    # Record train/test accuracy once per epoch.
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("epoch:" + str(epoch_cnt) + ", train acc:" + str(train_acc) + ", test acc:" + str(test_acc))
        epoch_cnt += 1
        if epoch_cnt >= max_epochs:
            break
# 3. Plot the accuracy curves ==========
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, train_acc_list, marker='o', label='train', markevery=10)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show() | mit |
alisidd/tensorflow | tensorflow/python/client/notebook.py | 109 | 4791 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
# Populated by argparse in the __main__ block below.
FLAGS = None
# Snapshot of argv taken before any flag manipulation; restored in main().
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
  """Runs the notebook server, or (when IS_KERNEL) an IPython kernel."""
  sys.argv = ORIG_ARGV
  if not IS_KERNEL:
    # Drop all flags.
    sys.argv = [sys.argv[0]]
    # NOTE(sadovsky): For some reason, putting this import at the top level
    # breaks inline plotting.  It's probably a bug in the stone-age version of
    # matplotlib.
    from IPython.html.notebookapp import NotebookApp  # pylint: disable=g-import-not-at-top
    notebookapp = NotebookApp.instance()
    notebookapp.open_browser = True

    # password functionality adopted from quality/ranklab/main/tools/notebook.py
    # add options to run with "password"
    if FLAGS.password:
      from IPython.lib import passwd  # pylint: disable=g-import-not-at-top
      # A password implies the server may be reached from other machines.
      notebookapp.ip = "0.0.0.0"
      notebookapp.password = passwd(FLAGS.password)
    else:
      print ("\nNo password specified; Notebook server will only be available"
             " on the local machine.\n")
    notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])

    if notebookapp.ip == "0.0.0.0":
      proto = "https" if notebookapp.certfile else "http"
      url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
                              notebookapp.base_project_url)
      print("\nNotebook server will be publicly available at: %s\n" % url)

    notebookapp.start()
    return

  # Drop the --flagfile flag so that notebook doesn't complain about an
  # "unrecognized alias" when parsing sys.argv.
  sys.argv = ([sys.argv[0]] +
              [z for z in sys.argv[1:] if not z.startswith("--flagfile")])
  from IPython.kernel.zmq.kernelapp import IPKernelApp  # pylint: disable=g-import-not-at-top
  kernelapp = IPKernelApp.instance()
  kernelapp.initialize()

  # Enable inline plotting. Equivalent to running "%matplotlib inline".
  ipshell = kernelapp.shell
  ipshell.enable_matplotlib("inline")

  kernelapp.start()
if __name__ == "__main__":
  # Build the flag parser shared by both the notebook and kernel modes.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      "--password",
      type=str,
      default=None,
      help="""\
      Password to require. If set, the server will allow public access. Only
      used if notebook config file does not exist.\
      """)
  parser.add_argument(
      "--notebook_dir",
      type=str,
      default="experimental/brain/notebooks",
      help="root location where to store notebooks")

  # When the user starts the main notebook process, we don't touch sys.argv.
  # When the main process launches kernel subprocesses, it writes all flags
  # to a tmpfile and sets --flagfile to that tmpfile, so for kernel
  # subprocesses here we drop all flags *except* --flagfile, then call
  # app.run(), and then (in main) restore all flags before starting the
  # kernel app.
  if IS_KERNEL:
    # Drop everything except --flagfile.
    sys.argv = ([sys.argv[0]] +
                [x for x in sys.argv[1:] if x.startswith("--flagfile")])
  FLAGS, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
Akshay0724/scikit-learn | sklearn/neural_network/tests/test_mlp.py | 28 | 22183 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston, load_iris
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false, ignore_warnings)
from sklearn.utils.testing import assert_raise_message
np.seterr(all='warn')

# Activation functions exercised by the parametrised tests below.
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]

# Small multi-class digits subset (3 classes, 200 samples), scaled to [0, 1].
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]

# Binary digits subset (2 classes, 200 samples), scaled to [0, 1].
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
    digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]

classification_datasets = [(X_digits_multi, y_digits_multi),
                           (X_digits_binary, y_digits_binary)]

# Standardised boston housing subset for the regression tests.
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]

# Full iris dataset, used by the warm-start tests.
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
def test_alpha():
    """Larger alpha (L2 penalty) must yield weights closer to zero."""
    X = X_digits_binary[:100]
    y = y_digits_binary[:100]

    def absolute_sum(x):
        # L1 mass of a weight matrix -- the quantity alpha should shrink.
        # (PEP 8 E731: was an assigned lambda.)
        return np.sum(np.abs(x))

    alpha_vectors = []
    alpha_values = np.arange(2)
    for alpha in alpha_values:
        mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
        alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
                                       absolute_sum(mlp.coefs_[1])]))

    # Every increase in alpha must reduce both layers' weight mass.
    for i in range(len(alpha_values) - 1):
        assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
    """One SGD step on a hand-computed 3-2-1 logistic network.

    Weights are planted manually, a single partial_fit step is taken, and
    the resulting coefficients and intercepts are compared against the
    gradient-descent update worked out by hand in the comments below.
    """
    # Test that the algorithm solution is equal to a worked out example.
    X = np.array([[0.6, 0.8, 0.7]])
    y = np.array([0])
    mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1,
                        activation='logistic', random_state=1, max_iter=1,
                        hidden_layer_sizes=2, momentum=0)
    # set weights
    mlp.coefs_ = [0] * 2
    mlp.intercepts_ = [0] * 2
    mlp.n_outputs_ = 1
    mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
    mlp.coefs_[1] = np.array([[0.1], [0.2]])
    mlp.intercepts_[0] = np.array([0.1, 0.1])
    mlp.intercepts_[1] = np.array([1.0])
    mlp._coef_grads = [] * 2
    mlp._intercept_grads = [] * 2
    # Initialize parameters
    mlp.n_iter_ = 0
    mlp.learning_rate_ = 0.1
    # Compute the number of layers
    mlp.n_layers_ = 3
    # Pre-allocate gradient matrices
    mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
    mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
    mlp.out_activation_ = 'logistic'
    mlp.t_ = 0
    mlp.best_loss_ = np.inf
    mlp.loss_curve_ = []
    mlp._no_improvement_count = 0
    mlp._intercept_velocity = [np.zeros_like(intercepts) for
                               intercepts in
                               mlp.intercepts_]
    mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
                          mlp.coefs_]
    mlp.partial_fit(X, y, classes=[0, 1])
    # Manually worked out example
    # h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
    #       =  0.679178699175393
    # h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
    #         = 0.574442516811659
    # o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
    #       = 0.7654329236196236
    # d21 = -(0 - 0.765) = 0.765
    # d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
    # d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
    # W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
    # W1grad11 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
    # W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
    # W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
    # W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
    # W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
    # W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
    # W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
    # b1grad1 = d11 = 0.01667
    # b1grad2 = d12 = 0.0374
    # b2grad = d21 = 0.765
    # W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
    #          [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
    #          [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
    #          0.096008], [0.4939998, -0.002244]]
    # W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
    #       [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
    # b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
    #      = [0.098333, 0.09626]
    # b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
    assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
                                                 [0.2956664, 0.096008],
                                                 [0.4939998, -0.002244]]),
                        decimal=3)
    assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
                        decimal=3)
    assert_almost_equal(mlp.intercepts_[0],
                        np.array([0.098333, 0.09626]), decimal=3)
    assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
    # Testing output
    #  h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
    #               0.7 * 0.4939998 + 0.098333) = 0.677
    #  h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
    #            0.7 * -0.002244 + 0.09626) = 0.572
    #  o1 = h * W2 + b21 = 0.677 * 0.04706 +
    #             0.572 * 0.154089 + 0.9235 = 1.043
    #  prob = sigmoid(o1) = 0.739
    assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
    """Check analytical gradients against central finite differences."""
    # Test gradient.
    # This makes sure that the activation functions and their derivatives
    # are correct. The numerical and analytical computation of the gradient
    # should be close.
    for n_labels in [2, 3]:
        n_samples = 5
        n_features = 10
        X = np.random.random((n_samples, n_features))
        y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
        Y = LabelBinarizer().fit_transform(y)

        for activation in ACTIVATION_TYPES:
            mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
                                solver='lbfgs', alpha=1e-5,
                                learning_rate_init=0.2, max_iter=1,
                                random_state=1)
            mlp.fit(X, y)

            # Flatten all coefficients and intercepts into one parameter
            # vector, as consumed by _loss_grad_lbfgs.
            theta = np.hstack([l.ravel() for l in mlp.coefs_ +
                               mlp.intercepts_])

            layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
                           [mlp.n_outputs_])

            activations = []
            deltas = []
            coef_grads = []
            intercept_grads = []

            activations.append(X)
            for i in range(mlp.n_layers_ - 1):
                activations.append(np.empty((X.shape[0],
                                             layer_units[i + 1])))
                deltas.append(np.empty((X.shape[0],
                                        layer_units[i + 1])))

                fan_in = layer_units[i]
                fan_out = layer_units[i + 1]
                coef_grads.append(np.empty((fan_in, fan_out)))
                intercept_grads.append(np.empty(fan_out))

            # analytically compute the gradients
            def loss_grad_fun(t):
                return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
                                            coef_grads, intercept_grads)

            [value, grad] = loss_grad_fun(theta)
            numgrad = np.zeros(np.size(theta))
            n = np.size(theta, 0)
            E = np.eye(n)
            epsilon = 1e-5
            # numerically compute the gradients (central differences)
            for i in range(n):
                dtheta = E[:, i] * epsilon
                numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
                               loss_grad_fun(theta - dtheta)[0]) /
                              (epsilon * 2.0))
            assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
    """lbfgs must reach > 0.95 training accuracy on both digits subsets for
    every activation, and predict() must keep the targets' dtype kind."""
    for X, y in classification_datasets:
        X_train, y_train = X[:150], y[:150]
        X_test = X[150:]
        expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)

        for activation in ACTIVATION_TYPES:
            clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50,
                                max_iter=150, shuffle=True, random_state=1,
                                activation=activation)
            clf.fit(X_train, y_train)
            predictions = clf.predict(X_test)
            assert_greater(clf.score(X_train, y_train), 0.95)
            assert_equal((predictions.shape[0], predictions.dtype.kind),
                         expected_shape_dtype)
def test_lbfgs_regression():
    """lbfgs regression on the boston subset: the linear bottleneck
    ('identity') is held to a lower bar than the non-linear activations,
    which perform much better."""
    for activation in ACTIVATION_TYPES:
        reg = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
                           max_iter=150, shuffle=True, random_state=1,
                           activation=activation)
        reg.fit(Xboston, yboston)
        threshold = 0.84 if activation == 'identity' else 0.95
        assert_greater(reg.score(Xboston, yboston), threshold)
def test_learning_rate_warmstart():
    """With warm_start=True, refitting keeps the learning rate under the
    'constant' schedule and decays it under 'invscaling'."""
    # Tests that warm_start reuse past solutions.
    X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
    y = [1, 1, 1, 0]
    for learning_rate in ["invscaling", "constant"]:
        mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
                            learning_rate=learning_rate, max_iter=1,
                            power_t=0.25, warm_start=True)
        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
            prev_eta = mlp._optimizer.learning_rate
            mlp.fit(X, y)
            post_eta = mlp._optimizer.learning_rate

        if learning_rate == 'constant':
            assert_equal(prev_eta, post_eta)
        elif learning_rate == 'invscaling':
            # eta = learning_rate_init / t**power_t with the accumulated step
            # count carried across the warm restart.
            assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
                         post_eta)
def test_multilabel_classification():
    """Both fit() and repeated partial_fit() must learn a 5-label multilabel
    problem given indicator-matrix targets."""
    # Test that multi-label classification works as expected.
    # test fit method
    X, y = make_multilabel_classification(n_samples=50, random_state=0,
                                          return_indicator=True)
    mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
                        max_iter=150, random_state=0, activation='logistic',
                        learning_rate_init=0.2)
    mlp.fit(X, y)
    assert_equal(mlp.score(X, y), 1)

    # test partial fit method
    mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
                        random_state=0, activation='logistic', alpha=1e-5,
                        learning_rate_init=0.2)
    for i in range(100):
        mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
    assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
    """A single MLPRegressor fitted on a 5-target regression problem must
    explain most of the variance (R^2 > 0.9)."""
    X, y = make_regression(n_samples=200, n_targets=5)
    reg = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
                       random_state=1)
    reg.fit(X, y)
    score = reg.score(X, y)
    assert_greater(score, 0.9)
def test_partial_fit_classes_error():
    """Passing a different `classes` array to a second partial_fit call must
    raise a ValueError."""
    X, y = [[3, 2]], [0]
    clf = MLPClassifier(solver='sgd')
    clf.partial_fit(X, y, classes=[0, 1])
    # Second call declares an incompatible class set.
    assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
    """partial_fit must converge to the same predictions as fit.

    For each classification dataset, one SGD network is trained with fit()
    and another with 100 partial_fit() calls; their predictions must agree
    and accuracy must exceed 0.95.  (The pointless `X = X` / `y = y`
    self-assignments of the original were removed.)
    """
    for X, y in classification_datasets:
        mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
                            tol=0, alpha=1e-5, learning_rate_init=0.2)

        with ignore_warnings(category=ConvergenceWarning):
            mlp.fit(X, y)
        pred1 = mlp.predict(X)
        mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
                            learning_rate_init=0.2)
        for i in range(100):
            mlp.partial_fit(X, y, classes=np.unique(y))
        pred2 = mlp.predict(X)
        assert_array_equal(pred1, pred2)
        assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_unseen_classes():
    """Non-regression test for bug #6994: labels declared in `classes` but
    not yet observed must not corrupt later partial_fit calls."""
    clf = MLPClassifier(random_state=0)
    clf.partial_fit([[1], [2], [3]], ["a", "b", "c"],
                    classes=["a", "b", "c", "d"])
    # "d" was declared above but only appears now.
    clf.partial_fit([[4]], ["d"])
    score = clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"])
    assert_greater(score, 0)
def test_partial_fit_regression():
    """partial_fit should match fit for regression, with and without
    momentum (full-batch updates so the two trajectories coincide)."""
    # Test partial_fit on regression.
    # `partial_fit` should yield the same results as 'fit' for regression.
    X = Xboston
    y = yboston

    for momentum in [0, .9]:
        mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
                           random_state=1, learning_rate_init=0.01,
                           batch_size=X.shape[0], momentum=momentum)
        with warnings.catch_warnings(record=True):
            # catch convergence warning
            mlp.fit(X, y)
        pred1 = mlp.predict(X)
        mlp = MLPRegressor(solver='sgd', activation='relu',
                           learning_rate_init=0.01, random_state=1,
                           batch_size=X.shape[0], momentum=momentum)
        for i in range(100):
            mlp.partial_fit(X, y)

        pred2 = mlp.predict(X)
        assert_almost_equal(pred1, pred2, decimal=2)
        score = mlp.score(X, y)
        assert_greater(score, 0.75)
def test_partial_fit_errors():
    """partial_fit must reject targets outside `classes`, and must not even
    exist on an lbfgs-solver estimator."""
    X, y = [[3, 2], [1, 6]], [1, 0]

    # y contains labels (0, 1) that are not in the declared classes.
    assert_raises(ValueError,
                  MLPClassifier(solver='sgd').partial_fit, X, y, classes=[2])

    # lbfgs doesn't support partial_fit
    assert_false(hasattr(MLPClassifier(solver='lbfgs'), 'partial_fit'))
def test_params_errors():
    """Constructing with an out-of-range or wrongly-typed parameter must
    make fit() raise a ValueError."""
    X = [[3, 2], [1, 6]]
    y = [1, 0]
    bad_settings = [
        dict(hidden_layer_sizes=-1),
        dict(max_iter=-1),
        dict(shuffle='true'),
        dict(alpha=-1),
        dict(learning_rate_init=-1),
        dict(momentum=2),
        dict(momentum=-0.5),
        dict(nesterovs_momentum='invalid'),
        dict(early_stopping='invalid'),
        dict(validation_fraction=1),
        dict(validation_fraction=-0.5),
        dict(beta_1=1),
        dict(beta_1=-0.5),
        dict(beta_2=1),
        dict(beta_2=-0.5),
        dict(epsilon=-0.5),
        dict(solver='hadoken'),
        dict(learning_rate='converge'),
        dict(activation='cloak'),
    ]
    for params in bad_settings:
        assert_raises(ValueError, MLPClassifier(**params).fit, X, y)
def test_predict_proba_binary():
    """Binary predict_proba: shape (n_samples, 2), argmax consistent with
    predict_log_proba, log-probas equal to log(probas), and perfect
    separation of the training set (ROC AUC == 1)."""
    # Test that predict_proba works as expected for binary class.
    X = X_digits_binary[:50]
    y = y_digits_binary[:50]

    clf = MLPClassifier(hidden_layer_sizes=5)
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
    y_proba = clf.predict_proba(X)
    y_log_proba = clf.predict_log_proba(X)

    (n_samples, n_classes) = y.shape[0], 2

    proba_max = y_proba.argmax(axis=1)
    proba_log_max = y_log_proba.argmax(axis=1)

    assert_equal(y_proba.shape, (n_samples, n_classes))
    assert_array_equal(proba_max, proba_log_max)
    assert_array_equal(y_log_proba, np.log(y_proba))

    assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multiclass():
    """Multi-class predict_proba: correct shape, argmax agreeing with
    predict_log_proba, and log-probabilities equal to log(probabilities)."""
    X = X_digits_multi[:10]
    y = y_digits_multi[:10]

    clf = MLPClassifier(hidden_layer_sizes=5)
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
    probas = clf.predict_proba(X)
    log_probas = clf.predict_log_proba(X)

    n_samples = y.shape[0]
    n_classes = np.unique(y).size
    assert_equal(probas.shape, (n_samples, n_classes))
    assert_array_equal(probas.argmax(axis=1), log_probas.argmax(axis=1))
    assert_array_equal(log_probas, np.log(probas))
def test_predict_proba_multilabel():
    """Multilabel probabilities must NOT be softmax-normalised: row sums may
    differ from 1, and thresholding at 0.5 must recover the labels."""
    # Test that predict_proba works as expected for multilabel.
    # Multilabel should not use softmax which makes probabilities sum to 1
    X, Y = make_multilabel_classification(n_samples=50, random_state=0,
                                          return_indicator=True)
    n_samples, n_classes = Y.shape

    clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30,
                        random_state=0)
    clf.fit(X, Y)
    y_proba = clf.predict_proba(X)

    assert_equal(y_proba.shape, (n_samples, n_classes))
    assert_array_equal(y_proba > 0.5, Y)

    y_log_proba = clf.predict_log_proba(X)
    proba_max = y_proba.argmax(axis=1)
    proba_log_max = y_log_proba.argmax(axis=1)

    # If rows summed to 1 (softmax), this squared deviation would be ~0.
    assert_greater((y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1), 1e-10)
    assert_array_equal(proba_max, proba_log_max)
    assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
    """Dense and CSR inputs must yield the same predictions."""
    X = X_digits_binary[:50]
    y = y_digits_binary[:50]
    X_sparse = csr_matrix(X)
    mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=15,
                        random_state=1)
    # Fit on the dense matrix, then refit on the sparse one.
    mlp.fit(X, y)
    dense_pred = mlp.predict(X)
    mlp.fit(X_sparse, y)
    sparse_pred = mlp.predict(X_sparse)
    assert_almost_equal(dense_pred, sparse_pred)
    # The sparsely-refitted model must also agree across input formats.
    assert_array_equal(mlp.predict(X), mlp.predict(X_sparse))
def test_tolerance():
    # A loose tolerance must make the SGD solver exit its loop well
    # before the iteration budget is exhausted.
    X, y = [[3, 2], [1, 6]], [1, 0]
    clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd')
    clf.fit(X, y)
    assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
    # Verbose mode should report per-iteration progress on stdout.
    X, y = [[3, 2], [1, 6]], [1, 0]
    clf = MLPClassifier(solver='sgd', max_iter=2, verbose=10,
                        hidden_layer_sizes=2)

    # Capture stdout while fitting.
    old_stdout = sys.stdout
    sys.stdout = output = StringIO()
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
    clf.partial_fit(X, y)
    sys.stdout = old_stdout

    assert 'Iteration' in output.getvalue()
def test_early_stopping():
    X = X_digits_binary[:100]
    y = y_digits_binary[:100]
    tol = 0.2
    clf = MLPClassifier(tol=tol, max_iter=3000, solver='sgd',
                        early_stopping=True)
    clf.fit(X, y)
    # Early stopping must have triggered before the iteration budget.
    assert_greater(clf.max_iter, clf.n_iter_)

    valid_scores = clf.validation_scores_
    best = clf.best_validation_score_
    assert_equal(max(valid_scores), best)
    # The final validation scores stay within tol of the best one.
    assert_greater(best + tol, valid_scores[-2])
    assert_greater(best + tol, valid_scores[-1])
def test_adaptive_learning_rate():
    # With an adaptive schedule, the solver must both converge early and
    # have decayed its step size to (near) zero.
    X, y = [[3, 2], [1, 6]], [1, 0]
    clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd',
                        learning_rate='adaptive')
    clf.fit(X, y)
    assert_greater(clf.max_iter, clf.n_iter_)
    assert_greater(1e-6, clf._optimizer.learning_rate)
@ignore_warnings(RuntimeError)
def test_warm_start():
    X, y = X_iris, y_iris

    # Alternative label vectors whose class sets differ from {0, 1, 2}.
    y_2classes = np.array([0] * 75 + [1] * 75)
    y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
    y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
    y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
    y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)

    # Refitting with the same class set must succeed silently.
    clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
                        warm_start=True).fit(X, y)
    clf.fit(X, y)
    clf.fit(X, y_3classes)

    # Any change in the observed class set must raise a ValueError.
    for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
        clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
                            warm_start=True).fit(X, y)
        message = ('warm_start can only be used where `y` has the same '
                   'classes as in the previous call to fit.'
                   ' Previously got [0 1 2], `y` has %s' % np.unique(y_i))
        assert_raise_message(ValueError, message, clf.fit, X, y_i)
| bsd-3-clause |
pratapvardhan/pandas | pandas/tests/sparse/test_groupby.py | 18 | 1736 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseGroupBy(object):

    def setup_method(self, method):
        # A small mixed-dtype frame (strings, floats, NaNs) plus its
        # sparse twin, so grouped results can be compared pairwise.
        self.dense = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                                         'foo', 'bar', 'foo', 'foo'],
                                   'B': ['one', 'one', 'two', 'three',
                                         'two', 'two', 'one', 'three'],
                                   'C': np.random.randn(8),
                                   'D': np.random.randn(8),
                                   'E': [np.nan, np.nan, 1, 2,
                                         np.nan, 1, np.nan, np.nan]})
        self.sparse = self.dense.to_sparse()

    def test_first_last_nth(self):
        # first / last / nth must agree between sparse and dense groupbys.
        grouped_sparse = self.sparse.groupby('A')
        grouped_dense = self.dense.groupby('A')
        for name in ('first', 'last'):
            tm.assert_frame_equal(getattr(grouped_sparse, name)(),
                                  getattr(grouped_dense, name)())
        tm.assert_frame_equal(grouped_sparse.nth(1),
                              grouped_dense.nth(1))

    def test_aggfuncs(self):
        grouped_sparse = self.sparse.groupby('A')
        grouped_dense = self.dense.groupby('A')
        tm.assert_frame_equal(grouped_sparse.mean(),
                              grouped_dense.mean())
        # ToDo: sparse sum includes str column
        # tm.assert_frame_equal(grouped_sparse.sum(),
        #                       grouped_dense.sum())
        tm.assert_frame_equal(grouped_sparse.count(),
                              grouped_dense.count())
| bsd-3-clause |
lbishal/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    """Draw the separating line of a fitted linear binary classifier."""
    # w0*x + w1*y + b = 0  =>  y = -(w0/w1)*x - b/w1
    w = clf.coef_[0]
    slope = -w[0] / w[1]
    # Extend slightly past the data range so the line spans the plot.
    xx = np.linspace(min_x - 5, max_x + 5)
    yy = slope * xx - clf.intercept_[0] / w[1]
    plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    """Fit a one-vs-rest linear SVC on a 2-D projection of (X, Y) and
    draw the samples, the two class memberships and both decision
    boundaries into subplot `subplot` of the current 2x2 figure.
    """
    # Project to 2-D: unsupervised (PCA) or supervised (CCA).
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError

    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])

    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])

    # One linear SVC per label (one-vs-rest).
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)

    plt.subplot(2, 2, subplot)
    plt.title(title)

    # Indices of samples carrying label 0 resp. label 1; samples with
    # both labels end up circled twice.
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
                facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
                facecolors='none', linewidths=2, label='Class 2')

    # Decision boundary of each per-label estimator.
    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())

    # Pad the axes limits proportionally to the data extent.
    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    if subplot == 2:
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))

# Top row: dataset that may contain unlabeled (all-zero) samples.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      random_state=1)

plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")

# Bottom row: every sample carries at least one label.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      random_state=1)

plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")

plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
ettm2012/MissionPlanner | Lib/site-packages/scipy/signal/ltisys.py | 53 | 23848 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
#
from filter_design import tf2zpk, zpk2tf, normalize
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
#import scipy.interpolate as interpolate
import scipy.integrate as integrate
import scipy.linalg as linalg
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
def tf2ss(num, den):
    """Transfer function to state-space representation.

    Parameters
    ----------
    num, den : array_like
        Sequences representing the numerator and denominator
        polynomials.

    Returns
    -------
    A, B, C, D : ndarray
        State space representation of the system, in controller
        canonical form.
    """
    # Normalization strips leading zeros and validates the arrays.
    num, den = normalize(num, den)
    if len(num.shape) == 1:
        # Promote a SISO numerator to a one-row (single output) matrix.
        num = asarray([num], num.dtype)
    M = num.shape[1]
    K = len(den)
    if M > K:
        raise ValueError("Improper transfer function.")
    if M == 0 or K == 0:
        # Null system: every matrix is empty.
        return (array([], float), array([], float),
                array([], float), array([], float))
    # Left-pad the numerator so it has as many columns as the denominator.
    num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
    D = num[:, 0] if num.shape[-1] > 0 else array([], float)
    if K == 1:
        # Pure gain, no dynamics: only D is non-empty.
        return array([], float), array([], float), array([], float), D
    # Controller canonical form: the first row of A carries -den[1:],
    # the rest is a shifted identity.
    A = r_[-array([den[1:]]), eye(K - 2, K - 1)]
    B = eye(K - 1, 1)
    C = num[:, 1:] - num[:, 0] * den[1:]
    return A, B, C, D
def _none_to_empty(arg):
    """Map None to an empty list; pass any other value through unchanged."""
    return [] if arg is None else arg
def abcd_normalize(A=None, B=None, C=None, D=None):
    """Check state-space matrices and ensure they are rank-2.

    Missing matrices are replaced by empty arrays and, where their shape
    is determined by the matrices that were given, by zero matrices of
    the appropriate size.

    Parameters
    ----------
    A, B, C, D : array_like, optional
        State-space matrices. ``None`` is treated as "not given".

    Returns
    -------
    A, B, C, D : ndarray
        The input matrices as rank-2 arrays.

    Raises
    ------
    ValueError
        If any input has more than two dimensions, or if the shapes of
        the given matrices are mutually inconsistent.
    """
    # None -> [] so atleast_2d produces an empty rank-2 array.
    A, B, C, D = [([] if M is None else M) for M in (A, B, C, D)]
    A, B, C, D = map(atleast_2d, (A, B, C, D))

    if ((len(A.shape) > 2) or (len(B.shape) > 2) or
            (len(C.shape) > 2) or (len(D.shape) > 2)):
        raise ValueError("A, B, C, D arrays can be no larger than rank-2.")

    MA, NA = A.shape
    MB, NB = B.shape
    MC, NC = C.shape
    MD, ND = D.shape

    # Fill in a matrix that was not supplied but whose shape is fixed by
    # the others.  BUG FIX: ``zeros`` takes the shape as a single tuple;
    # the previous ``zeros(MB, NB)`` form passed NB as a dtype and raised
    # TypeError whenever one of these branches was taken.
    if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0):
        MC, NC = MD, NA
        C = zeros((MC, NC))
    if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0):
        MB, NB = MA, ND
        B = zeros((MB, NB))
    if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0):
        MD, ND = MC, NB
        D = zeros((MD, ND))
    if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0):
        MA, NA = MB, NC
        A = zeros((MA, NA))

    # Final consistency checks.
    if MA != NA:
        raise ValueError("A must be square.")
    if MA != MB:
        raise ValueError("A and B must have the same number of rows.")
    if NA != NC:
        raise ValueError("A and C must have the same number of columns.")
    if MD != MC:
        raise ValueError("C and D must have the same number of rows.")
    if ND != NB:
        raise ValueError("B and D must have the same number of columns.")
    return A, B, C, D
def ss2tf(A, B, C, D, input=0):
    """State-space to transfer function.

    Parameters
    ----------
    A, B, C, D : ndarray
        State-space representation of linear system.
    input : int, optional
        For multiple-input systems, the input to use.

    Returns
    -------
    num, den : 1D ndarray
        Numerator and denominator polynomials (as sequences)
        respectively.
    """
    # transfer function is C (sI - A)**(-1) B + D
    A, B, C, D = map(asarray, (A, B, C, D))
    # Check consistency and
    # make them all rank-2 arrays
    A, B, C, D = abcd_normalize(A, B, C, D)
    nout, nin = D.shape
    if input >= nin:
        raise ValueError("System does not have the input specified.")
    # make MOSI from possibly MOMI system: keep only the requested
    # input column of B and D.
    if B.shape[-1] != 0:
        B = B[:, input]
        B.shape = (B.shape[0], 1)
    if D.shape[-1] != 0:
        D = D[:, input]
    # Denominator is the characteristic polynomial of A; poly() raises
    # ValueError for an empty A (state-less system), in which case den=1.
    try:
        den = poly(A)
    except ValueError:
        den = 1
    # Static system (no states coupling input to output): the transfer
    # function reduces to D.
    if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0):
        num = numpy.ravel(D)
        if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0):
            den = []
        return num, den
    num_states = A.shape[0]
    # This sum only serves to promote to the common dtype of A, B, C, D
    # so `num` can hold complex coefficients when any input is complex.
    type_test = A[:, 0] + B[:, 0] + C[0, :] + D
    num = numpy.zeros((nout, num_states + 1), type_test.dtype)
    for k in range(nout):
        Ck = atleast_2d(C[k, :])
        # Uses det(sI - A + B Ck) = den(s) + Ck adj(sI - A) B, so the
        # numerator of output k is poly(A - B Ck) + (D_k - 1) den.
        num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
    return num, den
def zpk2ss(z, p, k):
    """Zero-pole-gain representation to state-space representation

    Parameters
    ----------
    z, p : sequence
        Zeros and poles.
    k : float
        System gain.

    Returns
    -------
    A, B, C, D : ndarray
        State-space matrices.
    """
    # Convert through the polynomial (transfer-function) form.
    num, den = zpk2tf(z, p, k)
    return tf2ss(num, den)
def ss2zpk(A, B, C, D, input=0):
    """State-space representation to zero-pole-gain representation.

    Parameters
    ----------
    A, B, C, D : ndarray
        State-space representation of linear system.
    input : int, optional
        For multiple-input systems, the input to use.

    Returns
    -------
    z, p : sequence
        Zeros and poles.
    k : float
        System gain.
    """
    # Convert through the polynomial (transfer-function) form.
    num, den = ss2tf(A, B, C, D, input=input)
    return tf2zpk(num, den)
class lti(object):
    """Linear Time Invariant class which simplifies representation.
    """
    # All three representations (num/den, zpk, state-space) are stored
    # simultaneously and kept mutually consistent: assigning to one of
    # them via normal attribute syntax triggers __setattr__ below, which
    # recomputes the other two.  __init__ therefore writes through
    # self.__dict__ directly to avoid recursive re-conversions.

    def __init__(self, *args, **kwords):
        """Initialize the LTI system using either:

        (numerator, denominator)
        (zeros, poles, gain)
        (A, B, C, D) -- state-space.
        """
        N = len(args)
        if N == 2:  # Numerator denominator transfer function input
            self.__dict__['num'], self.__dict__['den'] = normalize(*args)
            self.__dict__['zeros'], self.__dict__['poles'], \
                self.__dict__['gain'] = tf2zpk(*args)
            self.__dict__['A'], self.__dict__['B'], \
                self.__dict__['C'], \
                self.__dict__['D'] = tf2ss(*args)
            self.inputs = 1
            # A rank-2 numerator means one row per output.
            if len(self.num.shape) > 1:
                self.outputs = self.num.shape[0]
            else:
                self.outputs = 1
        elif N == 3:  # Zero-pole-gain form
            self.__dict__['zeros'], self.__dict__['poles'], \
                self.__dict__['gain'] = args
            self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args)
            self.__dict__['A'], self.__dict__['B'], \
                self.__dict__['C'], \
                self.__dict__['D'] = zpk2ss(*args)
            self.inputs = 1
            if len(self.zeros.shape) > 1:
                self.outputs = self.zeros.shape[0]
            else:
                self.outputs = 1
        elif N == 4:  # State-space form
            self.__dict__['A'], self.__dict__['B'], \
                self.__dict__['C'], \
                self.__dict__['D'] = abcd_normalize(*args)
            self.__dict__['zeros'], self.__dict__['poles'], \
                self.__dict__['gain'] = ss2zpk(*args)
            self.__dict__['num'], self.__dict__['den'] = ss2tf(*args)
            self.inputs = self.B.shape[-1]
            self.outputs = self.C.shape[0]
        else:
            raise ValueError("Needs 2, 3, or 4 arguments.")

    def __setattr__(self, attr, val):
        # Keep the three representations in sync: setting any attribute
        # of one form recomputes the other two from it.
        if attr in ['num', 'den']:
            self.__dict__[attr] = val
            self.__dict__['zeros'], self.__dict__['poles'], \
                self.__dict__['gain'] = \
                tf2zpk(self.num, self.den)
            self.__dict__['A'], self.__dict__['B'], \
                self.__dict__['C'], \
                self.__dict__['D'] = \
                tf2ss(self.num, self.den)
        elif attr in ['zeros', 'poles', 'gain']:
            self.__dict__[attr] = val
            self.__dict__['num'], self.__dict__['den'] = \
                zpk2tf(self.zeros,
                       self.poles, self.gain)
            self.__dict__['A'], self.__dict__['B'], \
                self.__dict__['C'], \
                self.__dict__['D'] = \
                zpk2ss(self.zeros,
                       self.poles, self.gain)
        elif attr in ['A', 'B', 'C', 'D']:
            self.__dict__[attr] = val
            self.__dict__['zeros'], self.__dict__['poles'], \
                self.__dict__['gain'] = \
                ss2zpk(self.A, self.B,
                       self.C, self.D)
            self.__dict__['num'], self.__dict__['den'] = \
                ss2tf(self.A, self.B,
                      self.C, self.D)
        else:
            # Any other attribute (inputs, outputs, ...) is stored as-is.
            self.__dict__[attr] = val

    def impulse(self, X0=None, T=None, N=None):
        # Delegates to the module-level impulse().
        return impulse(self, X0=X0, T=T, N=N)

    def step(self, X0=None, T=None, N=None):
        # Delegates to the module-level step().
        return step(self, X0=X0, T=T, N=N)

    def output(self, U, T, X0=None):
        # Simulate the response to input U over times T via lsim().
        return lsim(self, U, T, X0=X0)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
    """
    Simulate output of a continuous-time linear system, by using
    the ODE solver `scipy.integrate.odeint`.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)

    U : array_like (1D or 2D), optional
        An input array describing the input at each time T.  Linear
        interpolation is used between given times.  If there are
        multiple inputs, then each column of the rank-2 array
        represents an input.  If U is not given, the input is assumed
        to be zero.
    T : array_like (1D or 2D), optional
        The time steps at which the input is defined and at which the
        output is desired.  The default is 101 evenly spaced points on
        the interval [0,10.0].
    X0 : array_like (1D), optional
        The initial condition of the state vector.  If `X0` is not
        given, the initial conditions are assumed to be 0.
    kwargs : dict
        Additional keyword arguments are passed on to the function
        odeint.  See the notes below for more details.

    Returns
    -------
    T : 1D ndarray
        The time values for the output.
    yout : ndarray
        The response of the system.
    xout : ndarray
        The time-evolution of the state-vector.

    Notes
    -----
    This function uses :func:`scipy.integrate.odeint` to solve the
    system's differential equations.  Additional keyword arguments
    given to `lsim2` are passed on to `odeint`.  See the documentation
    for :func:`scipy.integrate.odeint` for the full list of arguments.
    """
    if isinstance(system, lti):
        sys = system
    else:
        sys = lti(*system)

    if X0 is None:
        X0 = zeros(sys.B.shape[0], sys.A.dtype)

    if T is None:
        # XXX T should really be a required argument, but U was
        # changed from a required positional argument to a keyword,
        # and T is after U in the argument list.  So we either: change
        # the API and move T in front of U; check here for T being
        # None and raise an excpetion; or assign a default value to T
        # here.  This code implements the latter.
        T = linspace(0, 10.0, 101)

    T = atleast_1d(T)
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")

    if U is not None:
        U = atleast_1d(U)
        if len(U.shape) == 1:
            U = U.reshape(-1, 1)
        sU = U.shape
        if sU[0] != len(T):
            raise ValueError("U must have the same number of rows "
                             "as elements in T.")
        if sU[1] != sys.inputs:
            raise ValueError("The number of inputs in U (%d) is not "
                             "compatible with the number of system "
                             "inputs (%d)" % (sU[1], sys.inputs))
        # BUG FIX: the module-level ``import scipy.interpolate`` is
        # commented out at the top of this file, so the reference below
        # raised NameError.  Import locally instead.
        import scipy.interpolate as interpolate
        # Create a callable that uses linear interpolation to
        # calculate the input at any time.
        ufunc = interpolate.interp1d(T, U, kind='linear',
                                     axis=0, bounds_error=False)

        def fprime(x, t, sys, ufunc):
            """The vector field of the linear system."""
            return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
        xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
        yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
    else:
        def fprime(x, t, sys):
            """The vector field of the linear system."""
            return dot(sys.A, x)
        xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
        yout = dot(sys.C, transpose(xout))

    return T, squeeze(transpose(yout)), xout
def lsim(system, U, T, X0=None, interp=1):
    """
    Simulate output of a continuous-time linear system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)

    U : array_like
        An input array describing the input at each time `T`
        (interpolation is assumed between given times).  If there are
        multiple inputs, then each column of the rank-2 array
        represents an input.
    T : array_like
        The time steps at which the input is defined and at which the
        output is desired.
    X0 : array_like, optional
        The initial conditions on the state vector (zero by default).
    interp : {1, 0}
        Whether to use linear (1) or zero-order hold (0) interpolation.

    Returns
    -------
    T : 1D ndarray
        Time values for the output.
    yout : 1D ndarray
        System response.
    xout : ndarray
        Time-evolution of the state-vector.
    """
    if isinstance(system, lti):
        sys = system
    else:
        sys = lti(*system)
    U = atleast_1d(U)
    T = atleast_1d(T)
    if len(U.shape) == 1:
        U = U.reshape((U.shape[0], 1))
    sU = U.shape
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")
    if sU[0] != len(T):
        raise ValueError("U must have the same number of rows "
                         "as elements in T.")
    if sU[1] != sys.inputs:
        raise ValueError("System does not define that many inputs.")

    if X0 is None:
        X0 = zeros(sys.B.shape[0], sys.A.dtype)

    xout = zeros((len(T), sys.B.shape[0]), sys.A.dtype)
    xout[0] = X0
    A = sys.A
    AT, BT = transpose(sys.A), transpose(sys.B)
    dt = T[1] - T[0]

    # Discretize the dynamics over one step of width dt via the matrix
    # exponential, computed from the eigendecomposition of A.
    # NOTE(review): this assumes A is diagonalizable and invertible.
    lam, v = linalg.eig(A)
    vt = transpose(v)
    vti = linalg.inv(vt)
    GT = dot(dot(vti, diag(numpy.exp(dt * lam))), vt).astype(xout.dtype)
    ATm1 = linalg.inv(AT)
    ATm2 = dot(ATm1, ATm1)
    I = eye(A.shape[0], dtype=A.dtype)
    GTmI = GT - I
    # F1T: zero-order-hold contribution of the input over a step;
    # F2T: additional first-order (linear interpolation) correction.
    F1T = dot(dot(BT, GTmI), ATm1)
    if interp:
        F2T = dot(BT, dot(GTmI, ATm2) / dt - ATm1)

    # FIX: use ``range`` instead of the Python-2-only ``xrange``, which
    # raised NameError under Python 3; behavior is unchanged on both.
    for k in range(1, len(T)):
        dt1 = T[k] - T[k-1]
        if dt1 != dt:
            # Non-uniform time grid: re-discretize for the new step size.
            dt = dt1
            GT = dot(dot(vti, diag(numpy.exp(dt * lam))), vt).astype(xout.dtype)
            GTmI = GT - I
            F1T = dot(dot(BT, GTmI), ATm1)
            if interp:
                F2T = dot(BT, dot(GTmI, ATm2) / dt - ATm1)

        xout[k] = dot(xout[k-1], GT) + dot(U[k-1], F1T)
        if interp:
            xout[k] = xout[k] + dot((U[k] - U[k-1]), F2T)

    yout = (squeeze(dot(U, transpose(sys.D))) +
            squeeze(dot(xout, transpose(sys.C))))
    return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
    """Compute a reasonable set of time samples for the response time.

    Used by `impulse`, `impulse2`, `step` and `step2` when the caller
    does not supply `T`.

    Parameters
    ----------
    A : ndarray
        The system matrix, which is square.
    n : int
        The number of time samples to generate.

    Returns
    -------
    t : ndarray
        The 1-D array of length `n` of time samples at which the
        response is to be computed.
    """
    # Span roughly seven time constants of the slowest mode.  What a
    # "reasonable" interval means for an unstable system is still open.
    slowest = min(abs(real(linalg.eigvals(A))))
    if slowest == 0.0:
        slowest = 1.0
    tc = 1.0 / slowest
    return linspace(0.0, 7 * tc, n)
def impulse(system, X0=None, T=None, N=None):
    """Impulse response of continuous-time system.

    Parameters
    ----------
    system : LTI class or tuple
        If specified as a tuple, the system is described as
        ``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``.
    X0 : array_like, optional
        Initial state-vector.  Defaults to zero.
    T : array_like, optional
        Time points.  Computed if not given.
    N : int, optional
        The number of time points to compute (if `T` is not given).

    Returns
    -------
    T : ndarray
        A 1-D array of time points.
    yout : ndarray
        A 1-D array containing the impulse response of the system
        (except for singularities at zero).
    """
    sys = system if isinstance(system, lti) else lti(*system)
    # The impulse enters through B; an initial state simply adds to it.
    B = sys.B if X0 is None else sys.B + X0
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    # Evaluate h(t) = C exp(A t) B via the eigendecomposition of A.
    s, v = linalg.eig(sys.A)
    vi = linalg.inv(v)
    C = sys.C
    h = zeros(T.shape, sys.A.dtype)
    for k, t in enumerate(T):
        eA = dot(dot(v, diag(numpy.exp(s * t))), vi).astype(h.dtype)
        h[k] = squeeze(dot(dot(C, eA), B))
    return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
    """
    Impulse response of a single-input, continuous-time linear system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        2 (num, den)
        3 (zeros, poles, gain)
        4 (A, B, C, D)

    T : 1-D array_like, optional
        The time steps at which the input is defined and at which the
        output is desired.  Generated automatically when not given.
    X0 : 1-D array_like, optional
        The initial condition of the state vector.  Default: the zero
        vector.
    N : int, optional
        Number of time points to compute.  Default: 100.
    kwargs : various types
        Additional keyword arguments are passed on to
        `scipy.signal.lsim2`, which in turn passes them on to
        `scipy.integrate.odeint`; see the latter's documentation for
        information about these arguments.

    Returns
    -------
    T : ndarray
        The time values for the output.
    yout : ndarray
        The output response of the system.

    See Also
    --------
    impulse, lsim2, integrate.odeint

    Notes
    -----
    The solution is generated by calling `scipy.signal.lsim2`, which
    uses the differential equation solver `scipy.integrate.odeint`.

    .. versionadded:: 0.8.0

    Examples
    --------
    Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t)

    >>> system = ([1.0], [1.0, 2.0, 1.0])
    >>> t, y = impulse2(system)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, y)
    """
    sys = system if isinstance(system, lti) else lti(*system)
    B = sys.B
    if B.shape[-1] != 1:
        raise ValueError("impulse2() requires a single-input system.")
    B = B.squeeze()
    if X0 is None:
        X0 = zeros_like(B)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    # A unit impulse at t=0 is equivalent to starting from state B (plus
    # any user-supplied initial condition) and applying zero input.
    U = zeros_like(T)
    Tr, Yr, Xr = lsim2(sys, U, T, B + X0, **kwargs)
    return Tr, Yr
def step(system, X0=None, T=None, N=None):
    """Step response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation.

        2 (num, den)
        3 (zeros, poles, gain)
        4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int
        Number of time points to compute if `T` is not given.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step2
    """
    sys = system if isinstance(system, lti) else lti(*system)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    # Unit-step input simulated with the matrix-exponential solver.
    U = ones(T.shape, sys.A.dtype)
    T_out, yout, _ = lsim(sys, U, T, X0=X0)
    return T_out, yout
def step2(system, X0=None, T=None, N=None, **kwargs):
    """Step response of continuous-time system.

    Functionally the same as `scipy.signal.step`, but the simulation is
    carried out by `scipy.signal.lsim2` (i.e. `scipy.integrate.odeint`).

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation.

        2 (num, den)
        3 (zeros, poles, gain)
        4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int
        Number of time points to compute if `T` is not given.
    **kwargs :
        Additional keyword arguments are passed on the function
        `scipy.signal.lsim2`, which in turn passes them on to
        :func:`scipy.integrate.odeint`.  See the documentation for
        :func:`scipy.integrate.odeint` for information about these
        arguments.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    sys = system if isinstance(system, lti) else lti(*system)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    # Unit-step input simulated with the ODE-integrator backend.
    U = ones(T.shape, sys.A.dtype)
    T_out, yout, _ = lsim2(sys, U, T, X0=X0, **kwargs)
    return T_out, yout
| gpl-3.0 |
vortex-ape/scikit-learn | sklearn/cluster/birch.py | 4 | 23758 | # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError, ConvergenceWarning
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
    """Yield each row of the CSR matrix `X` as a dense 1-D array.

    Densifying one row at a time avoids constructing a sparse matrix
    object per row, which would be expensive.
    """
    n_features = X.shape[1]
    indices, data, indptr = X.indices, X.data, X.indptr
    # Consecutive indptr entries delimit each row's nonzero slice.
    for startptr, endptr in zip(indptr[:-1], indptr[1:]):
        row = np.zeros(n_features)
        row[indices[startptr:endptr]] = data[startptr:endptr]
        yield row
def _split_node(node, threshold, branching_factor):
    """The node has to be split if there is no place for a new subcluster
    in the node.

    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to the nearest distance between the subclusters to the
       pair of distant subclusters.
    4. The two nodes are set as children to the two subclusters.

    Returns the two new subclusters whose children are the two new nodes.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2

    # Splice the two new nodes into the doubly linked list of leaves in
    # place of the node being split.
    if node.is_leaf:
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2

    # Pairwise squared distances between the node's centroids; the two
    # subclusters farthest apart become the seeds of the split.
    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]

    farthest_idx = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    # Distances of every subcluster to each of the two seed subclusters.
    node1_dist, node2_dist = dist[(farthest_idx,)]

    # Assign each subcluster to whichever seed it is closer to.
    node1_closer = node1_dist < node2_dist
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True.
the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
    def __init__(self, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features

        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        self.subclusters_ = []
        # One spare row beyond branching_factor, so a node can hold an
        # extra subcluster temporarily before being split.
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        # Leaf-chain pointers; only meaningful when is_leaf is True.
        self.prev_leaf_ = None
        self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.

        Returns True when this node now holds more than ``branching_factor``
        subclusters and must itself be split by the caller; False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False
        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # argmin of (-2 * c.x + ||c||^2) picks the closest centroid c, since
        # the missing ||x||^2 term is constant across candidates.
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]
        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)
            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False
            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                # _split_node is defined elsewhere in this module.
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)
                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False
        # good to go!
        else:
            # Leaf-level subcluster: try to absorb the sample directly.
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False
            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False
            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster(object):
    """A single clustering-feature (CF) entry of a _CFNode.

    A CFSubcluster summarises a group of samples by their count, linear sum
    and squared sum, and may own a child _CFNode.

    Parameters
    ----------
    linear_sum : ndarray, shape (n_features,), optional
        Sample. Kept optional so empty subclusters can be created.

    Attributes
    ----------
    n_samples_ : int
        Number of samples that belong to the subcluster.
    linear_sum_ : ndarray
        Linear sum of all member samples (avoids keeping samples in memory).
    squared_sum_ : float
        Sum of squared l2 norms of all member samples.
    centroid_ : ndarray
        Cached centroid, so ``CFNode.centroids_`` never recomputes it.
    child_ : _CFNode
        Child node of the subcluster, if any.
    sq_norm_ : ndarray
        Cached squared norm of the centroid, used for fast pairwise
        minimum-distance computations.
    """
    def __init__(self, linear_sum=None):
        if linear_sum is None:
            # Empty subcluster: no samples seen yet.
            self.n_samples_ = 0
            self.squared_sum_ = 0.0
            self.linear_sum_ = 0
        else:
            # Seed the subcluster from a single sample; centroid_ aliases
            # linear_sum_ on purpose (they are equal for one sample).
            self.n_samples_ = 1
            self.centroid_ = self.linear_sum_ = linear_sum
            self.squared_sum_ = self.sq_norm_ = np.dot(
                self.linear_sum_, self.linear_sum_)
        self.child_ = None
    def update(self, subcluster):
        """Absorb ``subcluster`` into this one and refresh cached stats."""
        self.n_samples_ += subcluster.n_samples_
        self.linear_sum_ += subcluster.linear_sum_
        self.squared_sum_ += subcluster.squared_sum_
        new_centroid = self.linear_sum_ / self.n_samples_
        self.centroid_ = new_centroid
        self.sq_norm_ = np.dot(new_centroid, new_centroid)
    def merge_subcluster(self, nominee_cluster, threshold):
        """Merge ``nominee_cluster`` in if the merged radius stays within
        ``threshold``; return whether the merge happened.
        """
        new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
        new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
        new_n = self.n_samples_ + nominee_cluster.n_samples_
        new_centroid = (1 / new_n) * new_ls
        new_norm = np.dot(new_centroid, new_centroid)
        dot_product = (-2 * new_n) * new_norm
        sq_radius = (new_ss + dot_product) / new_n + new_norm
        # Reject the merge if the merged subcluster would grow too large.
        if sq_radius > threshold ** 2:
            return False
        self.n_samples_ = new_n
        self.linear_sum_ = new_ls
        self.squared_sum_ = new_ss
        self.centroid_ = new_centroid
        self.sq_norm_ = new_norm
        return True
    @property
    def radius(self):
        """Root-mean-square distance of the member samples from the centroid."""
        cross = -2 * np.dot(self.linear_sum_, self.centroid_)
        return sqrt(
            ((self.squared_sum_ + cross) / self.n_samples_) +
            self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
    """Implements the Birch clustering algorithm.

    It is a memory-efficient, online-learning algorithm provided as an
    alternative to :class:`MiniBatchKMeans`. It constructs a tree
    data structure with the cluster centroids being read off the leaf.
    These can be either the final cluster centroids or can be provided as input
    to another clustering algorithm such as :class:`AgglomerativeClustering`.

    Read more in the :ref:`User Guide <birch>`.

    Parameters
    ----------
    threshold : float, default 0.5
        The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be lesser than the threshold. Otherwise a new
        subcluster is started. Setting this value to be very low promotes
        splitting and vice-versa.
    branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new samples enters
        such that the number of subclusters exceed the branching_factor then
        that node is split into two nodes with the subclusters redistributed
        in each. The parent subcluster of that node is removed and two new
        subclusters are added as parents of the 2 split nodes.
    n_clusters : int, instance of sklearn.cluster model, default 3
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples.
        - `None` : the final clustering step is not performed and the
          subclusters are returned as they are.
        - `sklearn.cluster` Estimator : If a model is provided, the model is
          fit treating the subclusters as new samples and the initial data is
          mapped to the label of the closest subcluster.
        - `int` : the model fit is :class:`AgglomerativeClustering` with
          `n_clusters` set to be equal to the int.
    compute_labels : bool, default True
        Whether or not to compute labels for each fit.
    copy : bool, default True
        Whether or not to make a copy of the given data. If set to False,
        the initial data will be overwritten.

    Attributes
    ----------
    root_ : _CFNode
        Root of the CFTree.
    dummy_leaf_ : _CFNode
        Start pointer to all the leaves.
    subcluster_centers_ : ndarray,
        Centroids of all subclusters read directly from the leaves.
    subcluster_labels_ : ndarray,
        Labels assigned to the centroids of the subclusters after
        they are clustered globally.
    labels_ : ndarray, shape (n_samples,)
        Array of labels assigned to the input data.
        if partial_fit is used instead of fit, they are assigned to the
        last batch of data.

    Examples
    --------
    >>> from sklearn.cluster import Birch
    >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
    >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
    ... compute_labels=True)
    >>> brc.fit(X) # doctest: +NORMALIZE_WHITESPACE
    Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
       threshold=0.5)
    >>> brc.predict(X)
    array([0, 0, 0, 1, 1, 1])

    References
    ----------
    * Tian Zhang, Raghu Ramakrishnan, Maron Livny
      BIRCH: An efficient data clustering method for large databases.
      http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf

    * Roberto Perdisci
      JBirch - Java implementation of BIRCH clustering algorithm
      https://code.google.com/archive/p/jbirch

    Notes
    -----
    The tree data structure consists of nodes with each node consisting of
    a number of subclusters. The maximum number of subclusters in a node
    is determined by the branching factor. Each subcluster maintains a
    linear sum, squared sum and the number of samples in that subcluster.
    In addition, each subcluster can also have a node as its child, if the
    subcluster is not a member of a leaf node.

    For a new point entering the root, it is merged with the subcluster closest
    to it and the linear sum, squared sum and the number of samples of that
    subcluster are updated. This is done recursively till the properties of
    the leaf node are updated.
    """
    def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
                 compute_labels=True, copy=True):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.n_clusters = n_clusters
        self.compute_labels = compute_labels
        self.copy = copy
    def fit(self, X, y=None):
        """
        Build a CF Tree for the input data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        y : Ignored
        """
        # Flag which entry point was used; _fit consults these to decide
        # whether to start a fresh tree.
        self.fit_, self.partial_fit_ = True, False
        return self._fit(X)
    def _fit(self, X):
        # Shared implementation behind fit() and partial_fit().
        X = check_array(X, accept_sparse='csr', copy=self.copy)
        threshold = self.threshold
        branching_factor = self.branching_factor
        if branching_factor <= 1:
            raise ValueError("Branching_factor should be greater than one.")
        n_samples, n_features = X.shape
        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        partial_fit = getattr(self, 'partial_fit_')
        has_root = getattr(self, 'root_', None)
        if getattr(self, 'fit_') or (partial_fit and not has_root):
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
                                 n_features=n_features)
            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(threshold, branching_factor,
                                       is_leaf=True, n_features=n_features)
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X
        for sample in iter_func(X):
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)
            if split:
                # The root overflowed: split it and grow the tree one level.
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor)
                del self.root_
                self.root_ = _CFNode(threshold, branching_factor,
                                     is_leaf=False,
                                     n_features=n_features)
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)
        # Gather every leaf centroid; these are the "samples" for the
        # optional global clustering step.
        centroids = np.concatenate([
            leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids
        self._global_clustering(X)
        return self
    def _get_leaves(self):
        """
        Retrieve the leaves of the CF Node.

        Returns
        -------
        leaves : array-like
            List of the leaf nodes.
        """
        # Walk the singly linked leaf chain that starts at dummy_leaf_.
        leaf_ptr = self.dummy_leaf_.next_leaf_
        leaves = []
        while leaf_ptr is not None:
            leaves.append(leaf_ptr)
            leaf_ptr = leaf_ptr.next_leaf_
        return leaves
    def partial_fit(self, X=None, y=None):
        """
        Online learning. Prevents rebuilding of CFTree from scratch.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features), None
            Input data. If X is not provided, only the global clustering
            step is done.
        y : Ignored
        """
        self.partial_fit_, self.fit_ = True, False
        if X is None:
            # Perform just the final global clustering step.
            self._global_clustering()
            return self
        else:
            self._check_fit(X)
            return self._fit(X)
    def _check_fit(self, X):
        # Validate that the estimator is fitted (or being partially fitted)
        # and that X has a compatible number of features.
        is_fitted = hasattr(self, 'subcluster_centers_')
        # Called by partial_fit, before fitting.
        has_partial_fit = hasattr(self, 'partial_fit_')
        # Should raise an error if one does not fit before predicting.
        if not (is_fitted or has_partial_fit):
            raise NotFittedError("Fit training data before predicting")
        if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
            raise ValueError(
                "Training data and predicted data do "
                "not have same number of features.")
    def predict(self, X):
        """
        Predict data using the ``centroids_`` of subclusters.

        Avoid computation of the row norms of X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        labels : ndarray, shape(n_samples)
            Labelled data.
        """
        X = check_array(X, accept_sparse='csr')
        self._check_fit(X)
        # argmin of (-2 * X.C^T + ||C||^2) per row equals the nearest-centroid
        # assignment; the per-row ||x||^2 term is constant and can be omitted.
        reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
        reduced_distance *= -2
        reduced_distance += self._subcluster_norms
        return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
    def transform(self, X):
        """
        Transform X into subcluster centroids dimension.

        Each dimension represents the distance from the sample point to each
        cluster centroid.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
            Transformed data.
        """
        check_is_fitted(self, 'subcluster_centers_')
        return euclidean_distances(X, self.subcluster_centers_)
    def _global_clustering(self, X=None):
        """
        Global clustering for the subclusters obtained after fitting
        """
        clusterer = self.n_clusters
        centroids = self.subcluster_centers_
        compute_labels = (X is not None) and self.compute_labels
        # Preprocessing for the global clustering.
        not_enough_centroids = False
        if isinstance(clusterer, int):
            clusterer = AgglomerativeClustering(
                n_clusters=self.n_clusters)
            # There is no need to perform the global clustering step.
            if len(centroids) < self.n_clusters:
                not_enough_centroids = True
        elif (clusterer is not None and not
              hasattr(clusterer, 'fit_predict')):
            raise ValueError("n_clusters should be an instance of "
                             "ClusterMixin or an int")
        # To use in predict to avoid recalculation.
        self._subcluster_norms = row_norms(
            self.subcluster_centers_, squared=True)
        if clusterer is None or not_enough_centroids:
            # Each subcluster becomes its own label.
            self.subcluster_labels_ = np.arange(len(centroids))
            if not_enough_centroids:
                warnings.warn(
                    "Number of subclusters found (%d) by Birch is less "
                    "than (%d). Decrease the threshold."
                    % (len(centroids), self.n_clusters), ConvergenceWarning)
        else:
            # The global clustering step that clusters the subclusters of
            # the leaves. It assumes the centroids of the subclusters as
            # samples and finds the final centroids.
            self.subcluster_labels_ = clusterer.fit_predict(
                self.subcluster_centers_)
        if compute_labels:
            self.labels_ = self.predict(X)
| bsd-3-clause |
rdhyee/PyTables | doc/sphinxext/plot_directive.py | 65 | 20399 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
(If None or empty, file names are relative to the directoly where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback
import sphinx
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
    """Sphinx extension entry point: register config values and ``plot``."""
    # Stash the app so the directive handlers can reach config/confdir later.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    # (name, default) pairs; all trigger a rebuild when changed (third
    # argument of add_config_value is True).
    config_defaults = [
        ('plot_pre_code', ''),
        ('plot_include_source', False),
        ('plot_formats', ['png', 'hires.png', 'pdf']),
        ('plot_basedir', None),
        ('plot_html_show_formats', True),
    ]
    for name, default in config_defaults:
        app.add_config_value(name, default, True)
    app.add_directive('plot', plot_directive, True, (0, 1, False),
                      **plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # Directive entry point registered with docutils; all real work happens
    # in run().  (The signature is dictated by the docutils directive API.)
    return run(arguments, content, options, state_machine, state, lineno)
# Reuse the module docstring (the usage documentation) as the directive's help.
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
    """Parse a docutils option value into a bool.

    A missing or blank value means the option was used as a flag -> True.
    Raises ValueError for anything that is not a recognised boolean word.
    """
    if not arg or not arg.strip():
        return True
    normalized = arg.strip().lower()
    if normalized in ('no', '0', 'false'):
        return False
    if normalized in ('yes', '1', 'true'):
        return True
    raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    """Validate the ``:format:`` option: either 'python' or 'doctest'.

    The module docstring documents ``format : {'python', 'doctest'}`` and
    ``run()`` branches on ``'python'`` vs anything-else-is-doctest; the
    previous choice list offered 'lisp', which nothing handled.
    """
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
    """Validate the ``:align:`` image option against the allowed keywords."""
    choices = ("top", "middle", "bottom", "left", "center", "right")
    return directives.choice(arg, choices)
# Option spec for the plot directive: maps each option name to the docutils
# validator/conversion function applied to its value.
plot_directive_options = {'alt': directives.unchanged,
                          'height': directives.length_or_unitless,
                          'width': directives.length_or_percentage_or_unitless,
                          'scale': directives.nonnegative_int,
                          'align': _option_align,
                          'class': directives.class_option,
                          'include-source': _option_boolean,
                          'format': _option_format,
                          }
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
    # Sphinx depends on either Jinja or Jinja2
    import jinja2
    def format_template(template, **kw):
        return jinja2.Template(template).render(**kw)
except ImportError:
    # Fall back to the legacy Jinja 1 API when jinja2 is unavailable.
    import jinja
    def format_template(template, **kw):
        return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
    """Bookkeeping for one output image rendered in several formats."""
    def __init__(self, basename, dirname):
        self.basename = basename
        self.dirname = dirname
        # File formats (extensions) generated so far for this image.
        self.formats = []
    def filename(self, format):
        """Full path of this image in the given *format* (extension)."""
        return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
    def filenames(self):
        """Full paths for every generated format, in insertion order."""
        paths = []
        for fmt in self.formats:
            paths.append(self.filename(fmt))
        return paths
def run(arguments, content, options, state_machine, state, lineno):
    # Core of the plot directive: resolve the source (file argument or
    # inline content), render the figures, and splice generated reST back
    # into the document.  NOTE(review): this module uses Python 2 syntax
    # (has_key, `except E, err`) and cannot run under Python 3 as-is.
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")
    document = state_machine.document
    config = document.settings.env.config
    options.setdefault('include-source', config.plot_include_source)
    # determine input
    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)
    if arguments:
        # Script given by file name, relative to the rst file or to
        # plot_basedir when configured.
        if not config.plot_basedir:
            source_file_name = os.path.join(rst_dir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))
        code = open(source_file_name, 'r').read()
        output_base = os.path.basename(source_file_name)
    else:
        # Inline content: number the plots per document via _plot_counter.
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''
    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')
    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if options.has_key('format'):
        # Explicit :format: overrides auto-detection.
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True
    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]
    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext
    # make figures
    try:
        results = makefig(code, source_file_name, build_dir, output_base,
                          config)
        errors = []
    except PlotError, err:
        # Report the failure as a docutils system message (severity 2 =
        # warning) and fall back to emitting the code without images.
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s: %s" % (output_base, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]
    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""
        # Pass through only the image-related options to the template.
        opts = [':%s: %s' % (key, val) for key, val in options.items()
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        if j == 0:
            src_link = source_link
        else:
            src_link = None
        result = format_template(
            TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats)
        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")
    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)
    # copy image files to builder's output directory
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                shutil.copyfile(fn, os.path.join(dest_dir,
                                                 os.path.basename(fn)))
    # copy script (if necessary)
    if source_file_name == rst_file:
        target_name = os.path.join(dest_dir, output_base + source_ext)
        f = open(target_name, 'w')
        f.write(unescape_doctest(code))
        f.close()
    return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
    """Return True if *text* looks like doctest input rather than plain code.

    Source that compiles as ordinary Python is never treated as doctest;
    otherwise we look for a line starting with the ``>>>`` prompt.
    """
    try:
        compile(text, '<string>', 'exec')
    except SyntaxError:
        # Not plain Python; fall through to the prompt check.
        return re.search(r'^\s*>>>', text, re.M) is not None
    return False
def unescape_doctest(text):
    """Strip doctest prompts from *text*, turning it into runnable code.

    Plain Python input is returned unchanged. Prose lines between doctest
    snippets become comments so the line structure survives; blank lines
    are preserved.
    """
    if not contains_doctest(text):
        return text
    prompt = re.compile(r'^\s*(>>>|\.\.\.) (.*)$')
    pieces = []
    for line in text.split("\n"):
        m = prompt.match(line)
        if m:
            pieces.append(m.group(2) + "\n")
        elif line.strip():
            pieces.append("# " + line.strip() + "\n")
        else:
            pieces.append("\n")
    return "".join(pieces)
def split_code_at_show(text):
    """Split *text* into chunks, closing a chunk after each ``plt.show()``.

    In doctest-formatted input the marker is the prompted form
    ``>>> plt.show()``. A trailing chunk is kept only if it is non-blank.
    """
    marker = '>>> plt.show()' if contains_doctest(text) else 'plt.show()'
    parts = []
    current = []
    for line in text.split("\n"):
        current.append(line)
        if line.strip() == marker:
            parts.append("\n".join(current))
            current = []
    if "\n".join(current).strip():
        parts.append("\n".join(current))
    return parts
class PlotError(RuntimeError):
    """Raised when executing or rendering a plot script fails."""
    pass
def run_code(code, code_path, ns=None):
    # Execute *code* in namespace *ns* (created if None) and return the
    # namespace.  Raises PlotError on any failure.  NOTE(review): Python 2
    # only (`exec ... in ns` statement syntax, cStringIO).
    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.
    pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)
    # Redirect stdout
    stdout = sys.stdout
    sys.stdout = cStringIO.StringIO()
    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]
    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                # Fresh namespace: run the user-configured setup code first.
                exec setup.config.plot_pre_code in ns
            exec code in ns
        except (Exception, SystemExit), err:
            raise PlotError(traceback.format_exc())
    finally:
        # Always restore cwd, argv, sys.path and stdout, even on failure.
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
    """Return True if *derived* is missing or older than *original*.

    Both arguments are full file paths.
    """
    if not os.path.exists(derived):
        return True
    return os.stat(derived).st_mtime < os.stat(original).st_mtime
def makefig(code, code_path, output_dir, output_base, config):
    """
    Run a pyplot script *code* and save the images under *output_dir*
    with file names derived from *output_base*.

    Returns a list of (code_piece, [ImageFile, ...]) pairs, one per chunk
    produced by split_code_at_show().  Existing up-to-date images are
    reused instead of re-rendering.  NOTE(review): Python 2 only (xrange,
    `except E, err` syntax).
    """
    # -- Parse format list
    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
    formats = []
    for fmt in config.plot_formats:
        if isinstance(fmt, str):
            formats.append((fmt, default_dpi.get(fmt, 80)))
        elif type(fmt) in (tuple, list) and len(fmt)==2:
            formats.append((str(fmt[0]), int(fmt[1])))
        else:
            raise PlotError('invalid image format "%r" in plot_formats' % fmt)
    # -- Try to determine if all images already exist
    code_pieces = split_code_at_show(code)
    # Look for single-figure output files first
    all_exists = True
    img = ImageFile(output_base, output_dir)
    for format, dpi in formats:
        if out_of_date(code_path, img.filename(format)):
            all_exists = False
            break
        img.formats.append(format)
    if all_exists:
        return [(code, [img])]
    # Then look for multi-figure output files
    results = []
    all_exists = True
    for i, code_piece in enumerate(code_pieces):
        images = []
        for j in xrange(1000):
            img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
            for format, dpi in formats:
                if out_of_date(code_path, img.filename(format)):
                    all_exists = False
                    break
                img.formats.append(format)
            # assume that if we have one, we have them all
            if not all_exists:
                all_exists = (j > 0)
                break
            images.append(img)
        if not all_exists:
            break
        results.append((code_piece, images))
    if all_exists:
        return results
    # -- We didn't find the files, so build them
    results = []
    ns = {}
    for i, code_piece in enumerate(code_pieces):
        # Clear between runs
        plt.close('all')
        # Run code
        run_code(code_piece, code_path, ns)
        # Collect images
        images = []
        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
        for j, figman in enumerate(fig_managers):
            # Single figure from a single chunk keeps the plain base name;
            # otherwise encode chunk and figure indices in the file name.
            if len(fig_managers) == 1 and len(code_pieces) == 1:
                img = ImageFile(output_base, output_dir)
            else:
                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
                                output_dir)
            images.append(img)
            for format, dpi in formats:
                try:
                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except exceptions.BaseException, err:
                    raise PlotError(traceback.format_exc())
                img.formats.append(format)
        # Results
        results.append((code_piece, images))
    return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
# os.path.relpath is missing before Python 2.6; provide the 2.7
# implementation as a fallback.
try:
    from os.path import relpath
except ImportError:
    # Copied from Python 2.7
    if 'posix' in sys.builtin_module_names:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            from os.path import sep, curdir, join, abspath, commonprefix, \
                 pardir
            if not path:
                raise ValueError("no path specified")
            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)
            # Work out how much of the filepath is shared by start and path.
            i = len(commonprefix([start_list, path_list]))
            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    elif 'nt' in sys.builtin_module_names:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            # Windows variant: case-insensitive comparison, drive/UNC checks.
            from os.path import sep, curdir, join, abspath, commonprefix, \
                 pardir, splitunc
            if not path:
                raise ValueError("no path specified")
            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)
            if start_list[0].lower() != path_list[0].lower():
                unc_path, rest = splitunc(path)
                unc_start, rest = splitunc(start)
                if bool(unc_path) ^ bool(unc_start):
                    raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                     % (path, start))
                else:
                    raise ValueError("path is on drive %s, start on drive %s"
                                     % (path_list[0], start_list[0]))
            # Work out how much of the filepath is shared by start and path.
            for i in range(min(len(start_list), len(path_list))):
                if start_list[i].lower() != path_list[i].lower():
                    break
            else:
                i += 1
            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    else:
        raise RuntimeError("Unsupported platform (no relpath available!)")
| bsd-3-clause |
ikaee/bfr-attendant | facerecognitionlibrary/jni-build/jni/include/tensorflow/examples/skflow/mnist_rnn.py | 14 | 2812 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example builds rnn network for mnist data.
Borrowed structure from here: https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3%20-%20Neural%20Networks/recurrent_network.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics, preprocessing
import tensorflow as tf
from tensorflow.contrib import learn
# Training hyper-parameters.
learning_rate = 0.1
training_steps = 3000
batch_size = 128
# Network Parameters
n_input = 28  # MNIST data input (img shape: 28*28); one image row per timestep
n_steps = 28  # timesteps (one per image row)
n_hidden = 128  # hidden layer num of features
n_classes = 10  # MNIST total classes (0-9 digits)
### Download and load MNIST data.
mnist = learn.datasets.load_dataset('mnist')
X_train = mnist.train.images
y_train = mnist.train.labels
X_test = mnist.test.images
y_test = mnist.test.labels
# It's useful to scale to ensure Stochastic Gradient Descent will do the
# right thing.  The scaler must be fit on the training set only; the test
# set is transformed with the same statistics.  (Previously fit_transform
# was called on X_test too, which leaks test-set statistics and scales the
# two sets inconsistently.)
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
def rnn_model(X, y):
    """Build an LSTM classifier over MNIST rows treated as a time series.

    X: flat image batch, reshaped to (batch_size, n_steps, n_input) so each
    28-pixel image row becomes one timestep.  y: target labels.
    Returns whatever learn.models.logistic_regression yields (prediction/loss).

    NOTE(review): uses the TF 0.x API (tf.split(dim, num, value), tf.nn.rnn);
    this will not run unmodified on TF 1.x+ -- confirm the target TF version.
    """
    X = tf.reshape(X, [-1, n_steps, n_input])  # (batch_size, n_steps, n_input)
    # permute n_steps and batch_size -> (n_steps, batch_size, n_input)
    X = tf.transpose(X, [1, 0, 2])
    # Reshape to prepare input to hidden activation
    X = tf.reshape(X, [-1, n_input])  # (n_steps*batch_size, n_input)
    # Split data because the TF 0.x rnn cell needs a python list of inputs,
    # one tensor per timestep: n_steps * (batch_size, n_input)
    X = tf.split(0, n_steps, X)
    # Recurrent cell (original comment said GRU, but the code builds an LSTM)
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden)
    # Run the RNN; 'encoding' is the final cell state used as the sequence
    # representation fed into the softmax classifier.
    _, encoding = tf.nn.rnn(lstm_cell, X, dtype=tf.float32)
    return learn.models.logistic_regression(encoding, y)
classifier = learn.TensorFlowEstimator(model_fn=rnn_model, n_classes=n_classes,
batch_size=batch_size,
steps=training_steps,
learning_rate=learning_rate)
classifier.fit(X_train, y_train, logdir="/tmp/mnist_rnn")
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
gundramleifert/exp_tf | models/htr_iam/bdlstm_iam_v1.py | 1 | 11648 | # Author: Tobi and Gundram
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.contrib.layers import batch_norm
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals, clean_list
from util.CharacterMapper import get_cm_iam
from util.saver import PrefixSaver
from random import shuffle
import os
import time
import numpy as np
# import matplotlib.pyplot as plt
# import warnings
# Goes down to 10%
INPUT_PATH_TRAIN = './private/data/iam/lists/iam_train.lst'
INPUT_PATH_VAL = './private/data/iam/lists/iam_test.lst'
cm = get_cm_iam()
# Additional NaC Channel
nClasses = cm.size() + 1
nEpochs = 150
batchSize = 16
# learningRate = 0.001
# momentum = 0.9
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 32 # 64
# Depending on the size the image is skipped or zero padded
imgW = 2048 # 4096
image_depth = 1
nHiddenLSTM1 = 256
# Needs to be consistent with subsampling [X] in the model to correctly clean up the data
subsampling = 12
os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
valList = read_image_list(INPUT_PATH_VAL)
print("Cleaning up train list:")
trainList = clean_list(trainList, imgW, cm, subsampling)
print("Cleaning up validation list:")
valList = clean_list(valList, imgW, cm, subsampling)
numT = 1024 # number of training samples per epoch
stepsPerEpochTrain = numT / batchSize
stepsPerEpochVal = len(valList) / batchSize
def inference(images, seqLen, keep_prob, phase_train):
    """
    Build the conv / BLSTM / logit graph for line-level handwriting recognition.

    :param images: tensor [batch][Y][X][Z] with dim(Z)=channels float32
    :param seqLen: tensor with length of batchsize containing the length of the images [batch] int32
    :param keep_prob: tensor with dim=0 dropout-rate float32
    :param phase_train: tensor with dim=0 boolean
    :return: output of network and length of output sequences after convolution [batch][1][x/subsample][channels+1] and [batch]

    NOTE(review): written against the TF 0.x API (tf.pack, tf.split(dim, num, x),
    bidirectional_rnn); it will not run on TF 1.x+ unmodified.
    """
    with tf.variable_scope('network'):
        with tf.variable_scope('conv1') as scope:
            # 6x5 kernel with stride (4, 3): most spatial subsampling happens here
            kernel = tf.Variable(tf.truncated_normal([6, 5, image_depth, 32], stddev=5e-2), name='weights')
            # Weight Decay?
            # weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
            # tf.add_to_collection('losses', weight_decay)
            conv = tf.nn.conv2d(images, kernel, [1, 4, 3, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
            pre_activation = tf.nn.bias_add(conv, biases)
            conv1_bn = batch_norm(pre_activation, decay=0.999, is_training=phase_train, scope="BN1")
            conv1 = tf.nn.relu(conv1_bn, name=scope.name)
            norm1 = tf.nn.local_response_normalization(conv1, name='norm1')
            # track the per-image sequence length through the horizontal
            # stride of 3 (0.3333 approximates 1/3)
            seqFloat = tf.to_float(seqLen)
            seqL2 = tf.ceil(seqFloat * 0.3333)
        with tf.variable_scope('conv2') as scope:
            kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2), name='weights')
            # # Weight Decay?
            # weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
            # tf.add_to_collection('losses', weight_decay)
            conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
            pre_activation = tf.nn.bias_add(conv, biases)
            conv2_bn = batch_norm(pre_activation, decay=0.999, is_training=phase_train, scope="BN2")
            conv2 = tf.nn.relu(conv2_bn, name=scope.name)
            norm2 = tf.nn.local_response_normalization(conv2, name='norm2')
            # 2x horizontal pooling halves the sequence length
            pool2 = tf.nn.max_pool(norm2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
            seqL3 = tf.ceil(seqL2 * 0.5)
        with tf.variable_scope('conv3') as scope:
            kernel = tf.Variable(tf.truncated_normal([5, 3, 64, 128], stddev=5e-2), name='weights')
            # #Weight Decay?
            # weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
            # tf.add_to_collection('losses', weight_decay)
            conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
            pre_activation = tf.nn.bias_add(conv, biases)
            conv3_bn = batch_norm(pre_activation, decay=0.999, is_training=phase_train, scope="BN3")
            conv3 = tf.nn.relu(conv3_bn, name=scope.name)
            norm3 = tf.nn.local_response_normalization(conv3, name='norm3')
            # another 2x horizontal pooling; total horizontal subsampling is
            # 3*2*2 = 12, matching the module-level `subsampling` constant
            pool3 = tf.nn.max_pool(norm3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
            seqL4 = tf.ceil(seqL3 * 0.5)
        # NO POOLING HERE -> CTC needs an appropriate length.
        seqLenAfterConv = tf.to_int32(seqL4)
        with tf.variable_scope('RNN_Prep') as scope:
            # (#batch Y X Z) --> (X #batch Y Z)
            rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
            # (X #batch Y Z) --> (X #batch Y*Z)
            shape = rnnIn.get_shape()
            steps = shape[0]
            rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
            # (X #batch Y*Z) --> (X*#batch Y*Z)
            shape = rnnIn.get_shape()
            rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
            # (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
            rnnIn = tf.split(0, steps, rnnIn)
        with tf.variable_scope('BLSTM1') as scope:
            forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1,
                                          use_peepholes=True,
                                          state_is_tuple=True)
            droppedFW = rnn_cell.DropoutWrapper(forwardH1, output_keep_prob=keep_prob)
            backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1,
                                           use_peepholes=True,
                                           state_is_tuple=True)
            droppedBW = rnn_cell.DropoutWrapper(backwardH1, output_keep_prob=keep_prob)
            outputs, _, _ = bidirectional_rnn(droppedFW, droppedBW, rnnIn, dtype=tf.float32)
            # each timestep output is (batch, 2*nHidden); view it as
            # (batch, 2, nHidden) so forward/backward activations can be summed
            fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
            # outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
            # eventually TODO instead of reduce_sum make matrix multiply
            outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
        with tf.variable_scope('LOGIT') as scope:
            weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
                                                             stddev=np.sqrt(2.0 / nHiddenLSTM1)))
            biasesClasses = tf.Variable(tf.zeros([nClasses]))
            # per-timestep unscaled class scores; CTC consumes raw logits
            logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
            logits3d = tf.pack(logitsFin)
    return logits3d, seqLenAfterConv
def loss(logits3d, tgt, seqLenAfterConv):
    """Batch-summed CTC loss, gated on any pending UPDATE_OPS (e.g.
    batch-norm moving-average updates) so they run whenever the loss is
    evaluated."""
    ctc_total = tf.reduce_sum(ctc.ctc_loss(logits3d, tgt, seqLenAfterConv))
    pending = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    if not pending:
        return ctc_total
    barrier = tf.group(*pending)
    return control_flow_ops.with_dependencies([barrier], ctc_total)
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
####Graph input
inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, image_depth))
targetIxs = tf.placeholder(tf.int64)
targetVals = tf.placeholder(tf.int32)
targetShape = tf.placeholder(tf.int64)
targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
seqLengths = tf.placeholder(tf.int32, shape=(batchSize))
keep_prob = tf.placeholder(tf.float32)
trainIN = tf.placeholder_with_default(tf.constant(False), [])
logits3d, seqAfterConv = inference(inputX, seqLengths, keep_prob, trainIN)
loss = loss(logits3d, targetY, seqAfterConv)
saver = PrefixSaver('network', './private/models/iam_01/')
# optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
optimizer = tf.train.AdamOptimizer().minimize(loss)
# pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
edist = tf.edit_distance(pred, targetY, normalize=False)
tgtLens = tf.to_float(tf.size(targetY.values))
err = tf.reduce_sum(edist) / tgtLens
with tf.Session(graph=graph) as session:
# writer = tf.train.SummaryWriter('./log', session.graph)
print('Initializing')
tf.global_variables_initializer().run()
for epoch in range(nEpochs):
workList = trainList[:]
shuffle(workList)
workList = workList[0:numT]
print('Epoch', epoch + 1, '...')
lossT = 0
errT = 0
timeTS = time.time()
tTL = 0
for bStep in range(stepsPerEpochTrain):
bList, workList = workList[:batchSize], workList[batchSize:]
timeTemp = time.time()
batchInputs, \
batchSeqLengths, \
batchTargetIdxs, \
batchTargetVals, \
batchTargetShape = get_list_vals(
bList,
cm,
imgW,
mvn=False)
tTL += time.time() - timeTemp
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths, keep_prob: 0.5, trainIN: True}
_, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
lossT += lossB
errT += aErr
print('Train: CTC-loss ', lossT)
cerT = errT / stepsPerEpochTrain
print('Train: CER ', cerT)
print('Train: time ', time.time() - timeTS)
print('Time for loading train data: ', tTL)
workList = valList[:]
errV = 0
lossV = 0
timeVS = time.time()
tVL = 0
for bStep in range(stepsPerEpochVal):
bList, workList = workList[:batchSize], workList[batchSize:]
timeTemp = time.time()
batchInputs, \
batchSeqLengths, \
batchTargetIdxs, \
batchTargetVals, \
batchTargetShape = get_list_vals(
bList,
cm,
imgW,
mvn=False
)
tVL += time.time() - timeTemp
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths, keep_prob: 1.0, trainIN: False}
lossB, aErr = session.run([loss, err], feed_dict=feedDict)
# lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
# writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
# writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
lossV += lossB
errV += aErr
print('Val: CTC-loss ', lossV)
errVal = errV / stepsPerEpochVal
print('Val: CER ', errVal)
print('Val: time ', time.time() - timeVS)
print('Time for loading validation data: ', tVL)
# Write a checkpoint.
saveTime = time.time()
print('Saving...')
saver.save(session, global_step=epoch)
print('Time for saving: ', time.time() - saveTime) | apache-2.0 |
kaiserroll14/301finalproject | main/pandas/tseries/index.py | 9 | 75758 | # pylint: disable=E1101
from __future__ import division
import operator
import warnings
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.common import (_NS_DTYPE, _INT64_DTYPE,
_values_from_object, _maybe_box,
is_object_dtype, is_datetime64_dtype,
is_datetimetz, is_dtype_equal,
ABCSeries, is_integer, is_float,
DatetimeTZDtype)
from pandas.io.common import PerformanceWarning
from pandas.core.index import Index, Int64Index, Float64Index
import pandas.compat as compat
from pandas.compat import u
from pandas.tseries.frequencies import (
to_offset, get_period_alias,
Resolution)
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.tseries.timedeltas import to_timedelta
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas.tslib as tslib
import pandas._period as period
import pandas.algos as _algos
import pandas.index as _index
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _field_accessor(name, field, docstring=None):
    """Build a read-only property extracting a datetime field (e.g. 'year',
    'is_month_start') from the index's int64 (i8) values via tslib.

    Parameters
    ----------
    name : str
        Name assigned to the generated accessor function.
    field : str
        Field code understood by tslib.get_date_field / get_start_end_field.
    docstring : str, optional
        Docstring attached to the generated property.
    """
    def f(self):
        values = self.asi8
        if self.tz is not None:
            utc = _utc()
            if self.tz is not utc:
                # use local wall-clock i8 values so the extracted field
                # reflects the index's timezone, not UTC
                values = self._local_timestamps()
        if field in ['is_month_start', 'is_month_end',
                     'is_quarter_start', 'is_quarter_end',
                     'is_year_start', 'is_year_end']:
            # start/end fields depend on the frequency's anchor month
            # (fiscal quarters/years); default to December when no freq is set
            month_kw = self.freq.kwds.get('startingMonth', self.freq.kwds.get('month', 12)) if self.freq else 12
            result = tslib.get_start_end_field(values, field, self.freqstr, month_kw)
        else:
            result = tslib.get_date_field(values, field)
        # NaT entries are masked to NaN, hence the float64 conversion
        return self._maybe_mask_results(result, convert='float64')
    f.__name__ = name
    f.__doc__ = docstring
    return property(f)
def _dt_index_cmp(opname, nat_result=False):
    """
    Wrap comparison operations to convert datetime-like to datetime64

    Returns a method usable as a rich-comparison dunder on DatetimeIndex.
    `nat_result` is the boolean that comparisons against NaT resolve to
    (True only for __ne__).
    """
    def wrapper(self, other):
        func = getattr(super(DatetimeIndex, self), opname)
        if isinstance(other, datetime) or isinstance(other, compat.string_types):
            # scalar path: parse / convert to a tz-aware np.datetime64
            other = _to_m8(other, tz=self.tz)
            result = func(other)
            if com.isnull(other):
                # comparing everything against NaT yields nat_result
                result.fill(nat_result)
        else:
            # array-like path
            if isinstance(other, list):
                other = DatetimeIndex(other)
            elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
                other = _ensure_datetime64(other)
            result = func(np.asarray(other))
            result = _values_from_object(result)
            # positions where `other` is NaT get nat_result
            if isinstance(other, Index):
                o_mask = other.values.view('i8') == tslib.iNaT
            else:
                o_mask = other.view('i8') == tslib.iNaT
            if o_mask.any():
                result[o_mask] = nat_result
        # NaT entries in self likewise resolve to nat_result
        mask = self.asi8 == tslib.iNaT
        if mask.any():
            result[mask] = nat_result
        # support of bool dtype indexers
        if com.is_bool_dtype(result):
            return result
        return Index(result)
    return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
raise TypeError('%s type object %s' % (type(other), str(other)))
_midnight = time(0, 0)
def _new_DatetimeIndex(cls, d):
    """ This is called upon unpickling, rather than the default which doesn't have arguments
    and breaks __new__

    Rebuilds the index from the pickled attribute dict `d`.  The stored
    values are UTC, so any tz is popped out of `d` and re-applied via
    localize/convert instead of being passed to the constructor.
    """
    # data are already in UTC
    # so need to localize
    tz = d.pop('tz', None)
    result = cls.__new__(cls, verify_integrity=False, **d)
    if tz is not None:
        result = result.tz_localize('UTC').tz_convert(tz)
    return result
class DatetimeIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
tz : pytz.timezone or dateutil.tz.tzfile
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on order
- bool-ndarray where True signifies a DST time, False signifies
a non-DST time (note that this flag is only applicable for ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times
infer_dst : boolean, default False (DEPRECATED)
Attempt to infer fall dst-transition hours based on order
name : object
Name to be stored in the index
"""
_typ = 'datetimeindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]', **kwargs)
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__', nat_result=True)
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
_engine_type = _index.DatetimeEngine
tz = None
offset = None
_comparables = ['name', 'freqstr', 'tz']
_attributes = ['name', 'freq', 'tz']
_datetimelike_ops = ['year','month','day','hour','minute','second',
'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'days_in_month', 'daysinmonth',
'date','time','microsecond','nanosecond','is_month_start','is_month_end',
'is_quarter_start','is_quarter_end','is_year_start','is_year_end',
'tz','freq']
_is_numeric_dtype = False
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False,
closed=None, ambiguous='raise', dtype=None, **kwargs):
dayfirst = kwargs.pop('dayfirst', None)
yearfirst = kwargs.pop('yearfirst', None)
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, freq,
tz=tz, normalize=normalize, closed=closed,
ambiguous=ambiguous)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = tslib.parse_str_array_to_datetime(data, freq=freq,
dayfirst=dayfirst,
yearfirst=yearfirst)
else:
data = tools.to_datetime(data, errors='raise')
data.offset = freq
if isinstance(data, DatetimeIndex):
if name is not None:
data.name = name
if tz is not None:
return data.tz_localize(tz, ambiguous=ambiguous)
return data
if issubclass(data.dtype.type, compat.string_types):
data = tslib.parse_str_array_to_datetime(data, freq=freq,
dayfirst=dayfirst,
yearfirst=yearfirst)
if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
if isinstance(data, ABCSeries):
data = data._values
if isinstance(data, DatetimeIndex):
if tz is None:
tz = data.tz
subarr = data.values
if freq is None:
freq = data.offset
verify_integrity = False
else:
if data.dtype != _NS_DTYPE:
subarr = tslib.cast_to_nanoseconds(data)
else:
subarr = data
elif data.dtype == _INT64_DTYPE:
if isinstance(data, Int64Index):
raise TypeError('cannot convert Int64Index->DatetimeIndex')
if copy:
subarr = np.asarray(data, dtype=_NS_DTYPE)
else:
subarr = data.view(_NS_DTYPE)
else:
if isinstance(data, (ABCSeries, Index)):
values = data._values
else:
values = data
if lib.is_string_array(values):
subarr = tslib.parse_str_array_to_datetime(values, freq=freq, dayfirst=dayfirst,
yearfirst=yearfirst)
else:
try:
subarr = tools.to_datetime(data, box=False)
# make sure that we have a index/ndarray like (and not a Series)
if isinstance(subarr, ABCSeries):
subarr = subarr._values
if subarr.dtype == np.object_:
subarr = tools._to_datetime(subarr, box=False)
except ValueError:
# tz aware
subarr = tools._to_datetime(data, box=False, utc=True)
# we may not have been able to convert
if not (is_datetimetz(subarr) or np.issubdtype(subarr.dtype, np.datetime64)):
raise ValueError('Unable to convert %s to datetime dtype'
% str(data))
if isinstance(subarr, DatetimeIndex):
if tz is None:
tz = subarr.tz
else:
if tz is not None:
tz = tslib.maybe_get_tz(tz)
if (not isinstance(data, DatetimeIndex) or
getattr(data, 'tz', None) is None):
# Convert tz-naive to UTC
ints = subarr.view('i8')
subarr = tslib.tz_localize_to_utc(ints, tz,
ambiguous=ambiguous)
subarr = subarr.view(_NS_DTYPE)
subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
# if dtype is provided, coerce here
if dtype is not None:
if not is_dtype_equal(subarr.dtype, dtype):
if subarr.tz is not None:
raise ValueError("cannot localize from non-UTC data")
dtype = DatetimeTZDtype.construct_from_string(dtype)
subarr = subarr.tz_localize(dtype.tz)
if verify_integrity and len(subarr) > 0:
if freq is not None and not freq_infer:
inferred = subarr.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(subarr[0], None, len(subarr), None, freq, tz=tz, ambiguous=ambiguous)
if not np.array_equal(subarr.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed dates does not '
'conform to passed frequency {1}'.format(inferred, freq.freqstr))
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False, ambiguous='raise', closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Must specify two of start, end, or periods')
_normalized = True
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
try:
inferred_tz = tools._infer_tzinfo(start, end)
except:
raise TypeError('Start and end cannot both be tz-aware with '
'different timezones')
inferred_tz = tslib.maybe_get_tz(inferred_tz)
# these may need to be localized
tz = tslib.maybe_get_tz(tz)
if tz is not None:
date = start or end
if date.tzinfo is not None and hasattr(tz, 'localize'):
tz = tz.localize(date.replace(tzinfo=None)).tzinfo
if tz is not None and inferred_tz is not None:
if not inferred_tz == tz:
raise AssertionError("Inferred time zone not equal to passed "
"time zone")
elif inferred_tz is not None:
tz = inferred_tz
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
if hasattr(offset, 'delta') and offset != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
start = start.tz_localize(tz, ambiguous=False)
if end is not None and end.tz is None:
end = end.tz_localize(tz, ambiguous=False)
if start and end:
if start.tz is None and end.tz is not None:
start = start.tz_localize(end.tz, ambiguous=False)
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz, ambiguous=False)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
else:
if tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
if end is not None and end.tz is not None:
end = end.replace(tzinfo=None)
if start and end:
if start.tz is None and end.tz is not None:
end = end.replace(tzinfo=None)
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, 'tz', None) is None:
index = tslib.tz_localize_to_utc(com._ensure_int64(index), tz,
ambiguous=ambiguous)
index = index.view(_NS_DTYPE)
index = cls._simple_new(index, name=name, freq=offset, tz=tz)
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return index
@property
def _box_func(self):
    # Callable that boxes a raw value into a Timestamp carrying this
    # index's frequency and timezone; self.offset/self.tz are read at
    # call time, not when the lambda is created.
    return lambda x: Timestamp(x, offset=self.offset, tz=self.tz)
def _convert_for_op(self, value):
    """ Convert value to be insertable to ndarray

    Returns the np.datetime64 form of `value`; raises ValueError when its
    timezone does not match the index's.
    """
    if self._has_same_tz(value):
        return _to_m8(value)
    raise ValueError('Passed item and index have different timezone')
def _local_timestamps(self):
    # Convert the UTC i8 values to local wall-clock i8 values.
    # tslib.tz_convert expects sorted input, so for a non-monotonic index
    # we sort, convert, then undo the permutation.
    utc = _utc()
    if self.is_monotonic:
        return tslib.tz_convert(self.asi8, utc, self.tz)
    else:
        values = self.asi8
        indexer = values.argsort()
        result = tslib.tz_convert(values.take(indexer), utc, self.tz)
        # build the inverse permutation and restore original order
        n = len(indexer)
        reverse = np.empty(n, dtype=np.int_)
        reverse.put(indexer, np.arange(n))
        return result.take(reverse)
@classmethod
def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
"""
if not getattr(values,'dtype',None):
# empty, but with dtype compat
if values is None:
values = np.empty(0, dtype=_NS_DTYPE)
return cls(values, name=name, freq=freq, tz=tz, dtype=dtype, **kwargs)
values = np.array(values,copy=False)
if is_object_dtype(values):
return cls(values, name=name, freq=freq, tz=tz, dtype=dtype, **kwargs).values
elif not is_datetime64_dtype(values):
values = com._ensure_int64(values).view(_NS_DTYPE)
result = object.__new__(cls)
result._data = values
result.name = name
result.offset = freq
result.tz = tslib.maybe_get_tz(tz)
result._reset_identity()
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@cache_readonly
def _timezone(self):
""" Comparable timezone both for pytz / dateutil"""
return tslib.get_timezone(self.tzinfo)
def _has_same_tz(self, other):
    """Return True when `other`'s timezone compares equal to the index's
    (both naive also counts as a match via the comparable-timezone form)."""
    zzone = self._timezone
    # vzone shouldn't be None if value is non-datetime like
    if isinstance(other, np.datetime64):
        # convert to Timestamp as np.datetime64 doesn't have tz attr
        other = Timestamp(other)
    vzone = tslib.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))
    return zzone == vzone
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is None and end is None:
# I somewhat believe this should never be raised externally and therefore
# should be a `PandasError` but whatever...
raise TypeError('Must specify either start or end.')
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if (start is None or end is None) and periods is None:
raise TypeError('Must either specify period or provide both start and end.')
if offset is None:
# This can't happen with external-facing code, therefore PandasError
raise TypeError('Must provide offset.')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = tools._to_datetime(list(xdr), box=False)
cachedRange = DatetimeIndex._simple_new(arr)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if not isinstance(end, Timestamp):
raise AssertionError('end must be an instance of Timestamp')
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
if not isinstance(start, Timestamp):
raise AssertionError('start must be an instance of Timestamp')
start = offset.rollforward(start)
startLoc = cachedRange.get_loc(start)
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.get_loc(start)
endLoc = cachedRange.get_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return tslib.ints_to_pydatetime(self.asi8, self.tz)
_na_value = tslib.NaT
"""The expected NA value to use with this index."""
@cache_readonly
def _is_dates_only(self):
from pandas.core.format import _is_dates_only
return _is_dates_only(self.values)
@property
def _formatter_func(self):
from pandas.core.format import _get_format_datetime64
formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
return lambda x: "'%s'" % formatter(x, tz=self.tz)
def __reduce__(self):
# we use a special reudce here because we need
# to simply set the .tz (and not reinterpret it)
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_DatetimeIndex, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(DatetimeIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
# provide numpy < 1.7 compat
if nd_state[2] == 'M8[us]':
new_state = np.ndarray.__reduce__(data.astype('M8[ns]'))
np.ndarray.__setstate__(data, new_state[2])
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _sub_datelike(self, other):
    # subtract a datetime from myself, yielding a TimedeltaIndex
    from pandas import TimedeltaIndex
    other = Timestamp(other)
    # require tz compat
    if not self._has_same_tz(other):
        raise TypeError("Timestamp subtraction must have the same timezones or no timezones")
    i8 = self.asi8
    result = i8 - other.value
    # NaT positions in self propagate as NaT in the resulting timedeltas
    result = self._maybe_mask_results(result, fill_value=tslib.iNaT)
    return TimedeltaIndex(result, name=self.name, copy=False)
def _maybe_update_attributes(self, attrs):
    """ Update Index attributes (e.g. freq) depending on op """
    # After an op a fixed frequency can no longer be trusted, so any
    # non-None freq is downgraded to 'infer'; a None freq stays as-is.
    if attrs.get('freq') is not None:
        attrs['freq'] = 'infer'
    return attrs
def _add_delta(self, delta):
from pandas import TimedeltaIndex
name = self.name
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
elif isinstance(delta, TimedeltaIndex):
new_values = self._add_delta_tdi(delta)
# update name when delta is Index
name = com._maybe_match_name(self, delta)
elif isinstance(delta, DateOffset):
new_values = self._add_offset(delta).asi8
else:
new_values = self.astype('O') + delta
tz = 'UTC' if self.tz is not None else None
result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer')
utc = _utc()
if self.tz is not None and self.tz is not utc:
result = result.tz_convert(self.tz)
return result
def _add_offset(self, offset):
    # Apply a DateOffset elementwise.  Prefer the vectorized apply_index,
    # operating on naive wall times for tz-aware indexes and re-localizing
    # afterwards; fall back to slow object-dtype addition (with a
    # PerformanceWarning) when the offset has no vectorized implementation.
    try:
        if self.tz is not None:
            values = self.tz_localize(None)
        else:
            values = self
        result = offset.apply_index(values)
        if self.tz is not None:
            result = result.tz_localize(self.tz)
        return result
    except NotImplementedError:
        warnings.warn("Non-vectorized DateOffset being applied to Series or DatetimeIndex",
                      PerformanceWarning)
        return self.astype('O') + offset
def _format_native_types(self, na_rep=u('NaT'),
date_format=None, **kwargs):
from pandas.core.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(self.asi8,
tz=self.tz,
format=format,
na_rep=na_rep)
    def to_datetime(self, dayfirst=False):
        # Already a DatetimeIndex: just return a copy. ``dayfirst`` is
        # accepted for API compatibility but has no effect here.
        return self.copy()
    def astype(self, dtype):
        """Cast the index to *dtype*.

        Supported targets: object, int64 (raw i8 values), datetime64[ns]
        (tz-aware indexes are converted to naive UTC wall times), and str.
        Anything else raises ValueError.
        """
        dtype = np.dtype(dtype)
        if dtype == np.object_:
            return self.asobject
        elif dtype == _INT64_DTYPE:
            return self.asi8.copy()
        elif dtype == _NS_DTYPE and self.tz is not None:
            # drop tz info by normalizing to UTC first
            return self.tz_convert('UTC').tz_localize(None)
        elif dtype == str:
            return self._shallow_copy(values=self.format(), infer=True)
        else:  # pragma: no cover
            raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
    def _get_time_micros(self):
        # Return the time-of-day of each element as integer microseconds,
        # computed on local (wall-clock) stamps for non-UTC tz-aware indexes.
        utc = _utc()
        values = self.asi8
        if self.tz is not None and self.tz is not utc:
            values = self._local_timestamps()
        return tslib.get_time_micros(values)
    def to_series(self, keep_tz=False):
        """
        Create a Series with both index and values equal to the index keys
        useful with map for returning an indexer based on an index
        Parameters
        ----------
        keep_tz : optional, defaults False.
            return the data keeping the timezone.
            If keep_tz is True:
              If the timezone is not set, the resulting
              Series will have a datetime64[ns] dtype.
              Otherwise the Series will have an datetime64[ns, tz] dtype; the
              tz will be preserved.
            If keep_tz is False:
              Series will have a datetime64[ns] dtype. TZ aware
              objects will have the tz removed.
        Returns
        -------
        Series
        """
        from pandas import Series
        # _to_embed decides whether the values keep their timezone
        return Series(self._to_embed(keep_tz), index=self, name=self.name)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
This is for internal compat
"""
if keep_tz and self.tz is not None:
# preserve the tz & copy
return self.copy(deep=True)
return self.values.copy()
    def to_pydatetime(self):
        """
        Return DatetimeIndex as object ndarray of datetime.datetime objects
        Returns
        -------
        datetimes : ndarray
        """
        # box the i8 values; tz is attached when the index is tz-aware
        return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)
    def to_period(self, freq=None):
        """
        Cast to PeriodIndex at a particular frequency

        Raises
        ------
        ValueError
            If no freq is passed and none is set or inferable on the index.
        """
        from pandas.tseries.period import PeriodIndex
        if freq is None:
            # fall back to the set freq, then to an inferred one
            freq = self.freqstr or self.inferred_freq
            if freq is None:
                msg = "You must pass a freq argument as current index has none."
                raise ValueError(msg)
            freq = get_period_alias(freq)
        return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz)
    def snap(self, freq='S'):
        """
        Snap time stamps to nearest occurring frequency

        Each element already on the frequency is kept; otherwise the closer
        of rollback/rollforward is taken (ties go to rollforward).
        """
        # Superdumb, punting on any optimizing
        freq = to_offset(freq)
        snapped = np.empty(len(self), dtype=_NS_DTYPE)
        for i, v in enumerate(self):
            s = v
            if not freq.onOffset(s):
                t0 = freq.rollback(s)
                t1 = freq.rollforward(s)
                if abs(s - t0) < abs(t1 - s):
                    s = t0
                else:
                    s = t1
            snapped[i] = s
        # we know it conforms; skip check
        return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
    def union(self, other):
        """
        Specialized union for DatetimeIndex objects. If combining
        overlapping ranges with the same DateOffset, this will be much
        faster than Index.union
        Parameters
        ----------
        other : DatetimeIndex or array-like
        Returns
        -------
        y : Index or DatetimeIndex
        """
        self._assert_can_do_setop(other)
        if not isinstance(other, DatetimeIndex):
            # best-effort coercion; fall through to generic union on failure
            try:
                other = DatetimeIndex(other)
            except TypeError:
                pass
        this, other = self._maybe_utc_convert(other)
        if this._can_fast_union(other):
            return this._fast_union(other)
        else:
            result = Index.union(this, other)
            if isinstance(result, DatetimeIndex):
                result.tz = this.tz
                # try to recover a freq lost by the generic union
                if (result.freq is None and
                    (this.freq is not None or other.freq is not None)):
                    result.offset = to_offset(result.inferred_freq)
            return result
    def to_perioddelta(self, freq):
        """
        Calculates TimedeltaIndex of difference between index
        values and index converted to PeriodIndex at specified
        freq. Used for vectorized offsets
        .. versionadded:: 0.17.0
        Parameters
        ----------
        freq : Period frequency
        Returns
        -------
        y : TimedeltaIndex
        """
        # i8 difference between each stamp and its period's start timestamp
        return to_timedelta(self.asi8 - self.to_period(freq).to_timestamp().asi8)
    def union_many(self, others):
        """
        A bit of a hack to accelerate unioning a collection of indexes

        Folds the others in one at a time, using the fast-union path when
        possible and the generic Index.union otherwise.
        """
        this = self
        for other in others:
            if not isinstance(this, DatetimeIndex):
                # a previous generic union demoted the result; stay generic
                this = Index.union(this, other)
                continue
            if not isinstance(other, DatetimeIndex):
                try:
                    other = DatetimeIndex(other)
                except TypeError:
                    pass
            this, other = this._maybe_utc_convert(other)
            if this._can_fast_union(other):
                this = this._fast_union(other)
            else:
                # generic union loses the tz; save and restore it
                tz = this.tz
                this = Index.union(this, other)
                if isinstance(this, DatetimeIndex):
                    this.tz = tz
        if this.freq is None:
            this.offset = to_offset(this.inferred_freq)
        return this
    def append(self, other):
        """
        Append a collection of Index options together
        Parameters
        ----------
        other : Index or list/tuple of indices
        Returns
        -------
        appended : Index
        """
        name = self.name
        to_concat = [self]
        if isinstance(other, (list, tuple)):
            to_concat = to_concat + list(other)
        else:
            to_concat.append(other)
        # the result keeps the common name only if every piece agrees
        for obj in to_concat:
            if isinstance(obj, Index) and obj.name != name:
                name = None
                break
        to_concat = self._ensure_compat_concat(to_concat)
        to_concat, factory = _process_concat_data(to_concat, name)
        return factory(to_concat)
    def join(self, other, how='left', level=None, return_indexers=False):
        """
        See Index.join
        """
        # coerce non-datetime, non-numeric-ish indexes so the join happens
        # on datetime semantics where possible
        if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
            other.inferred_type not in ('floating', 'mixed-integer',
                                        'mixed-integer-float', 'mixed')):
            try:
                other = DatetimeIndex(other)
            except (TypeError, ValueError):
                pass
        this, other = self._maybe_utc_convert(other)
        return Index.join(this, other, how=how, level=level,
                          return_indexers=return_indexers)
    def _maybe_utc_convert(self, other):
        """Bring self/other onto a common timezone (UTC) for set ops.

        Raises TypeError when exactly one side is tz-aware.
        """
        this = self
        if isinstance(other, DatetimeIndex):
            if self.tz is not None:
                if other.tz is None:
                    raise TypeError('Cannot join tz-naive with tz-aware '
                                    'DatetimeIndex')
            elif other.tz is not None:
                raise TypeError('Cannot join tz-naive with tz-aware '
                                'DatetimeIndex')
            if self.tz != other.tz:
                # both aware but different zones: normalize to UTC
                this = self.tz_convert('UTC')
                other = other.tz_convert('UTC')
        return this, other
    def _wrap_joined_index(self, joined, other):
        """Box the raw result of a join back into a DatetimeIndex."""
        name = self.name if self.name == other.name else None
        if (isinstance(other, DatetimeIndex)
            and self.offset == other.offset
            and self._can_fast_union(other)):
            # compatible freq: a shallow copy keeps the offset/tz metadata
            joined = self._shallow_copy(joined)
            joined.name = name
            return joined
        else:
            tz = getattr(other, 'tz', None)
            return self._simple_new(joined, name, tz=tz)
    def _can_fast_union(self, other):
        """Return True when self/other are monotonic, share an offset and
        adjoin/overlap, so their union can be computed without sorting."""
        if not isinstance(other, DatetimeIndex):
            return False
        offset = self.offset
        if offset is None or offset != other.offset:
            return False
        if not self.is_monotonic or not other.is_monotonic:
            return False
        if len(self) == 0 or len(other) == 0:
            return True
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        right_start = right[0]
        left_end = left[-1]
        # Only need to "adjoin", not overlap
        try:
            return (right_start == left_end + offset) or right_start in left
        except (ValueError):
            # if we are comparing an offset that does not propagate timezones
            # this will raise
            return False
    def _fast_union(self, other):
        """Union of two compatible indexes (see _can_fast_union) done by
        concatenation or range regeneration instead of a generic set op."""
        if len(other) == 0:
            return self.view(type(self))
        if len(self) == 0:
            return other.view(type(self))
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        left_start, left_end = left[0], left[-1]
        right_end = right[-1]
        if not self.offset._should_cache():
            # concatenate dates: take left whole, then right's tail beyond it
            if left_end < right_end:
                loc = right.searchsorted(left_end, side='right')
                right_chunk = right.values[loc:]
                dates = com._concat_compat((left.values, right_chunk))
                return self._shallow_copy(dates)
            else:
                # left fully covers right
                return left
        else:
            # cacheable offset: regenerate the full range instead
            return type(self)(start=left_start,
                              end=max(left_end, right_end),
                              freq=left.offset)
def __iter__(self):
"""
Return an iterator over the boxed values
Returns
-------
Timestamps : ndarray
"""
# convert in chunks of 10k for efficiency
data = self.asi8
l = len(self)
chunksize = 10000
chunks = int(l / chunksize) + 1
for i in range(chunks):
start_i = i*chunksize
end_i = min((i+1)*chunksize,l)
converted = tslib.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, offset=self.offset, box=True)
for v in converted:
yield v
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
if self.tz != other.tz:
raise ValueError('Passed item and index have different timezone')
return self._simple_new(result, name=name, freq=None, tz=self.tz)
    def intersection(self, other):
        """
        Specialized intersection for DatetimeIndex objects. May be much faster
        than Index.intersection
        Parameters
        ----------
        other : DatetimeIndex or array-like
        Returns
        -------
        y : Index or DatetimeIndex
        """
        self._assert_can_do_setop(other)
        if not isinstance(other, DatetimeIndex):
            # coercion failed or not applicable: generic intersection
            try:
                other = DatetimeIndex(other)
            except (TypeError, ValueError):
                pass
            result = Index.intersection(self, other)
            if isinstance(result, DatetimeIndex):
                if result.freq is None:
                    result.offset = to_offset(result.inferred_freq)
            return result
        elif (other.offset is None or self.offset is None or
              other.offset != self.offset or
              not other.offset.isAnchored() or
              (not self.is_monotonic or not other.is_monotonic)):
            # incompatible/unanchored offsets: generic intersection
            result = Index.intersection(self, other)
            if isinstance(result, DatetimeIndex):
                if result.freq is None:
                    result.offset = to_offset(result.inferred_freq)
            return result
        if len(self) == 0:
            return self
        if len(other) == 0:
            return other
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        # fast path: same anchored offset and monotonic, so the
        # intersection is just the overlapping slice
        end = min(left[-1], right[-1])
        start = right[0]
        if end < start:
            return type(self)(data=[])
        else:
            lslice = slice(*left.slice_locs(start, end))
            left_chunk = left.values[lslice]
            return self._shallow_copy(left_chunk)
def _parsed_string_to_bounds(self, reso, parsed):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : Resolution
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
is_monotonic = self.is_monotonic
if reso == 'year':
return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz),
Timestamp(datetime(parsed.year, 12, 31, 23, 59, 59, 999999), tz=self.tz))
elif reso == 'month':
d = tslib.monthrange(parsed.year, parsed.month)[1]
return (Timestamp(datetime(parsed.year, parsed.month, 1), tz=self.tz),
Timestamp(datetime(parsed.year, parsed.month, d, 23, 59, 59, 999999), tz=self.tz))
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = tslib.monthrange(parsed.year, qe)[1] # at end of month
return (Timestamp(datetime(parsed.year, parsed.month, 1), tz=self.tz),
Timestamp(datetime(parsed.year, qe, d, 23, 59, 59, 999999), tz=self.tz))
elif reso == 'day':
st = datetime(parsed.year, parsed.month, parsed.day)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Day(), tz=self.tz).value - 1))
elif reso == 'hour':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Hour(),
tz=self.tz).value - 1))
elif reso == 'minute':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Minute(),
tz=self.tz).value - 1))
elif reso == 'second':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute, second=parsed.second)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Second(),
tz=self.tz).value - 1))
elif reso == 'microsecond':
st = datetime(parsed.year, parsed.month, parsed.day,
parsed.hour, parsed.minute, parsed.second,
parsed.microsecond)
return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz))
else:
raise KeyError
    def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
        """Resolve a partial-string date to a slice (monotonic index) or a
        positional integer array (non-monotonic index).

        Raises KeyError when the string's resolution is too fine for the
        index's resolution or out of the index's range.
        """
        is_monotonic = self.is_monotonic
        if ((reso in ['day', 'hour', 'minute'] and
             not (self._resolution < Resolution.get_reso(reso) or
                  not is_monotonic)) or
            (reso == 'second' and
             not (self._resolution <= Resolution.RESO_SEC or
                  not is_monotonic))):
            # These resolution/monotonicity validations came from GH3931,
            # GH3452 and GH2369.
            raise KeyError
        if reso == 'microsecond':
            # _partial_date_slice doesn't allow microsecond resolution, but
            # _parsed_string_to_bounds allows it.
            raise KeyError
        t1, t2 = self._parsed_string_to_bounds(reso, parsed)
        stamps = self.asi8
        if is_monotonic:
            # we are out of range
            if len(stamps) and (
                (use_lhs and t1.value < stamps[0] and t2.value < stamps[0]) or (
                (use_rhs and t1.value > stamps[-1] and t2.value > stamps[-1]))):
                raise KeyError
            # a monotonic (sorted) series can be sliced
            left = stamps.searchsorted(t1.value, side='left') if use_lhs else None
            right = stamps.searchsorted(t2.value, side='right') if use_rhs else None
            return slice(left, right)
        lhs_mask = (stamps >= t1.value) if use_lhs else True
        rhs_mask = (stamps <= t2.value) if use_rhs else True
        # try to find a the dates
        return (lhs_mask & rhs_mask).nonzero()[0]
def _possibly_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
    def get_value(self, series, key):
        """
        Fast lookup of value from 1-dimensional ndarray. Only use this if you
        know what you're doing
        """
        if isinstance(key, datetime):
            # needed to localize naive datetimes
            if self.tz is not None:
                key = Timestamp(key, tz=self.tz)
            return self.get_value_maybe_box(series, key)
        if isinstance(key, time):
            # time-of-day keys select every matching timestamp
            locs = self.indexer_at_time(key)
            return series.take(locs)
        try:
            return _maybe_box(self, Index.get_value(self, series, key), series, key)
        except KeyError:
            # fall back to partial-string slicing, then to boxing the key
            try:
                loc = self._get_string_slice(key)
                return series[loc]
            except (TypeError, ValueError, KeyError):
                pass
            try:
                return self.get_value_maybe_box(series, key)
            except (TypeError, ValueError, KeyError):
                raise KeyError(key)
    def get_value_maybe_box(self, series, key):
        """Engine-level lookup of *key* in *series*, boxing the key to a
        (tz-aware when applicable) Timestamp first."""
        # needed to localize naive datetimes
        if self.tz is not None:
            key = Timestamp(key, tz=self.tz)
        elif not isinstance(key, Timestamp):
            key = Timestamp(key)
        values = self._engine.get_value(_values_from_object(series), key)
        return _maybe_box(self, values, series, key)
    def get_loc(self, key, method=None, tolerance=None):
        """
        Get integer location for requested label
        Returns
        -------
        loc : int
        """
        if tolerance is not None:
            # try converting tolerance now, so errors don't get swallowed by
            # the try/except clauses below
            tolerance = self._convert_tolerance(tolerance)
        if isinstance(key, datetime):
            # needed to localize naive datetimes
            key = Timestamp(key, tz=self.tz)
            return Index.get_loc(self, key, method, tolerance)
        if isinstance(key, time):
            if method is not None:
                raise NotImplementedError('cannot yet lookup inexact labels '
                                          'when key is a time object')
            return self.indexer_at_time(key)
        try:
            return Index.get_loc(self, key, method, tolerance)
        except (KeyError, ValueError, TypeError):
            # fall back to partial-string slicing, then to a boxed Timestamp
            try:
                return self._get_string_slice(key)
            except (TypeError, KeyError, ValueError):
                pass
            try:
                stamp = Timestamp(key, tz=self.tz)
                return Index.get_loc(self, stamp, method, tolerance)
            except (KeyError, ValueError):
                raise KeyError(key)
    def _maybe_cast_slice_bound(self, label, side, kind):
        """
        If label is a string, cast it to datetime according to resolution.
        Parameters
        ----------
        label : object
        side : {'left', 'right'}
        kind : string / None
        Returns
        -------
        label : object
        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """
        # numeric and time-of-day labels make no sense as slice bounds here
        if is_float(label) or isinstance(label, time) or is_integer(label):
            self._invalid_indexer('slice',label)
        if isinstance(label, compat.string_types):
            freq = getattr(self, 'freqstr',
                           getattr(self, 'inferred_freq', None))
            _, parsed, reso = parse_time_string(label, freq)
            bounds = self._parsed_string_to_bounds(reso, parsed)
            # left bound uses the lower edge, right bound the upper edge
            return bounds[0 if side == 'left' else 1]
        else:
            return label
    def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
        """Resolve a partial date string to index locations."""
        freq = getattr(self, 'freqstr',
                       getattr(self, 'inferred_freq', None))
        _, parsed, reso = parse_time_string(key, freq)
        loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
                                       use_rhs=use_rhs)
        return loc
    def slice_indexer(self, start=None, end=None, step=None, kind=None):
        """
        Return indexer for specified label slice.
        Index.slice_indexer, customized to handle time slicing.
        In addition to functionality provided by Index.slice_indexer, does the
        following:
        - if both `start` and `end` are instances of `datetime.time`, it
          invokes `indexer_between_time`
        - if `start` and `end` are both either string or None perform
          value-based selection in non-monotonic cases.
        """
        # For historical reasons DatetimeIndex supports slices between two
        # instances of datetime.time as if it were applying a slice mask to
        # an array of (self.hour, self.minute, self.seconds, self.microsecond).
        if isinstance(start, time) and isinstance(end, time):
            if step is not None and step != 1:
                raise ValueError('Must have step size of 1 with time slices')
            return self.indexer_between_time(start, end)
        if isinstance(start, time) or isinstance(end, time):
            raise KeyError('Cannot mix time and non-time slice keys')
        try:
            return Index.slice_indexer(self, start, end, step)
        except KeyError:
            # For historical reasons DatetimeIndex by default supports
            # value-based partial (aka string) slices on non-monotonic arrays,
            # let's try that.
            if ((start is None or isinstance(start, compat.string_types)) and
                (end is None or isinstance(end, compat.string_types))):
                # build a boolean mask from the casted bounds
                mask = True
                if start is not None:
                    start_casted = self._maybe_cast_slice_bound(start, 'left', kind)
                    mask = start_casted <= self
                if end is not None:
                    end_casted = self._maybe_cast_slice_bound(end, 'right', kind)
                    mask = (self <= end_casted) & mask
                indexer = mask.nonzero()[0][::step]
                if len(indexer) == len(self):
                    # everything selected: a plain full slice is cheaper
                    return slice(None)
                else:
                    return indexer
            else:
                raise
# alias to offset
def _get_freq(self):
return self.offset
def _set_freq(self, value):
self.offset = value
freq = property(fget=_get_freq, fset=_set_freq, doc="get/set the frequncy of the Index")
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M', "The month as January=1, December=12")
day = _field_accessor('day', 'D', "The days of the datetime")
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
millisecond = _field_accessor('millisecond', 'ms', "The milliseconds of the datetime")
microsecond = _field_accessor('microsecond', 'us', "The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns', "The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy', "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow',
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = _field_accessor('dayofyear', 'doy', "The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor('days_in_month', 'dim', "The number of days in the month\n\n.. versionadded:: 0.16.0")
daysinmonth = days_in_month
is_month_start = _field_accessor('is_month_start', 'is_month_start', "Logical indicating if first day of month (defined by frequency)")
is_month_end = _field_accessor('is_month_end', 'is_month_end', "Logical indicating if last day of month (defined by frequency)")
is_quarter_start = _field_accessor('is_quarter_start', 'is_quarter_start', "Logical indicating if first day of quarter (defined by frequency)")
is_quarter_end = _field_accessor('is_quarter_end', 'is_quarter_end', "Logical indicating if last day of quarter (defined by frequency)")
is_year_start = _field_accessor('is_year_start', 'is_year_start', "Logical indicating if first day of year (defined by frequency)")
is_year_end = _field_accessor('is_year_end', 'is_year_end', "Logical indicating if last day of year (defined by frequency)")
    @property
    def time(self):
        """
        Returns numpy array of datetime.time. The time part of the Timestamps.
        """
        # can't call self.map() which tries to treat func as ufunc
        # and causes recursion warnings on python 2.6
        # NaT entries are mapped to np.nan and then masked by
        # _maybe_mask_results
        return self._maybe_mask_results(_algos.arrmap_object(self.asobject.values,
                                                             lambda x: np.nan if x is tslib.NaT else x.time()))
    @property
    def date(self):
        """
        Returns numpy array of datetime.date. The date part of the Timestamps.
        """
        # NOTE(review): unlike `time` above there is no explicit NaT guard
        # here; presumably NaT.date() is safe — confirm.
        return self._maybe_mask_results(_algos.arrmap_object(self.asobject.values, lambda x: x.date()))
    def normalize(self):
        """
        Return DatetimeIndex with times to midnight. Length is unaltered
        Returns
        -------
        normalized : DatetimeIndex
        """
        # normalization is done on i8 values, respecting the index tz
        new_values = tslib.date_normalize(self.asi8, self.tz)
        return DatetimeIndex(new_values, freq='infer', name=self.name,
                             tz=self.tz)
def searchsorted(self, key, side='left'):
if isinstance(key, (np.ndarray, Index)):
key = np.array(key, dtype=_NS_DTYPE, copy=False)
else:
key = _to_m8(key, tz=self.tz)
return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
    @property
    def inferred_type(self):
        """str: always 'datetime64' for a DatetimeIndex."""
        # b/c datetime is represented as microseconds since the epoch, make
        # sure we can't have ambiguous indexing
        return 'datetime64'
    @cache_readonly
    def dtype(self):
        # tz-naive: plain datetime64[ns]; tz-aware: the tz-carrying dtype
        if self.tz is None:
            return _NS_DTYPE
        return com.DatetimeTZDtype('ns',self.tz)
    @property
    def is_all_dates(self):
        """bool: trivially True — every element of a DatetimeIndex is a date."""
        return True
    @cache_readonly
    def is_normalized(self):
        """
        Returns True if all of the dates are at midnight ("no time")
        """
        # delegated to tslib; tz is passed so the check uses wall-clock time
        return tslib.dates_normalized(self.asi8, self.tz)
    @cache_readonly
    def _resolution(self):
        # finest resolution actually present in the data (cached)
        return period.resolution(self.asi8, self.tz)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'datetime64'):
if self.offset is not None:
return False
try:
other = DatetimeIndex(other)
except:
return False
if self._has_same_tz(other):
return np.array_equal(self.asi8, other.asi8)
return False
    def insert(self, loc, item):
        """
        Make new Index inserting new item at location
        Parameters
        ----------
        loc : int
        item : object
            if not either a Python datetime or a numpy integer-like, returned
            Index dtype will be object rather than datetime.
        Returns
        -------
        new_index : Index
        """
        freq = None
        if isinstance(item, (datetime, np.datetime64)):
            self._assert_can_do_op(item)
            if not self._has_same_tz(item):
                raise ValueError('Passed item and index have different timezone')
            # check freq can be preserved on edge cases: an item one freq
            # step before the start or after the end keeps the freq
            if self.size and self.freq is not None:
                if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
                    freq = self.freq
                elif (loc == len(self)) and item - self.freq == self[-1]:
                    freq = self.freq
            item = _to_m8(item, tz=self.tz)
        try:
            new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
                                        self[loc:].asi8))
            if self.tz is not None:
                # asi8 is UTC-based; convert back to local wall times
                new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
            return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
        except (AttributeError, TypeError):
            # fall back to object index
            if isinstance(item,compat.string_types):
                return self.asobject.insert(loc, item)
            raise TypeError("cannot insert DatetimeIndex with incompatible label")
    def delete(self, loc):
        """
        Make a new DatetimeIndex with passed location(s) deleted.
        Parameters
        ----------
        loc: int, slice or array of ints
            Indicate which sub-arrays to remove.
        Returns
        -------
        new_index : DatetimeIndex
        """
        new_dates = np.delete(self.asi8, loc)
        freq = None
        if is_integer(loc):
            # deleting only the first or last element preserves the freq
            if loc in (0, -len(self), -1, len(self) - 1):
                freq = self.freq
        else:
            if com.is_list_like(loc):
                loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc)), len(self))
            # a unit-step slice anchored at either end also preserves freq
            if isinstance(loc, slice) and loc.step in (1, None):
                if (loc.start in (0, None) or loc.stop in (len(self), None)):
                    freq = self.freq
        if self.tz is not None:
            # asi8 is UTC-based; convert back to local wall times
            new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
        return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
    def tz_convert(self, tz):
        """
        Convert tz-aware DatetimeIndex from one time zone to another (using pytz/dateutil)
        Parameters
        ----------
        tz : string, pytz.timezone, dateutil.tz.tzfile or None
            Time zone for time. Corresponding timestamps would be converted to
            time zone of the TimeSeries.
            None will remove timezone holding UTC time.
        Returns
        -------
        normalized : DatetimeIndex
        Raises
        ------
        TypeError
            If DatetimeIndex is tz-naive.
        """
        tz = tslib.maybe_get_tz(tz)
        if self.tz is None:
            # tz naive, use tz_localize
            raise TypeError('Cannot convert tz-naive timestamps, use '
                            'tz_localize to localize')
        # No conversion since timestamps are all UTC to begin with;
        # only the tz metadata changes
        return self._shallow_copy(tz=tz)
    @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
                     mapping={True: 'infer', False: 'raise'})
    def tz_localize(self, tz, ambiguous='raise'):
        """
        Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil),
        or remove timezone from tz-aware DatetimeIndex
        Parameters
        ----------
        tz : string, pytz.timezone, dateutil.tz.tzfile or None
            Time zone for time. Corresponding timestamps would be converted to
            time zone of the TimeSeries.
            None will remove timezone holding local time.
        ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
            - 'infer' will attempt to infer fall dst-transition hours based on order
            - bool-ndarray where True signifies a DST time, False signifies
              a non-DST time (note that this flag is only applicable for ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise an AmbiguousTimeError if there are ambiguous times
        infer_dst : boolean, default False (DEPRECATED)
            Attempt to infer fall dst-transition hours based on order
        Returns
        -------
        localized : DatetimeIndex
        Raises
        ------
        TypeError
            If the DatetimeIndex is tz-aware and tz is not None.
        """
        if self.tz is not None:
            if tz is None:
                # de-localize: recover local wall times, drop the tz
                new_dates = tslib.tz_convert(self.asi8, 'UTC', self.tz)
            else:
                raise TypeError("Already tz-aware, use tz_convert to convert.")
        else:
            tz = tslib.maybe_get_tz(tz)
            # Convert to UTC
            new_dates = tslib.tz_localize_to_utc(self.asi8, tz,
                                                 ambiguous=ambiguous)
        new_dates = new_dates.view(_NS_DTYPE)
        return self._shallow_copy(new_dates, tz=tz)
    def indexer_at_time(self, time, asof=False):
        """
        Select values at particular time of day (e.g. 9:30AM)
        Parameters
        ----------
        time : datetime.time or string
        Returns
        -------
        values_at_time : TimeSeries
        """
        from dateutil.parser import parse
        if asof:
            raise NotImplementedError("'asof' argument is not supported")
        if isinstance(time, compat.string_types):
            time = parse(time).time()
        if time.tzinfo:
            # TODO
            raise NotImplementedError("argument 'time' with timezone info is "
                                      "not supported")
        # compare integer microseconds-since-midnight for each element
        time_micros = self._get_time_micros()
        micros = _time_to_micros(time)
        return (micros == time_micros).nonzero()[0]
    def indexer_between_time(self, start_time, end_time, include_start=True,
                             include_end=True):
        """
        Select values between particular times of day (e.g., 9:00-9:30AM)
        Parameters
        ----------
        start_time : datetime.time or string
        end_time : datetime.time or string
        include_start : boolean, default True
        include_end : boolean, default True
        tz : string or pytz.timezone or dateutil.tz.tzfile, default None
        Returns
        -------
        values_between_time : TimeSeries
        """
        from dateutil.parser import parse
        if isinstance(start_time, compat.string_types):
            start_time = parse(start_time).time()
        if isinstance(end_time, compat.string_types):
            end_time = parse(end_time).time()
        if start_time.tzinfo or end_time.tzinfo:
            raise NotImplementedError("argument 'time' with timezone info is "
                                      "not supported")
        time_micros = self._get_time_micros()
        start_micros = _time_to_micros(start_time)
        end_micros = _time_to_micros(end_time)
        # choose <= / < per side depending on endpoint inclusion
        if include_start and include_end:
            lop = rop = operator.le
        elif include_start:
            lop = operator.le
            rop = operator.lt
        elif include_end:
            lop = operator.lt
            rop = operator.le
        else:
            lop = rop = operator.lt
        # start > end means the interval wraps past midnight, so the two
        # half-conditions are OR'ed instead of AND'ed
        if start_time <= end_time:
            join_op = operator.and_
        else:
            join_op = operator.or_
        mask = join_op(lop(start_micros, time_micros),
                       rop(time_micros, end_micros))
        return mask.nonzero()[0]
    def to_julian_date(self):
        """
        Convert DatetimeIndex to Float64Index of Julian Dates.
        0 Julian date is noon January 1, 4713 BC.
        http://en.wikipedia.org/wiki/Julian_day
        """
        # http://mysite.verizon.net/aesir_research/date/jdalg2.htm
        year = self.year
        month = self.month
        day = self.day
        # Jan/Feb are treated as months 13/14 of the previous year;
        # the accessor arrays are local here so mutating them is safe
        testarr = month < 3
        year[testarr] -= 1
        month[testarr] += 12
        return Float64Index(day +
                            np.fix((153*month - 457)/5) +
                            365*year +
                            np.floor(year / 4) -
                            np.floor(year / 100) +
                            np.floor(year / 400) +
                            1721118.5 +
                            (self.hour +
                             self.minute/60.0 +
                             self.second/3600.0 +
                             self.microsecond/3600.0/1e+6 +
                             self.nanosecond/3600.0/1e+9
                             )/24.0)
# Disable numeric/logical ops on DatetimeIndex and attach the shared
# datetime-like method implementations (mixin-style class patch-up).
DatetimeIndex._add_numeric_methods_disabled()
DatetimeIndex._add_logical_methods_disabled()
DatetimeIndex._add_datetimelike_methods()
def _generate_regular_range(start, end, periods, offset):
    """Generate a fixed-frequency DatetimeIndex from two of start/end/periods.

    Tick offsets are generated arithmetically on i8 values; other offsets
    go through the slower generate_range iterator.
    """
    if isinstance(offset, Tick):
        stride = offset.nanos
        if periods is None:
            b = Timestamp(start).value
            # cannot just use e = Timestamp(end) + 1 because arange breaks when
            # stride is too large, see GH10887
            e = b + (Timestamp(end).value - b)//stride * stride + stride//2
            # end.tz == start.tz by this point due to _generate implementation
            tz = start.tz
        elif start is not None:
            b = Timestamp(start).value
            e = b + np.int64(periods) * stride
            tz = start.tz
        elif end is not None:
            # count backwards from the (inclusive) end
            e = Timestamp(end).value + stride
            b = e - np.int64(periods) * stride
            tz = end.tz
        else:
            raise ValueError("at least 'start' or 'end' should be specified "
                             "if a 'period' is given.")
        data = np.arange(b, e, stride, dtype=np.int64)
        data = DatetimeIndex._simple_new(data, None, tz=tz)
    else:
        # non-Tick offset: materialize the range element by element
        if isinstance(start, Timestamp):
            start = start.to_pydatetime()
        if isinstance(end, Timestamp):
            end = end.to_pydatetime()
        xdr = generate_range(start=start, end=end,
                             periods=periods, offset=offset)
        dates = list(xdr)
        # utc = len(dates) > 0 and dates[0].tzinfo is not None
        data = tools.to_datetime(dates)
    return data
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
               normalize=False, name=None, closed=None, **kwargs):
    """
    Return a fixed frequency datetime index, with day (calendar) as the default
    frequency
    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'D' (calendar daily)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Hong_Kong
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name of the resulting index
    closed : string or None, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)
    Notes
    -----
    2 of start, end, or periods must be specified
    Returns
    -------
    rng : DatetimeIndex
    """
    # all generation logic lives in the DatetimeIndex constructor
    return DatetimeIndex(start=start, end=end, periods=periods,
                         freq=freq, tz=tz, normalize=normalize, name=name,
                         closed=closed, **kwargs)
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
                normalize=True, name=None, closed=None, **kwargs):
    """
    Return a fixed frequency datetime index, with business day as the default
    frequency
    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'B' (business daily)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name for the resulting index
    closed : string or None, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)
    Notes
    -----
    2 of start, end, or periods must be specified
    Returns
    -------
    rng : DatetimeIndex
    """
    # thin wrapper: only the default freq/normalize differ from date_range
    return DatetimeIndex(start=start, end=end, periods=periods,
                         freq=freq, tz=tz, normalize=normalize, name=name,
                         closed=closed, **kwargs)
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
                normalize=True, name=None, closed=None, **kwargs):
    """
    **EXPERIMENTAL** Return a fixed frequency datetime index, with
    CustomBusinessDay as the default frequency

    .. warning:: EXPERIMENTAL
        The CustomBusinessDay class is not officially supported and the API is
        likely to change in future versions. Use this at your own risk.

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'C' (CustomBusinessDay)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Hong_Kong
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name for the resulting index
    weekmask : str, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    closed : string or None, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    2 of start, end, or periods must be specified

    Returns
    -------
    rng : DatetimeIndex
    """
    if freq == 'C':
        # Only the default 'C' frequency honours the custom business-day
        # keywords; consume them here so they don't leak into DatetimeIndex.
        holidays = kwargs.pop('holidays', [])
        weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
        freq = CDay(holidays=holidays, weekmask=weekmask)
    return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
                         tz=tz, normalize=normalize, name=name,
                         closed=closed, **kwargs)
def _to_m8(key, tz=None):
    """Coerce a Timestamp-like value to a numpy datetime64 scalar."""
    if not isinstance(key, Timestamp):
        # Timestamp's constructor also parses strings.
        key = Timestamp(key, tz=tz)
    i8_value = tslib.pydt_to_i8(key)
    return np.int64(i8_value).view(_NS_DTYPE)
# Bounds of the cacheable window: only naive ranges falling strictly inside
# (_CACHE_START, _CACHE_END) are eligible (see _naive_in_cache_range).
_CACHE_START = Timestamp(datetime(1950, 1, 1))
_CACHE_END = Timestamp(datetime(2030, 1, 1))
# Cache of previously generated date ranges — presumably keyed by offset and
# populated elsewhere in this module; TODO confirm against the cache writers.
_daterange_cache = {}
def _naive_in_cache_range(start, end):
    """Return True when both endpoints are tz-naive datetimes lying strictly
    inside the cacheable window (_CACHE_START, _CACHE_END)."""
    if start is None or end is None:
        return False
    if start.tzinfo is not None or end.tzinfo is not None:
        # Timezone-aware endpoints never qualify for the naive cache.
        return False
    return _in_range(start, end, _CACHE_START, _CACHE_END)
def _in_range(start, end, rng_start, rng_end):
return start > rng_start and end < rng_end
def _use_cached_range(offset, _normalized, start, end):
return (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end))
def _time_to_micros(time):
seconds = time.hour * 60 * 60 + 60 * time.minute + time.second
return 1000000 * seconds + time.microsecond
def _process_concat_data(to_concat, name):
    """Prepare index pieces for concatenation.

    Returns the (possibly converted) list of value arrays together with a
    factory function that rebuilds the final Index / DatetimeIndex from the
    concatenated values.
    """
    klass = Index
    kwargs = {}
    concat = np.concatenate
    all_dti = True
    need_utc_convert = False
    has_naive = False
    tz = None
    # First pass: classify the inputs and detect timezone mixing.
    for x in to_concat:
        if not isinstance(x, DatetimeIndex):
            all_dti = False
        else:
            if tz is None:
                tz = x.tz
            if x.tz is None:
                has_naive = True
            if x.tz != tz:
                # Mixed timezones: everything will be converted to UTC.
                need_utc_convert = True
                tz = 'UTC'
    if all_dti:
        need_obj_convert = False
        if has_naive and tz is not None:
            # Mixing naive and aware indexes: fall back to object dtype.
            need_obj_convert = True
        if need_obj_convert:
            to_concat = [x.asobject.values for x in to_concat]
        else:
            if need_utc_convert:
                to_concat = [x.tz_convert('UTC').values for x in to_concat]
            else:
                to_concat = [x.values for x in to_concat]
            # well, technically not a "class" anymore...oh well
            klass = DatetimeIndex._simple_new
            kwargs = {'tz': tz}
            concat = com._concat_compat
    else:
        # Mixed Index types: coerce any DatetimeIndex to object values so a
        # plain Index can hold the result.
        for i, x in enumerate(to_concat):
            if isinstance(x, DatetimeIndex):
                to_concat[i] = x.asobject.values
            elif isinstance(x, Index):
                to_concat[i] = x.values
    factory_func = lambda x: klass(concat(x), name=name, **kwargs)
    return to_concat, factory_func
| gpl-3.0 |
pmatigakis/jsbsim | tests/TestTurboProp.py | 4 | 3032 | # TestTurboProp.py
#
# Regression tests for the turboprop engine model.
#
# Copyright (c) 2016 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import shutil
import xml.etree.ElementTree as et
import pandas as pd
from JSBSim_utils import JSBSimTestCase, CreateFDM, RunTest, isDataMatching, FindDifferences
class TestTurboProp(JSBSimTestCase):
    """Regression tests for the turboprop engine model."""
    def testEnginePowerVC(self):
        """Verify identical results whether the engine power velocity
        correction is defined as a <table> or as a <function>."""
        # Check that the same results are obtained whether the engine power
        # velocity correction is given in a <table> or <function>
        # Reference run: stock engine definition with the <table> form.
        fdm = CreateFDM(self.sandbox)
        fdm.load_script(self.sandbox.path_to_jsbsim_file('scripts',
                                                         'L4102.xml'))
        fdm.run_ic()
        while fdm.run():
            pass
        del fdm
        ref = pd.read_csv('L410.csv', index_col=0)
        tree = et.parse(self.sandbox.path_to_jsbsim_file('engine',
                                                         'engtm601.xml'))
        # Modify the engine definition to use a <function> rather than a
        # <table> component.
        root = tree.getroot()
        engPowVC_tag = root.find("table/[@name='EnginePowerVC']")
        root.remove(engPowVC_tag)
        del engPowVC_tag.attrib['name']
        func_engPowVC = et.SubElement(root, 'function')
        func_engPowVC.attrib['name'] = 'EnginePowerVC'
        func_engPowVC.append(engPowVC_tag)
        tree.write('engtm601.xml')
        # Copy the propeller file.
        shutil.copy(self.sandbox.path_to_jsbsim_file('engine', 'vrtule2.xml'),
                    '.')
        self.sandbox.delete_csv_files()
        # Second run: same script against the modified engine definition.
        fdm = CreateFDM(self.sandbox)
        fdm.set_engine_path('.')
        fdm.load_script(self.sandbox.path_to_jsbsim_file('scripts',
                                                         'L4102.xml'))
        fdm.run_ic()
        while fdm.run():
            pass
        current = pd.read_csv('L410.csv', index_col=0)
        # Check the data are matching i.e. the time steps are the same between
        # the two data sets and that the output data are also the same.
        self.assertTrue(isDataMatching(ref, current))
        # Collect any values differing between the two data sets (tolerance
        # 0.0, i.e. the outputs must match exactly).
        diff = FindDifferences(ref, current, 0.0)
        self.longMessage = True
        self.assertEqual(len(diff), 0, msg='\n'+diff.to_string())
# Run the test case via the project's RunTest helper (JSBSim_utils).
RunTest(TestTurboProp)
| lgpl-2.1 |
vivekmishra1991/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimator to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV

# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30]  # Test on independent people

# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]

n_pixels = data.shape[1]
# BUGFIX: np.ceil/np.floor return floats, which are not valid array indices
# on modern NumPy (>= 1.12) — cast to int before slicing.
upper_half = int(np.ceil(0.5 * n_pixels))
lower_half = int(np.floor(0.5 * n_pixels))
X_train = train[:, :upper_half]  # Upper half of the faces
y_train = train[:, lower_half:]  # Lower half of the faces
X_test = test[:, :upper_half]
y_test = test[:, lower_half:]

# Fit estimators
ESTIMATORS = {
    "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
                                       random_state=0),
    "K-nn": KNeighborsRegressor(),
    "Linear regression": LinearRegression(),
    "Ridge": RidgeCV(),
}

y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
    estimator.fit(X_train, y_train)
    y_test_predict[name] = estimator.predict(X_test)

# Plot the completed faces: first column shows the true face, the remaining
# columns show each estimator's reconstruction of the lower half.
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
    true_face = np.hstack((X_test[i], y_test[i]))
    if i:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
    else:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
                          title="true faces")
    sub.axis("off")
    sub.imshow(true_face.reshape(image_shape),
               cmap=plt.cm.gray,
               interpolation="nearest")
    for j, est in enumerate(sorted(ESTIMATORS)):
        completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
        if i:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
        else:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
                              title=est)
        sub.axis("off")
        sub.imshow(completed_face.reshape(image_shape),
                   cmap=plt.cm.gray,
                   interpolation="nearest")
plt.show()
| bsd-3-clause |
carlvlewis/bokeh | bokeh/charts/builder/timeseries_builder.py | 26 | 6252 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the TimeSeries class which lets you build your TimeSeries charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def TimeSeries(values, index=None, xscale='datetime', **kws):
    """ Create a timeseries chart using
    :class:`TimeSeriesBuilder <bokeh.charts.builder.timeseries_builder.TimeSeriesBuilder>`
    to render the lines from values and index.

    Args:
        values (iterable): a 2d iterable containing the values. Can be anything that
            can be converted to a 2d array, in which the x (time) axis is determined
            by ``index``, while the others are interpreted as y values.
        index (str|1d iterable, optional): can be used to specify a common custom
            index for all data series as an **1d iterable** of any sort that will be used as
            series common index or a **string** that corresponds to the key of the
            mapping to be used as index (and not as data series) if
            area.values is a mapping (like a dict, an OrderedDict
            or a pandas DataFrame)

    In addition to the parameters specific to this chart,
    :ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.

    Returns:
        a new :class:`Chart <bokeh.charts.Chart>`

    Examples:
    .. bokeh-plot::
        :source-position: above

        from collections import OrderedDict
        import datetime
        from bokeh.charts import TimeSeries, output_file, show

        # (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
        now = datetime.datetime.now()
        delta = datetime.timedelta(minutes=1)
        dts = [now + delta*i for i in range(5)]
        xyvalues = OrderedDict({'Date': dts})
        y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
        y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
        y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
        ts = TimeSeries(xyvalues, index='Date', title="TimeSeries", legend="top_left",
                        ylabel='Languages')
        output_file('timeseries.html')
        show(ts)
    """
    # Delegate to the generic chart factory with this chart's builder class.
    return create_and_build(
        TimeSeriesBuilder, values, index=index, xscale=xscale, **kws
    )
class TimeSeriesBuilder(Builder):
    """This is the TimeSeries class and it is in charge of plotting
    TimeSeries charts in an easy and intuitive way.

    Essentially, we provide a way to ingest the data, make the proper
    calculations and push the references into a source object.
    We additionally make calculations for the ranges.
    And finally add the needed lines taking the references from the source.
    """

    index = Any(help="""
    An index to be used for all data series as follows:

    - A 1d iterable of any sort that will be used as
      series common index

    - As a string that corresponds to the key of the
      mapping to be used as index (and not as data
      series) if area.values is a mapping (like a dict,
      an OrderedDict or a pandas DataFrame)
    """)

    def _process_data(self):
        """Take the x/y data from the timeseries values.

        It calculates the chart properties accordingly. Then build a dict
        containing references to all the points to be used by
        the line glyph inside the ``_yield_renderers`` method.
        """
        self._data = dict()
        # list to save all the attributes we are going to create
        self._attr = []
        # Materialize the index once so all input formats (array, blaze, ...)
        # behave alike.  (Was list([x for x in ...]) — the inner list was
        # redundant.)
        xs = list(self._values_index)
        for col, values in self._values.items():
            # When the index is referenced by name, skip that column: it is
            # the shared x axis, not a data series.
            if isinstance(self.index, string_types) \
                    and col == self.index:
                continue
            # save all the groups available in the incoming input
            self._groups.append(col)
            self.set_and_get("x_", col, xs)
            self.set_and_get("y_", col, values)

    def _set_sources(self):
        """Push the TimeSeries data into the ColumnDataSource and
        calculate the proper ranges.
        """
        self._source = ColumnDataSource(self._data)
        self.x_range = DataRange1d()
        y_names = self._attr[1::2]
        endy = max(max(self._data[i]) for i in y_names)
        starty = min(min(self._data[i]) for i in y_names)
        # Pad the y range by 10% on both ends so lines don't hug the frame.
        self.y_range = Range1d(
            start=starty - 0.1 * (endy - starty),
            end=endy + 0.1 * (endy - starty)
        )

    def _yield_renderers(self):
        """Use the line glyphs to connect the xy points in the time series.

        Takes reference points from the data loaded at the ColumnDataSource.
        """
        self._duplet = list(chunk(self._attr, 2))
        colors = cycle_colors(self._duplet, self.palette)
        for i, (x, y) in enumerate(self._duplet, start=1):
            glyph = Line(x=x, y=y, line_color=colors[i - 1])
            renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
            self._legends.append((self._groups[i-1], [renderer]))
            yield renderer
| bsd-3-clause |
mahak/spark | python/pyspark/sql/pandas/types.py | 20 | 13357 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Type-specific codes between pandas and PyArrow. Also contains some utils to correct
pandas instances during the type conversion.
"""
from pyspark.sql.types import BooleanType, ByteType, ShortType, IntegerType, LongType, \
FloatType, DoubleType, DecimalType, StringType, BinaryType, DateType, TimestampType, \
ArrayType, MapType, StructType, StructField, NullType
def to_arrow_type(dt):
    """ Convert Spark data type to pyarrow type
    """
    from distutils.version import LooseVersion
    import pyarrow as pa
    # Exact type comparisons (not isinstance): each Spark type class maps to
    # exactly one Arrow type; anything unrecognized falls through to TypeError.
    if type(dt) == BooleanType:
        arrow_type = pa.bool_()
    elif type(dt) == ByteType:
        arrow_type = pa.int8()
    elif type(dt) == ShortType:
        arrow_type = pa.int16()
    elif type(dt) == IntegerType:
        arrow_type = pa.int32()
    elif type(dt) == LongType:
        arrow_type = pa.int64()
    elif type(dt) == FloatType:
        arrow_type = pa.float32()
    elif type(dt) == DoubleType:
        arrow_type = pa.float64()
    elif type(dt) == DecimalType:
        arrow_type = pa.decimal128(dt.precision, dt.scale)
    elif type(dt) == StringType:
        arrow_type = pa.string()
    elif type(dt) == BinaryType:
        arrow_type = pa.binary()
    elif type(dt) == DateType:
        arrow_type = pa.date32()
    elif type(dt) == TimestampType:
        # Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
        arrow_type = pa.timestamp('us', tz='UTC')
    elif type(dt) == ArrayType:
        # Nested structs and timestamps inside arrays are not supported.
        if type(dt.elementType) in [StructType, TimestampType]:
            raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
        arrow_type = pa.list_(to_arrow_type(dt.elementType))
    elif type(dt) == MapType:
        # pa.map_ only exists from pyarrow 2.0.0 onwards.
        if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
            raise TypeError("MapType is only supported with pyarrow 2.0.0 and above")
        if type(dt.keyType) in [StructType, TimestampType] or \
                type(dt.valueType) in [StructType, TimestampType]:
            raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
        arrow_type = pa.map_(to_arrow_type(dt.keyType), to_arrow_type(dt.valueType))
    elif type(dt) == StructType:
        # Only one level of struct nesting is supported.
        if any(type(field.dataType) == StructType for field in dt):
            raise TypeError("Nested StructType not supported in conversion to Arrow")
        fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
                  for field in dt]
        arrow_type = pa.struct(fields)
    elif type(dt) == NullType:
        arrow_type = pa.null()
    else:
        raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
    return arrow_type
def to_arrow_schema(schema):
    """ Convert a schema from Spark to Arrow
    """
    import pyarrow as pa
    arrow_fields = []
    for spark_field in schema:
        arrow_fields.append(
            pa.field(spark_field.name, to_arrow_type(spark_field.dataType),
                     nullable=spark_field.nullable))
    return pa.schema(arrow_fields)
def from_arrow_type(at):
    """ Convert pyarrow type to Spark data type.
    """
    from distutils.version import LooseVersion
    import pyarrow as pa
    import pyarrow.types as types
    # Dispatch on pyarrow's type predicates; unrecognized types raise TypeError.
    if types.is_boolean(at):
        spark_type = BooleanType()
    elif types.is_int8(at):
        spark_type = ByteType()
    elif types.is_int16(at):
        spark_type = ShortType()
    elif types.is_int32(at):
        spark_type = IntegerType()
    elif types.is_int64(at):
        spark_type = LongType()
    elif types.is_float32(at):
        spark_type = FloatType()
    elif types.is_float64(at):
        spark_type = DoubleType()
    elif types.is_decimal(at):
        spark_type = DecimalType(precision=at.precision, scale=at.scale)
    elif types.is_string(at):
        spark_type = StringType()
    elif types.is_binary(at):
        spark_type = BinaryType()
    elif types.is_date32(at):
        spark_type = DateType()
    elif types.is_timestamp(at):
        spark_type = TimestampType()
    elif types.is_list(at):
        # Timestamps inside arrays are not supported.
        if types.is_timestamp(at.value_type):
            raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
        spark_type = ArrayType(from_arrow_type(at.value_type))
    elif types.is_map(at):
        # Arrow map types are only usable from pyarrow 2.0.0 onwards.
        if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
            raise TypeError("MapType is only supported with pyarrow 2.0.0 and above")
        if types.is_timestamp(at.key_type) or types.is_timestamp(at.item_type):
            raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
        spark_type = MapType(from_arrow_type(at.key_type), from_arrow_type(at.item_type))
    elif types.is_struct(at):
        # Only one level of struct nesting is supported.
        if any(types.is_struct(field.type) for field in at):
            raise TypeError("Nested StructType not supported in conversion from Arrow: " + str(at))
        return StructType(
            [StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
             for field in at])
    elif types.is_dictionary(at):
        # Dictionary-encoded columns decode to their value type.
        spark_type = from_arrow_type(at.value_type)
    elif types.is_null(at):
        spark_type = NullType()
    else:
        raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
    return spark_type
def from_arrow_schema(arrow_schema):
    """ Convert schema from Arrow to Spark.
    """
    spark_fields = []
    for arrow_field in arrow_schema:
        spark_fields.append(
            StructField(arrow_field.name, from_arrow_type(arrow_field.type),
                        nullable=arrow_field.nullable))
    return StructType(spark_fields)
def _get_local_timezone():
""" Get local timezone using pytz with environment variable, or dateutil.
If there is a 'TZ' environment variable, pass it to pandas to use pytz and use it as timezone
string, otherwise use the special word 'dateutil/:' which means that pandas uses dateutil and
it reads system configuration to know the system local timezone.
See also:
- https://github.com/pandas-dev/pandas/blob/0.19.x/pandas/tslib.pyx#L1753
- https://github.com/dateutil/dateutil/blob/2.6.1/dateutil/tz/tz.py#L1338
"""
import os
return os.environ.get('TZ', 'dateutil/:')
def _check_series_localize_timestamps(s, timezone):
    """
    Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.

    If the input series is not a timestamp series, then the same series is returned. If the input
    series is a timestamp series, then a converted series is returned.

    Parameters
    ----------
    s : pandas.Series
    timezone : str
        the timezone to convert. if None then use local timezone

    Returns
    -------
    pandas.Series
        `pandas.Series` that have been converted to tz-naive
    """
    from pyspark.sql.pandas.utils import require_minimum_pandas_version
    require_minimum_pandas_version()
    from pandas.api.types import is_datetime64tz_dtype
    # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
    if not is_datetime64tz_dtype(s.dtype):
        # Non-timestamp columns pass through untouched.
        return s
    target_tz = timezone or _get_local_timezone()
    return s.dt.tz_convert(target_tz).dt.tz_localize(None)
def _check_series_convert_timestamps_internal(s, timezone):
    """
    Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
    Spark internal storage

    Parameters
    ----------
    s : pandas.Series
    timezone : str
        the timezone to convert. if None then use local timezone

    Returns
    -------
    pandas.Series
        `pandas.Series` where if it is a timestamp, has been UTC normalized without a time zone
    """
    from pyspark.sql.pandas.utils import require_minimum_pandas_version
    require_minimum_pandas_version()
    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
    if is_datetime64_dtype(s.dtype):
        # tz-naive series: localize to the target zone, then normalize to UTC.
        # When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
        # timestamp is during the hour when the clock is adjusted backward during due to
        # daylight saving time (dst).
        # E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to
        # 2015-11-01 1:00 from dst-time to standard time, and therefore, when tz_localize
        # a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either
        # dst time (2015-01-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
        #
        # Here we explicit choose to use standard time. This matches the default behavior of
        # pytz.
        #
        # Here are some code to help understand this behavior:
        # >>> import datetime
        # >>> import pandas as pd
        # >>> import pytz
        # >>>
        # >>> t = datetime.datetime(2015, 11, 1, 1, 30)
        # >>> ts = pd.Series([t])
        # >>> tz = pytz.timezone('America/New_York')
        # >>>
        # >>> ts.dt.tz_localize(tz, ambiguous=True)
        # 0   2015-11-01 01:30:00-04:00
        # dtype: datetime64[ns, America/New_York]
        # >>>
        # >>> ts.dt.tz_localize(tz, ambiguous=False)
        # 0   2015-11-01 01:30:00-05:00
        # dtype: datetime64[ns, America/New_York]
        # >>>
        # >>> str(tz.localize(t))
        # '2015-11-01 01:30:00-05:00'
        tz = timezone or _get_local_timezone()
        return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
    elif is_datetime64tz_dtype(s.dtype):
        # Already tz-aware: just normalize to UTC.
        return s.dt.tz_convert('UTC')
    else:
        # Not a timestamp series: return unchanged.
        return s
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
    """
    Convert timestamp to timezone-naive in the specified timezone or local timezone

    Parameters
    ----------
    s : pandas.Series
    from_timezone : str
        the timezone to convert from. if None then use local timezone
    to_timezone : str
        the timezone to convert to. if None then use local timezone

    Returns
    -------
    pandas.Series
        `pandas.Series` where if it is a timestamp, has been converted to tz-naive
    """
    from pyspark.sql.pandas.utils import require_minimum_pandas_version
    require_minimum_pandas_version()
    import pandas as pd
    from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
    source_tz = from_timezone or _get_local_timezone()
    target_tz = to_timezone or _get_local_timezone()
    # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
    if is_datetime64tz_dtype(s.dtype):
        # tz-aware: convert to the target zone, then strip the tz info.
        return s.dt.tz_convert(target_tz).dt.tz_localize(None)
    if is_datetime64_dtype(s.dtype) and source_tz != target_tz:
        # `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
        def _shift(ts):
            if ts is pd.NaT:
                return pd.NaT
            return (ts.tz_localize(source_tz, ambiguous=False)
                    .tz_convert(target_tz).tz_localize(None))
        return s.apply(_shift)
    return s
def _check_series_convert_timestamps_local_tz(s, timezone):
    """
    Convert timestamp to timezone-naive in the specified timezone or local timezone

    Parameters
    ----------
    s : pandas.Series
    timezone : str
        the timezone to convert to. if None then use local timezone

    Returns
    -------
    pandas.Series
        `pandas.Series` where if it is a timestamp, has been converted to tz-naive
    """
    # Delegate with from_timezone=None, i.e. convert *from* the local timezone.
    return _check_series_convert_timestamps_localize(s, None, timezone)
def _check_series_convert_timestamps_tz_local(s, timezone):
    """
    Convert timestamp to timezone-naive in the specified timezone or local timezone

    Parameters
    ----------
    s : pandas.Series
    timezone : str
        the timezone to convert from. if None then use local timezone

    Returns
    -------
    pandas.Series
        `pandas.Series` where if it is a timestamp, has been converted to tz-naive
    """
    # Delegate with to_timezone=None, i.e. convert *to* the local timezone.
    return _check_series_convert_timestamps_localize(s, timezone, None)
def _convert_map_items_to_dict(s):
"""
Convert a series with items as list of (key, value), as made from an Arrow column of map type,
to dict for compatibility with non-arrow MapType columns.
:param s: pandas.Series of lists of (key, value) pairs
:return: pandas.Series of dictionaries
"""
return s.apply(lambda m: None if m is None else {k: v for k, v in m})
def _convert_dict_to_map_items(s):
"""
Convert a series of dictionaries to list of (key, value) pairs to match expected data
for Arrow column of map type.
:param s: pandas.Series of dictionaries
:return: pandas.Series of lists of (key, value) pairs
"""
return s.apply(lambda d: list(d.items()) if d is not None else None)
| apache-2.0 |
zaxtax/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
    """Sanity check: samples counted at the leaves and at the root both
    equal the number of fitted samples."""
    X, y = make_blobs(n_samples=10)
    brc = Birch()
    brc.fit(X)
    # Generator expressions avoid building throwaway lists inside sum().
    n_samples_root = sum(sc.n_samples_ for sc in brc.root_.subclusters_)
    n_samples_leaves = sum(sc.n_samples_ for leaf in brc._get_leaves()
                           for sc in leaf.subclusters_)
    assert_equal(n_samples_leaves, X.shape[0])
    assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
    """fit() must be equivalent to calling partial_fit() in several chunks."""
    # Test that fit is equivalent to calling partial_fit multiple times
    X, y = make_blobs(n_samples=100)
    brc = Birch(n_clusters=3)
    brc.fit(X)
    brc_partial = Birch(n_clusters=None)
    brc_partial.partial_fit(X[:50])
    brc_partial.partial_fit(X[50:])
    assert_array_equal(brc_partial.subcluster_centers_,
                       brc.subcluster_centers_)
    # Test that same global labels are obtained after calling partial_fit
    # with None
    brc_partial.set_params(n_clusters=3)
    brc_partial.partial_fit(None)
    assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
    """predict() must assign each sample to its nearest subcluster centroid."""
    # Test the predict method predicts the nearest centroid.
    rng = np.random.RandomState(0)
    X = generate_clustered_data(n_clusters=3, n_features=3,
                                n_samples_per_cluster=10)
    # n_samples * n_samples_per_cluster
    shuffle_indices = np.arange(30)
    rng.shuffle(shuffle_indices)
    X_shuffle = X[shuffle_indices, :]
    brc = Birch(n_clusters=4, threshold=1.)
    brc.fit(X_shuffle)
    centroids = brc.subcluster_centers_
    assert_array_equal(brc.labels_, brc.predict(X_shuffle))
    nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
    # v-measure of 1.0 means the label assignments agree perfectly.
    assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
    """Exercise the ``n_clusters`` parameter: int, clusterer instance,
    invalid estimator, and the too-few-subclusters warning."""
    # Test that n_clusters param works properly
    X, y = make_blobs(n_samples=100, centers=10)
    brc1 = Birch(n_clusters=10)
    brc1.fit(X)
    assert_greater(len(brc1.subcluster_centers_), 10)
    assert_equal(len(np.unique(brc1.labels_)), 10)
    # Test that n_clusters = Agglomerative Clustering gives
    # the same results.
    gc = AgglomerativeClustering(n_clusters=10)
    brc2 = Birch(n_clusters=gc)
    brc2.fit(X)
    assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
    assert_array_equal(brc1.labels_, brc2.labels_)
    # Test that the wrong global clustering step raises an Error.
    clf = ElasticNet()
    brc3 = Birch(n_clusters=clf)
    assert_raises(ValueError, brc3.fit, X)
    # Test that a small number of clusters raises a warning.
    brc4 = Birch(threshold=10000.)
    assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
    """Fitting on CSR input must give identical results to dense input."""
    # Test that sparse and dense data give same results
    X, y = make_blobs(n_samples=100, centers=10)
    brc = Birch(n_clusters=10)
    brc.fit(X)
    csr = sparse.csr_matrix(X)
    brc_sparse = Birch(n_clusters=10)
    brc_sparse.fit(csr)
    assert_array_equal(brc.labels_, brc_sparse.labels_)
    assert_array_equal(brc.subcluster_centers_,
                       brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
    """Recursively assert that no node in the CF-tree has more than
    ``branching_factor`` subclusters."""
    subclusters = node.subclusters_
    assert_greater_equal(branching_factor, len(subclusters))
    for cluster in subclusters:
        if cluster.child_:
            check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
    """Nodes must never exceed ``branching_factor`` subclusters, and a
    branching factor of 1 must be rejected."""
    # Test that nodes have at max branching_factor number of subclusters
    X, y = make_blobs()
    branching_factor = 9
    # Purposefully set a low threshold to maximize the subclusters.
    brc = Birch(n_clusters=None, branching_factor=branching_factor,
                threshold=0.01)
    brc.fit(X)
    check_branching_factor(brc.root_, branching_factor)
    brc = Birch(n_clusters=3, branching_factor=branching_factor,
                threshold=0.01)
    brc.fit(X)
    check_branching_factor(brc.root_, branching_factor)
    # Raises error when branching_factor is set to one.
    brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
    assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
    """Use the leaf linked list for traversal"""
    # Walk Birch's singly-linked list of leaves, asserting every
    # subcluster's radius stays within the threshold.
    current_leaf = birch_instance.dummy_leaf_.next_leaf_
    while current_leaf:
        subclusters = current_leaf.subclusters_
        for sc in subclusters:
            assert_greater_equal(threshold, sc.radius)
        current_leaf = current_leaf.next_leaf_
def test_threshold():
    """Leaf subclusters must have radius bounded by the ``threshold``."""
    # Test that the leaf subclusters have a threshold lesser than radius
    X, y = make_blobs(n_samples=80, centers=4)
    brc = Birch(threshold=0.5, n_clusters=None)
    brc.fit(X)
    check_threshold(brc, 0.5)
    brc = Birch(threshold=5.0, n_clusters=None)
    brc.fit(X)
    check_threshold(brc, 5.)
| bsd-3-clause |
nelson-liu/scikit-learn | examples/applications/svm_gui.py | 124 | 11251 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
try:
import tkinter as Tk
except ImportError:
# Backward compat for Python 2
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
    """Data holder for the GUI, playing the observable role of the
    observer pattern: registered observers are notified on every change
    event via their ``update(event, model)`` method.
    """

    def __init__(self):
        self.observers = []      # views to notify on change
        self.surface = None      # (X1, X2, Z) decision surface, if fitted
        self.data = []           # clicked points as (x, y, label) tuples
        self.cls = None
        self.surface_type = 0    # 0: hyperplanes, 1: filled surface

    def changed(self, event):
        """Broadcast *event* to every registered observer."""
        for obs in self.observers:
            obs.update(event, self)

    def add_observer(self, observer):
        """Subscribe *observer* to change notifications."""
        self.observers.append(observer)

    def set_surface(self, surface):
        self.surface = surface

    def dump_svmlight_file(self, file):
        """Write the collected points to *file* in svmlight format."""
        points = np.array(self.data)
        dump_svmlight_file(points[:, 0:2], points[:, 2], file)
class Controller(object):
    """Mediates between the Tk widgets and the Model: reads the widget
    state, fits an SVM, and pushes the resulting decision surface into
    the model (which then notifies its observers)."""
    def __init__(self, model):
        self.model = model
        # Tk variables bound to the radio button groups in ControllBar.
        self.kernel = Tk.IntVar()
        self.surface_type = Tk.IntVar()
        # Whether or not a model has been fitted
        self.fitted = False
    def fit(self):
        """Fit an SVM on the clicked points using current widget settings."""
        print("fit the model")
        train = np.array(self.model.data)
        X = train[:, 0:2]
        y = train[:, 2]
        # Hyper-parameters come from the Tk entry fields (set by ControllBar).
        C = float(self.complexity.get())
        gamma = float(self.gamma.get())
        coef0 = float(self.coef0.get())
        degree = int(self.degree.get())
        kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
        # With a single class present, fall back to one-class SVM.
        if len(np.unique(y)) == 1:
            clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
                                  gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X)
        else:
            clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
                          gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X, y)
        if hasattr(clf, 'score'):
            print("Accuracy:", clf.score(X, y) * 100)
        X1, X2, Z = self.decision_surface(clf)
        self.model.clf = clf
        self.model.set_surface((X1, X2, Z))
        self.model.surface_type = self.surface_type.get()
        self.fitted = True
        # Notify observers (the View) that a new surface is available.
        self.model.changed("surface")
    def decision_surface(self, cls):
        """Evaluate cls.decision_function on a unit-spaced grid over the
        plot area; returns the meshgrid and the reshaped decision values."""
        delta = 1
        x = np.arange(x_min, x_max + delta, delta)
        y = np.arange(y_min, y_max + delta, delta)
        X1, X2 = np.meshgrid(x, y)
        Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
        Z = Z.reshape(X1.shape)
        return X1, X2, Z
    def clear_data(self):
        """Drop all points and reset the fitted flag."""
        self.model.data = []
        self.fitted = False
        self.model.changed("clear")
    def add_example(self, x, y, label):
        """Record a clicked point (x, y) with its class label."""
        self.model.data.append((x, y, label))
        self.model.changed("example_added")
        # update decision surface if already fitted.
        self.refit()
    def refit(self):
        """Refit the model if already fitted. """
        if self.fitted:
            self.fit()
class View(object):
    """Observer of the Model: renders the points, support vectors and
    decision surface on a matplotlib canvas embedded in the Tk window,
    and forwards mouse clicks to the controller."""
    def __init__(self, root, controller):
        f = Figure()
        ax = f.add_subplot(111)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_xlim((x_min, x_max))
        ax.set_ylim((y_min, y_max))
        canvas = FigureCanvasTkAgg(f, master=root)
        canvas.show()
        canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        # Route matplotlib mouse events to this view.
        canvas.mpl_connect('button_press_event', self.onclick)
        toolbar = NavigationToolbar2TkAgg(canvas, root)
        toolbar.update()
        self.controllbar = ControllBar(root, controller)
        self.f = f
        self.ax = ax
        self.canvas = canvas
        self.controller = controller
        # Contour artists currently drawn; removed before a redraw.
        self.contours = []
        self.c_labels = None
        self.plot_kernels()
    def plot_kernels(self):
        """Annotate the axes with the three kernel formulas."""
        self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
    def onclick(self, event):
        """Left click adds a positive example, right click a negative one."""
        if event.xdata and event.ydata:
            if event.button == 1:
                self.controller.add_example(event.xdata, event.ydata, 1)
            elif event.button == 3:
                self.controller.add_example(event.xdata, event.ydata, -1)
    def update_example(self, model, idx):
        """Draw the point stored at model.data[idx] (white=+1, black=-1)."""
        x, y, l = model.data[idx]
        if l == 1:
            color = 'w'
        elif l == -1:
            color = 'k'
        self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
    def update(self, event, model):
        """Observer callback: react to model change events."""
        if event == "examples_loaded":
            for i in xrange(len(model.data)):
                self.update_example(model, i)
        if event == "example_added":
            # Only the most recently appended point needs drawing.
            self.update_example(model, -1)
        if event == "clear":
            self.ax.clear()
            self.ax.set_xticks([])
            self.ax.set_yticks([])
            self.contours = []
            self.c_labels = None
            self.plot_kernels()
        if event == "surface":
            self.remove_surface()
            self.plot_support_vectors(model.clf.support_vectors_)
            self.plot_decision_surface(model.surface, model.surface_type)
        self.canvas.draw()
    def remove_surface(self):
        """Remove old decision surface."""
        if len(self.contours) > 0:
            for contour in self.contours:
                if isinstance(contour, ContourSet):
                    # ContourSet has no remove(); drop each line collection.
                    for lineset in contour.collections:
                        lineset.remove()
                else:
                    contour.remove()
            self.contours = []
    def plot_support_vectors(self, support_vectors):
        """Plot the support vectors by placing circles over the
        corresponding data points and adds the circle collection
        to the contours list."""
        cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
                             s=80, edgecolors="k", facecolors="none")
        self.contours.append(cs)
    def plot_decision_surface(self, surface, type):
        """Draw the decision surface: type 0 plots the margin hyperplanes,
        type 1 plots a filled contour map plus the decision boundary."""
        X1, X2, Z = surface
        if type == 0:
            levels = [-1.0, 0.0, 1.0]
            linestyles = ['dashed', 'solid', 'dashed']
            colors = 'k'
            self.contours.append(self.ax.contour(X1, X2, Z, levels,
                                                 colors=colors,
                                                 linestyles=linestyles))
        elif type == 1:
            self.contours.append(self.ax.contourf(X1, X2, Z, 10,
                                                  cmap=matplotlib.cm.bone,
                                                  origin='lower', alpha=0.85))
            self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
                                                 linestyles=['solid']))
        else:
            raise ValueError("surface type unknown")
class ControllBar(object):
    """Builds the Tk control strip: kernel selection, hyper-parameter
    entries, surface-type selection and the Fit/Clear buttons. All widget
    state is stored on the controller as Tk variables."""
    def __init__(self, root, controller):
        fm = Tk.Frame(root)
        # Kernel choice radio buttons, bound to controller.kernel.
        kernel_group = Tk.Frame(fm)
        Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
                       value=0, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
                       value=1, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
                       value=2, command=controller.refit).pack(anchor=Tk.W)
        kernel_group.pack(side=Tk.LEFT)
        # Hyper-parameter entry fields (C, gamma, degree, coef0); the
        # StringVars are attached to the controller for later reading.
        valbox = Tk.Frame(fm)
        controller.complexity = Tk.StringVar()
        controller.complexity.set("1.0")
        c = Tk.Frame(valbox)
        Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
            side=Tk.LEFT)
        c.pack()
        controller.gamma = Tk.StringVar()
        controller.gamma.set("0.01")
        g = Tk.Frame(valbox)
        Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
        g.pack()
        controller.degree = Tk.StringVar()
        controller.degree.set("3")
        d = Tk.Frame(valbox)
        Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
        d.pack()
        controller.coef0 = Tk.StringVar()
        controller.coef0.set("0")
        r = Tk.Frame(valbox)
        Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
        r.pack()
        valbox.pack(side=Tk.LEFT)
        # Surface rendering mode, bound to controller.surface_type.
        cmap_group = Tk.Frame(fm)
        Tk.Radiobutton(cmap_group, text="Hyperplanes",
                       variable=controller.surface_type, value=0,
                       command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(cmap_group, text="Surface",
                       variable=controller.surface_type, value=1,
                       command=controller.refit).pack(anchor=Tk.W)
        cmap_group.pack(side=Tk.LEFT)
        # Action buttons.
        train_button = Tk.Button(fm, text='Fit', width=5,
                                 command=controller.fit)
        train_button.pack()
        fm.pack(side=Tk.LEFT)
        Tk.Button(fm, text='Clear', width=5,
                  command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
    """Build the command line parser (a single ``--output`` option)."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--output",
                      action="store", type="str", dest="output",
                      help="Path where to dump data.")
    return parser
def main(argv):
    """Parse options, wire model/view/controller together and run the GUI."""
    op = get_parser()
    opts, args = op.parse_args(argv[1:])
    root = Tk.Tk()
    model = Model()
    controller = Controller(model)
    root.wm_title("Scikit-learn Libsvm GUI")
    view = View(root, controller)
    model.add_observer(view)
    Tk.mainloop()
    # Reached only after the window is closed; optionally dump the
    # collected points in svmlight format.
    if opts.output:
        model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
    # Script entry point: launch the GUI with the command line arguments.
    main(sys.argv)
| bsd-3-clause |
belltailjp/scikit-learn | sklearn/preprocessing/tests/test_data.py | 14 | 37957 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
    """Densify *a*: sparse matrices become ndarrays, anything else is
    returned unchanged."""
    return a.toarray() if hasattr(a, "toarray") else a
def test_polynomial_features():
    # Test Polynomial Features
    # Expected univariate degree-3 expansion: [1, x, x^2, x^3].
    X1 = np.arange(6)[:, np.newaxis]
    P1 = np.hstack([np.ones_like(X1),
                    X1, X1 ** 2, X1 ** 3])
    deg1 = 3
    # Expected bivariate degree-2 expansion, all monomials up to degree 2.
    X2 = np.arange(6).reshape((3, 2))
    x1 = X2[:, :1]
    x2 = X2[:, 1:]
    P2 = np.hstack([x1 ** 0 * x2 ** 0,
                    x1 ** 1 * x2 ** 0,
                    x1 ** 0 * x2 ** 1,
                    x1 ** 2 * x2 ** 0,
                    x1 ** 1 * x2 ** 1,
                    x1 ** 0 * x2 ** 2])
    deg2 = 2
    for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
        P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
        assert_array_almost_equal(P_test, P)
        # Without bias the leading all-ones column is dropped.
        P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
        assert_array_almost_equal(P_test, P[:, 1:])
    # interaction_only keeps [1, x1, x2, x1*x2] (no pure squares).
    # NOTE(review): X here is X2, leaked from the loop above — works, but
    # relies on the loop ordering; confirm this is intentional.
    interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
    X_poly = interact.fit_transform(X)
    assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
    # Test scaling of dataset along single axis
    rng = np.random.RandomState(0)
    X = rng.randn(5)
    X_orig_copy = X.copy()
    scaler = StandardScaler()
    # copy=False scales in place, so X itself is modified here.
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_array_almost_equal(X_scaled_back, X_orig_copy)
    # Test with 1D list
    X = [0., 1., 2, 0.4, 1.]
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
    # The scale() function must agree with the estimator.
    X_scaled = scale(X)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
    # A constant feature without centering must be left unchanged.
    X = np.ones(5)
    assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
    """Test numerical stability of scaling"""
    # np.log(1e-5) is taken because of its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
    x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    if LooseVersion(np.__version__) >= LooseVersion('1.9'):
        # This does not raise a warning as the number of samples is too low
        # to trigger the problem in recent numpy
        x_scaled = assert_no_warnings(scale, x)
        assert_array_almost_equal(scale(x), np.zeros(8))
    else:
        w = "standard deviation of the data is probably very close to 0"
        x_scaled = assert_warns_message(UserWarning, w, scale, x)
        assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation run into numerical issues:
    x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    w = "standard deviation of the data is probably very close to 0"
    x_scaled = assert_warns_message(UserWarning, w, scale, x)
    assert_array_almost_equal(x_scaled, np.zeros(10))
    # Tiny values: scaling must still succeed without any warning.
    x = np.ones(10, dtype=np.float64) * 1e-100
    x_small_scaled = assert_no_warnings(scale, x)
    assert_array_almost_equal(x_small_scaled, np.zeros(10))
    # Large values can cause (often recoverable) numerical stability issues:
    x_big = np.ones(10, dtype=np.float64) * 1e100
    w = "Dataset may contain too large values"
    x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
    assert_array_almost_equal(x_big_scaled, np.zeros(10))
    assert_array_almost_equal(x_big_scaled, x_small_scaled)
    # Centering alone (no std scaling) must also warn and recover.
    x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
                                          with_std=False)
    assert_array_almost_equal(x_big_centered, np.zeros(10))
    assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
    # Test scaling of 2d array along first axis
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    # Zero-variance first column stays at zero std after scaling.
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has been copied
    assert_true(X_scaled is not X)
    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)
    # axis=1: scale each sample (row) instead of each feature.
    X_scaled = scale(X, axis=1, with_std=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
    X_scaled = scale(X, axis=1, with_std=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
    # Check that the data hasn't been modified
    assert_true(X_scaled is not X)
    # copy=False on an ndarray input must return the very same object.
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is X)
    X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non zero feature
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
    """MinMaxScaler maps iris features onto the requested range and
    inverse_transform recovers the original data exactly."""
    X = iris.data
    scaler = MinMaxScaler()
    # default params: data mapped onto [0, 1]
    X_trans = scaler.fit_transform(X)
    # BUG FIX: the min assertion was duplicated (copy-paste); the second
    # occurrence has been removed.
    assert_array_almost_equal(X_trans.min(axis=0), 0)
    assert_array_almost_equal(X_trans.max(axis=0), 1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # not default params: min=1, max=2
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 1)
    assert_array_almost_equal(X_trans.max(axis=0), 2)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # min=-.5, max=.6
    scaler = MinMaxScaler(feature_range=(-.5, .6))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), -.5)
    assert_array_almost_equal(X_trans.max(axis=0), .6)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # raises on invalid range
    scaler = MinMaxScaler(feature_range=(2, 1))
    assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
    # Check min max scaler on toy data with zero variance features
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    # default params
    scaler = MinMaxScaler()
    X_trans = scaler.fit_transform(X)
    # Constant columns collapse to 0; the varying column spans [0, 1].
    X_expected_0_1 = [[0., 0., 0.5],
                      [0., 0., 0.0],
                      [0., 0., 1.0]]
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # New data may fall outside [0, 1] when it exceeds the fitted range.
    X_trans_new = scaler.transform(X_new)
    X_expected_0_1_new = [[+0., 1., 0.500],
                          [-1., 0., 0.083],
                          [+0., 0., 1.333]]
    assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
    # not default params
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    X_expected_1_2 = [[1., 1., 1.5],
                      [1., 1., 1.0],
                      [1., 1., 2.0]]
    assert_array_almost_equal(X_trans, X_expected_1_2)
def test_min_max_scaler_1d():
    # Test scaling of dataset along single axis
    rng = np.random.RandomState(0)
    X = rng.randn(5)
    X_orig_copy = X.copy()
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_array_almost_equal(X_scaled_back, X_orig_copy)
    # Test with 1D list
    X = [0., 1., 2, 0.4, 1.]
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
    # Constant feature.
    # A zero-variance feature must still be mapped inside [0, 1].
    X = np.zeros(5)
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_greater_equal(X_scaled.min(), 0.)
    assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
    # StandardScaler(with_mean=False) on dense, CSR and CSC inputs must
    # produce identical statistics and support round-tripping.
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)
    # Centering sparse data is not supported.
    assert_raises(ValueError, StandardScaler().fit, X_csr)
    # A no-op scaler must leave the data untouched in both directions.
    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)
    scaler = StandardScaler(with_mean=False).fit(X)
    X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
    X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))
    scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
    # NOTE(review): scaler_csr (not scaler_csc) transforms the CSC data;
    # equivalent because both scalers learned identical stats — confirm
    # this is intentional.
    X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))
    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)
    assert_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csc.std_)
    assert_array_almost_equal(
        X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)
    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
    X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
    # test that scaler converts integer input to floating
    # for both sparse and dense matrices
    rng = np.random.RandomState(42)
    X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)
    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    # The warnings.catch_warnings blocks silence the expected dtype
    # conversion warnings raised on integer input.
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler = StandardScaler(with_mean=False).fit(X)
        X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
        X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
        # NOTE(review): scaler_csr (not scaler_csc) transforms the CSC
        # data; equivalent since both learned identical stats — confirm.
        X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))
    # Dense and sparse fits must agree on the learned statistics.
    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)
    assert_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csc.std_)
    assert_array_almost_equal(
        X_scaled.mean(axis=0),
        [0., 1.109, 1.856, 21., 1.559], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
        X_csr_scaled.astype(np.float), 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)
    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
    X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
    """fit() must leave its input untouched, even with copy=False."""
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is constantly zero
    X_csr = sparse.csr_matrix(X)
    dense_before = X.copy()
    StandardScaler(copy=False).fit(X)
    assert_array_equal(X, dense_before)
    sparse_before = X_csr.copy()
    StandardScaler(with_mean=False, copy=False).fit(X_csr)
    assert_array_equal(X_csr.toarray(), sparse_before.toarray())
def test_scale_sparse_with_mean_raise_exception():
    """Centering sparse input is impossible and must raise ValueError."""
    rng = np.random.RandomState(42)
    data = rng.randn(4, 5)
    data_csr = sparse.csr_matrix(data)
    # Both the function and the estimator refuse to center sparse data.
    assert_raises(ValueError, scale, data_csr, with_mean=True)
    assert_raises(ValueError, StandardScaler(with_mean=True).fit, data_csr)
    # A scaler fitted on dense data still refuses sparse input for both
    # transform and inverse_transform.
    scaler = StandardScaler(with_mean=True).fit(data)
    assert_raises(ValueError, scaler.transform, data_csr)
    transformed_csr = sparse.csr_matrix(scaler.transform(data))
    assert_raises(ValueError, scaler.inverse_transform, transformed_csr)
def test_scale_input_finiteness_validation():
    """scale() rejects NaN and infinite values with an explicit error."""
    message = "Input contains NaN, infinity or a value too large"
    for bad_value in (np.nan, np.inf):
        assert_raises_regex(ValueError, message,
                            scale, [bad_value, 5, 6, 7, 8])
def test_scale_function_without_centering():
    # scale(..., with_mean=False) must work on dense, CSR and CSC inputs
    # and produce matching statistics.
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_scaled = scale(X, with_mean=False)
    assert_false(np.any(np.isnan(X_scaled)))
    X_csr_scaled = scale(X_csr, with_mean=False)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))
    # test csc has same outcome
    X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
    assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
    # raises value error on axis != 0
    assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
    assert_array_almost_equal(X_scaled.mean(axis=0),
                              [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)
    # Sparse statistics must agree with the dense result.
    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
    """Test robust scaling of 2d array along first axis"""
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is constantly zero
    scaler = RobustScaler()
    X_scaled = scaler.fit(X).transform(X)
    # every column is centered on its median
    assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
    # the all-zero column stays constant after scaling
    assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
    # On iris: zero median, unit IQR, and an exact inverse transform.
    scaler = RobustScaler()
    X = iris.data
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(np.median(X_trans, axis=0), 0)
    assert_array_almost_equal(X, scaler.inverse_transform(X_trans))
    quartiles = np.percentile(X_trans, q=(25, 75), axis=0)
    assert_array_almost_equal(quartiles[1] - quartiles[0], 1)
def test_robust_scale_axis1():
    # axis=1 scales samples (rows) instead of features (columns).
    X_trans = robust_scale(iris.data, axis=1)
    assert_array_almost_equal(np.median(X_trans, axis=1), 0)
    quartiles = np.percentile(X_trans, q=(25, 75), axis=1)
    assert_array_almost_equal(quartiles[1] - quartiles[0], 1)
def test_robust_scaler_zero_variance_features():
    """Check RobustScaler on toy data with zero variance features"""
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]
    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)
    # NOTE: for such a small sample size, what we expect in the third column
    # depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produces by np.percentile
    # using numpy 1.9 Calculating quantiles with
    # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
    # would yield very different results!
    # Constant columns collapse to zero; the varying column is centered
    # on its median and scaled by its IQR.
    X_expected = [[0., 0., +0.0],
                  [0., 0., -1.0],
                  [0., 0., +1.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    X_expected_new = [[+0., 1., +0.],
                      [-1., 0., -0.83333],
                      [+0., 0., +1.66667]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
    """Check MaxAbsScaler on toy data with zero variance features"""
    X = [[0., 1., +0.5],
         [0., 1., -0.3],
         [0., 1., +1.5],
         [0., 0., +0.0]]
    scaler = MaxAbsScaler()
    X_trans = scaler.fit_transform(X)
    # Each column is divided by its maximum absolute value; an all-zero
    # column is left at zero.
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    X_expected_new = [[+0., 2.0, 1.0 / 3.0],
                      [-1., 1.0, 0.0],
                      [+0., 1.0, 1.0]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
    # sparse data
    # The sparse path must yield exactly the same result as the dense one.
    X_csr = sparse.csr_matrix(X)
    X_trans = scaler.fit_transform(X_csr)
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans.A, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
    """Check MaxAbsScaler on toy data with a large negative value"""
    X = [[0., 1., +0.5, -1.0],
         [0., 1., -0.3, -0.5],
         [0., 1., -100.0, 0.0],
         [0., 0., +0.0, -2.0]]
    # Each column is divided by its maximum *absolute* value, so a large
    # negative entry dominates the scale of its column.
    expected = [[0., 1., 0.005, -0.5],
                [0., 1., -0.003, -0.25],
                [0., 1., -1.0, 0.0],
                [0., 0., 0.0, -1.0]]
    assert_array_almost_equal(MaxAbsScaler().fit_transform(X), expected)
def test_warning_scaling_integers():
    # Scaling integer data must warn about the conversion to float64.
    X = np.array([[1, 2, 0],
                  [0, 0, 0]], dtype=np.uint8)
    message = "Data with input dtype uint8 was converted to float64"
    clean_warning_registry()
    assert_warns_message(DataConversionWarning, message, scale, X)
    for estimator in (StandardScaler(), MinMaxScaler()):
        assert_warns_message(DataConversionWarning, message,
                             estimator.fit, X)
def test_normalizer_l1():
    """Normalizer(norm='l1') scales rows to unit l1 norm; all-zero rows
    must stay zero, both on dense and (pruned/unpruned) sparse input."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)
    # set the row number 3 to zero
    X_dense[3, :] = 0.0
    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)
    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='l1', copy=True)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is not X)
        X_norm1 = toarray(X_norm)
        normalizer = Normalizer(norm='l1', copy=False)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is X)
        X_norm2 = toarray(X_norm)
        for X_norm in (X_norm1, X_norm2):
            row_sums = np.abs(X_norm).sum(axis=1)
            for i in range(3):
                assert_almost_equal(row_sums[i], 1.0)
            assert_almost_equal(row_sums[3], 0.0)
    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # BUG FIX: this loop previously normalized with norm='l2' and
        # asserted against the stale ``row_sums`` array left over from
        # the loop above instead of the freshly normalized data.
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
    """Normalizer(norm='l2') scales rows to unit Euclidean norm; all-zero
    rows must stay zero, both on dense and (pruned/unpruned) sparse input."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)
    # set the row number 3 to zero
    X_dense[3, :] = 0.0
    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)
    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='l2', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)
        normalizer = Normalizer(norm='l2', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)
        for X_norm in (X_norm1, X_norm2):
            for i in range(3):
                assert_almost_equal(la.norm(X_norm[i]), 1.0)
            assert_almost_equal(la.norm(X_norm[3]), 0.0)
    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # FIX: dropped the confusing chained assignment
        # ``X_norm = normalizer = Normalizer(...)`` that rebound
        # ``normalizer`` to the transformed array.
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        for i in range(3):
            assert_almost_equal(la.norm(X_norm[i]), 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
    """Check Normalizer(norm='max') on dense and sparse inputs.

    Rows are scaled by their maximum value; an all-zero row stays zero.
    Also verifies copy=True/copy=False semantics, and that non-CSR sparse
    formats are converted (i.e. copied) even when copy=False.
    """
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='max', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)

        normalizer = Normalizer(norm='max', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)

        for X_norm in (X_norm1, X_norm2):
            row_maxs = X_norm.max(axis=1)
            for i in range(3):
                assert_almost_equal(row_maxs[i], 1.0)
            assert_almost_equal(row_maxs[3], 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # BUG FIX: this loop previously normalized with norm='l2' (this is
        # the max-norm test) and then asserted against the stale row_maxs
        # array left over from the loop above.  Use norm='max' and recompute
        # the row maxima from the freshly normalized matrix.
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(row_maxs[3], 0.0)
def test_normalize():
    """Exercise the normalize function directly.

    Only covers behaviour not already exercised through the Normalizer
    tests: the axis=0 code path and rejection of invalid parameters.
    """
    rng = np.random.RandomState(37)
    X = rng.randn(3, 2)
    # Normalizing rows must match normalizing columns of the transpose.
    by_rows = normalize(X, copy=False)
    by_cols = normalize(X.T, axis=0, copy=False).T
    assert_array_equal(by_rows, by_cols)
    # Invalid axis and unknown norm names are rejected.
    assert_raises(ValueError, normalize, [[0]], axis=2)
    assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
    # Check Binarizer thresholding on array, list, CSR and CSC inputs,
    # including copy semantics and the sparse negative-threshold error.
    X_ = np.array([[1, 0, 5], [2, 3, -1]])

    for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):

        X = init(X_.copy())

        # threshold=2.0: only values strictly greater than 2 (here 3 and 5)
        # map to one; the remaining four entries map to zero.
        binarizer = Binarizer(threshold=2.0, copy=True)
        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 4)
        assert_equal(np.sum(X_bin == 1), 2)
        X_bin = binarizer.transform(X)
        # Sparseness of the input is preserved in the output.
        assert_equal(sparse.issparse(X), sparse.issparse(X_bin))

        # Default threshold 0.0; fit is chainable.
        binarizer = Binarizer(copy=True).fit(X)
        X_bin = toarray(binarizer.transform(X))
        assert_true(X_bin is not X)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

        binarizer = Binarizer(copy=True)
        X_bin = binarizer.transform(X)
        assert_true(X_bin is not X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

        # copy=False: the transform happens in place, except for plain
        # lists which are necessarily converted (and therefore copied).
        binarizer = Binarizer(copy=False)
        X_bin = binarizer.transform(X)
        if init is not list:
            assert_true(X_bin is X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

    # Negative threshold: 5 of the 6 entries are above -0.5.
    binarizer = Binarizer(threshold=-0.5, copy=True)
    for init in (np.array, list):
        X = init(X_.copy())

        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 1)
        assert_equal(np.sum(X_bin == 1), 5)
        X_bin = binarizer.transform(X)

    # Cannot use threshold < 0 for sparse
    assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
    """KernelCenterer on a linear kernel must match StandardScaler
    (without scaling) applied explicitly in feature space."""
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    scaler = StandardScaler(with_std=False)
    X_fit_centered = scaler.fit(X_fit).transform(X_fit)
    K_fit = np.dot(X_fit, X_fit.T)

    # Fit-time kernel: centering K_fit directly vs. building the kernel
    # from the explicitly centered features.
    centerer = KernelCenterer()
    expected_fit = np.dot(X_fit_centered, X_fit_centered.T)
    actual_fit = centerer.fit_transform(K_fit)
    assert_array_almost_equal(expected_fit, actual_fit)

    # Predict-time kernel between new samples and the fit samples.
    X_pred = rng.random_sample((2, 4))
    K_pred = np.dot(X_pred, X_fit.T)
    X_pred_centered = scaler.transform(X_pred)
    expected_pred = np.dot(X_pred_centered, X_fit_centered.T)
    actual_pred = centerer.transform(K_pred)
    assert_array_almost_equal(expected_pred, actual_pred)
def test_fit_transform():
    """fit(X).transform(X) and fit_transform(X) must give the same result."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    for obj in (StandardScaler(), Normalizer(), Binarizer()):
        chained = obj.fit(X).transform(X)
        fused = obj.fit_transform(X)
        assert_array_equal(chained, fused)
def test_add_dummy_feature():
    """A leading all-ones column is prepended to a dense (list) input."""
    X_out = add_dummy_feature([[1, 0], [0, 1], [0, 1]])
    assert_array_equal(X_out, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
    """A COO input stays COO after the dummy feature is added."""
    X_out = add_dummy_feature(sparse.coo_matrix([[1, 0], [0, 1], [0, 1]]))
    assert_true(sparse.isspmatrix_coo(X_out), X_out)
    assert_array_equal(X_out.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
    """A CSC input stays CSC after the dummy feature is added."""
    X_out = add_dummy_feature(sparse.csc_matrix([[1, 0], [0, 1], [0, 1]]))
    assert_true(sparse.isspmatrix_csc(X_out), X_out)
    assert_array_equal(X_out.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
    """A CSR input stays CSR after the dummy feature is added."""
    X_out = add_dummy_feature(sparse.csr_matrix([[1, 0], [0, 1], [0, 1]]))
    assert_true(sparse.isspmatrix_csr(X_out), X_out)
    assert_array_equal(X_out.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
    # Test OneHotEncoder's fit and transform.
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder()
    # discover max values automatically
    X_trans = enc.fit_transform(X).toarray()
    assert_equal(X_trans.shape, (2, 5))
    # Only categories actually observed in X remain as active features.
    assert_array_equal(enc.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])

    # check outcome
    assert_array_equal(X_trans,
                       [[0., 1., 0., 1., 1.],
                        [1., 0., 1., 0., 1.]])

    # max value given as 3
    enc = OneHotEncoder(n_values=4)
    X_trans = enc.fit_transform(X)
    # With an explicit n_values every column gets 4 slots (shape 2 x 12).
    assert_equal(X_trans.shape, (2, 4 * 3))
    assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])

    # max value given per feature
    enc = OneHotEncoder(n_values=[3, 2, 2])
    X = [[1, 0, 1], [0, 1, 1]]
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 3 + 2 + 2))
    assert_array_equal(enc.n_values_, [3, 2, 2])
    # check that testing with larger feature works:
    X = np.array([[2, 0, 1], [0, 1, 1]])
    enc.transform(X)

    # test that an error is raised when out of bounds:
    X_too_large = [[0, 2, 1], [0, 1, 1]]
    assert_raises(ValueError, enc.transform, X_too_large)
    assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)

    # test that error is raised when wrong number of features
    assert_raises(ValueError, enc.transform, X[:, :-1])
    # test that error is raised when wrong number of features in fit
    # with prespecified n_values
    assert_raises(ValueError, enc.fit, X[:, :-1])
    # test exception on wrong init param
    assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)

    enc = OneHotEncoder()
    # test negative input to fit
    assert_raises(ValueError, enc.fit, [[0], [-1]])

    # test negative input to transform
    enc.fit([[0], [1]])
    assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
    """OneHotEncoder(sparse=False) must return a dense array with the same
    encoding as the sparse default."""
    samples = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder(sparse=False)
    # Value ranges are discovered automatically from the data.
    encoded = enc.fit_transform(samples)
    assert_equal(encoded.shape, (2, 5))
    expected_active = np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]
    assert_array_equal(enc.active_features_, expected_active)
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
    # Verify the actual encoding.
    expected = np.array([[0., 1., 0., 1., 1.],
                         [1., 0., 1., 0., 1.]])
    assert_array_equal(encoded, expected)
def _check_transform_selected(X, X_expected, sel):
    """Apply _transform_selected (with a Binarizer) to dense and CSR copies
    of X and compare the result against X_expected."""
    for M in (X, sparse.csr_matrix(X)):
        result = _transform_selected(M, Binarizer().transform, sel)
        assert_array_equal(toarray(result), X_expected)
def test_transform_selected():
    """_transform_selected honours index lists, boolean masks, 'all' and
    the empty selection."""
    X = [[3, 2, 1], [0, 1, 1]]

    # Only the first column is binarized.
    first_only = [[1, 2, 1], [0, 1, 1]]
    for sel in ([0], [True, False, False]):
        _check_transform_selected(X, first_only, sel)

    # Every column is binarized.
    everything = [[1, 1, 1], [0, 1, 1]]
    for sel in ([0, 1, 2], [True, True, True], "all"):
        _check_transform_selected(X, everything, sel)

    # No column is touched: the input comes back unchanged.
    for sel in ([], [False, False, False]):
        _check_transform_selected(X, X, sel)
def _run_one_hot(X, X2, cat):
    """Fit an encoder restricted to `cat` on X; return encodings of X and X2."""
    encoder = OneHotEncoder(categorical_features=cat)
    return encoder.fit_transform(X), encoder.transform(X2)
def _check_one_hot(X, X2, cat, n_features):
    """Check that a boolean mask and the equivalent index array produce
    identical one-hot encodings of the expected width."""
    ind = np.where(cat)[0]
    A, B = _run_one_hot(X, X2, cat)   # selection given as a boolean mask
    C, D = _run_one_hot(X, X2, ind)   # selection given as integer indices
    # All four results must have the expected number of output features.
    for result, n_rows in ((A, 2), (B, 1), (C, 2), (D, 1)):
        assert_equal(result.shape, (n_rows, n_features))
    # Mask and indices must be interchangeable.
    assert_array_equal(toarray(A), toarray(C))
    assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
    """categorical_features masks: mixed, none and all categorical."""
    X = np.array([[3, 2, 1], [0, 1, 1]])
    X2 = np.array([[1, 1, 1]])
    cases = [
        ([True, False, False], 4),   # only the first column is categorical
        ([False, False, False], 3),  # edge case: nothing is categorical
        ([True, True, True], 5),     # edge case: everything is categorical
    ]
    for cat, n_features in cases:
        _check_one_hot(X, X2, cat, n_features)
def test_one_hot_encoder_unknown_transform():
    """Behaviour of handle_unknown for feature values unseen during fit."""
    X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
    y = np.array([[4, 1, 1]])

    # 'error': unknown features at transform time must raise.
    oh = OneHotEncoder(handle_unknown='error')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)

    # 'ignore': unknown features are encoded as all zeros.
    oh = OneHotEncoder(handle_unknown='ignore')
    oh.fit(X)
    expected = np.array([[0., 0., 0., 0., 1., 0., 0.]])
    assert_array_equal(oh.transform(y).toarray(), expected)

    # Any other handle_unknown value must raise at transform time.
    oh = OneHotEncoder(handle_unknown='42')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
arabenjamin/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
    """Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
    """
    # Pseudo-inverse of the (symmetric) Gram matrix of the relevant features.
    gram_inv = pinvh(np.dot(X_relevant.T, X_relevant))
    projector = np.dot(np.dot(X_irelevant.T, X_relevant), gram_inv)
    return np.max(np.abs(projector).sum(axis=1))
# Run the full experiment twice: once with a well-conditioned design
# (conditioning=1) and once with an ill-conditioned one (conditioning=1e-4).
for conditioning in (1, 1e-4):
    ###########################################################################
    # Simulate regression data with a correlated design
    n_features = 501
    n_relevant_features = 3
    noise_level = .2
    coef_min = .2
    # The Donoho-Tanner phase transition is around n_samples=25: below we
    # will completely fail to recover in the well-conditioned case
    n_samples = 25
    block_size = n_relevant_features

    rng = np.random.RandomState(42)

    # The coefficients of our model
    coef = np.zeros(n_features)
    coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)

    # The correlation of our design: variables correlated by blocs of 3
    corr = np.zeros((n_features, n_features))
    for i in range(0, n_features, block_size):
        corr[i:i + block_size, i:i + block_size] = 1 - conditioning
    corr.flat[::n_features + 1] = 1
    corr = linalg.cholesky(corr)

    # Our design
    X = rng.normal(size=(n_samples, n_features))
    X = np.dot(X, corr)
    # Keep [Wainwright2006] (26c) constant
    X[:n_relevant_features] /= np.abs(
        linalg.svdvals(X[:n_relevant_features])).max()
    X = StandardScaler().fit_transform(X.copy())

    # The output variable
    y = np.dot(X, coef)
    y /= np.std(y)
    # We scale the added noise as a function of the average correlation
    # between the design and the output variable
    y += noise_level * rng.normal(size=n_samples)
    mi = mutual_incoherence(X[:, :n_relevant_features],
                            X[:, n_relevant_features:])

    ###########################################################################
    # Plot stability selection path, using a high eps for early stopping
    # of the path, to save computation time
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
                                                   eps=0.05)

    plt.figure()
    # We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and enables to
    # see the progression along the path
    hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
    hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
    ymin, ymax = plt.ylim()
    plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
    plt.ylabel('Stability score: proportion of times selected')
    plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
    plt.axis('tight')
    plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
               loc='best')

    ###########################################################################
    # Plot the estimated stability scores for a given alpha

    # Use 6-fold cross-validation rather than the default 3-fold: it leads to
    # a better choice of alpha:
    # Stop the user warnings outputs- they are not necessary for the example
    # as it is specifically set up to be challenging.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        warnings.simplefilter('ignore', ConvergenceWarning)
        lars_cv = LassoLarsCV(cv=6).fit(X, y)

    # Run the RandomizedLasso: we use a paths going down to .1*alpha_max
    # to avoid exploring the regime in which very noisy variables enter
    # the model
    alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
    clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
    trees = ExtraTreesRegressor(100).fit(X, y)
    # Compare with F-score
    F, _ = f_regression(X, y)

    plt.figure()
    for name, score in [('F-test', F),
                        ('Stability selection', clf.scores_),
                        ('Lasso coefs', np.abs(lars_cv.coef_)),
                        ('Trees', trees.feature_importances_),
                        ]:
        precision, recall, thresholds = precision_recall_curve(coef != 0,
                                                               score)
        # Clip scores at 1e-4 so the log-scale plot stays readable.
        plt.semilogy(np.maximum(score / np.max(score), 1e-4),
                     label="%s. AUC: %.3f" % (name, auc(recall, precision)))

    plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
             label="Ground truth")
    plt.xlabel("Features")
    plt.ylabel("Score")
    # Plot only the 100 first coefficients
    plt.xlim(0, 100)
    plt.legend(loc='best')
    plt.title('Feature selection scores - Mutual incoherence: %.1f'
              % mi)

plt.show()
| bsd-3-clause |
llhe/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 77 | 46403 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
            tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
  # Gather entries along the named axis using the 1-D integer `indexer`
  # tensor, relabeling that axis with the provided `axis` object.
  with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
    # Move the gathered axis to the front so array_ops.gather (which indexes
    # the first dimension) can be applied directly.
    temp_axes = core.Axes([axis] + list(
        labeled_tensor.axes.remove(axis.name).values()))
    transposed = core.transpose(labeled_tensor, temp_axes.keys())
    indexed = core.LabeledTensor(
        array_ops.gather(transposed.tensor, indexer), temp_axes)
    # Restore the caller's original axis order.
    return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types,
                       tc.Union(slice, collections.Hashable, list)),
            tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
  """Slice out a subset of the tensor.

  Args:
    labeled_tensor: The input tensor.
    selection: A dictionary mapping an axis name to a scalar, slice or list of
      values to select. Currently supports two types of selections:
        (a) Any number of scalar and/or slice selections.
        (b) Exactly one list selection, without any scalars or slices.
    name: Optional op name.

  Returns:
    The selection as a `LabeledTensor`.

  Raises:
    ValueError: If the tensor doesn't have an axis in the selection or if
      that axis lacks labels.
    KeyError: If any labels in a selection are not found in the original axis.
    NotImplementedError: If you attempt to combine a list selection with
      scalar selection or another list selection.
  """
  with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    # Two disjoint accumulators: `slices` collects scalar/slice selections
    # (handled by core.slice_function), `indexers` collects list selections
    # (handled by gather via _gather_1d_on_axis).
    slices = {}
    indexers = {}
    for axis_name, value in selection.items():
      if axis_name not in labeled_tensor.axes:
        raise ValueError(
            'The tensor does not have an axis named %s. Its axes are: %r' %
            (axis_name, labeled_tensor.axes.keys()))
      axis = labeled_tensor.axes[axis_name]
      if axis.labels is None:
        raise ValueError(
            'The axis named %s does not have labels. The axis is: %r' %
            (axis_name, axis))

      if isinstance(value, slice):
        # TODO(shoyer): consider deprecating using slices in favor of lists
        # Translate label-based slice bounds into positional indices.
        if value.start is None:
          start = None
        else:
          start = axis.index(value.start)

        if value.stop is None:
          stop = None
        else:
          # For now, follow the pandas convention of making labeled slices
          # inclusive of both bounds.
          stop = axis.index(value.stop) + 1

        if value.step is not None:
          raise NotImplementedError('slicing with a step is not yet supported')

        slices[axis_name] = slice(start, stop)

      # Needs to be after checking for slices, since slice objects claim to be
      # instances of collections.Hashable but hash() on them fails.
      elif isinstance(value, collections.Hashable):
        slices[axis_name] = axis.index(value)

      elif isinstance(value, list):
        if indexers:
          raise NotImplementedError(
              'select does not yet support more than one list selection at '
              'the same time')
        indexer = [axis.index(v) for v in value]
        indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)

      else:
        # If type checking is working properly, this shouldn't be possible.
        raise TypeError('cannot handle arbitrary types')

    if indexers and slices:
      raise NotImplementedError(
          'select does not yet support combined scalar and list selection')

    # For now, handle array selection separately, because tf.gather_nd does
    # not support gradients yet. Later, using gather_nd will let us combine
    # these paths.
    if indexers:
      (axis_name, indexer), = indexers.items()
      axis = core.Axis(axis_name, selection[axis_name])
      return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
    else:
      return core.slice_function(labeled_tensor, slices, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), string_types,
    tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
  """Concatenate tensors along a dimension.

  See tf.concat.

  Args:
    labeled_tensors: A list of input LabeledTensors.
    axis_name: The name of the axis along which to concatenate.
    name: Optional op name.

  Returns:
    The concatenated tensor.
    The coordinate labels for the concatenation dimension are also concatenated,
    if they are available for every tensor.

  Raises:
    ValueError: If fewer than one tensor inputs is provided, if the tensors
      have incompatible axes, or if `axis_name` isn't the name of an axis.
  """
  with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]

    if len(labeled_tensors) < 1:
      raise ValueError('concat expects at least 1 tensor, but received %s' %
                       labeled_tensors)

    # All tensors must have these axes.
    axes_0 = labeled_tensors[0].axes
    axis_names = list(axes_0.keys())

    if axis_name not in axis_names:
      raise ValueError('%s not in %s' % (axis_name, axis_names))

    shared_axes = axes_0.remove(axis_name)

    tensors = [labeled_tensors[0].tensor]
    concat_axis_list = [axes_0[axis_name]]
    for labeled_tensor in labeled_tensors[1:]:
      # Every subsequent tensor must agree with the first on all axes other
      # than the concatenation axis.
      current_shared_axes = labeled_tensor.axes.remove(axis_name)
      if current_shared_axes != shared_axes:
        # TODO(shoyer): add more specific checks about what went wrong,
        # including raising AxisOrderError when appropriate
        raise ValueError('Mismatched shared axes: the first tensor '
                         'had axes %r but this tensor has axes %r.' %
                         (shared_axes, current_shared_axes))

      # Accumulate the axis labels, if they're available.
      concat_axis_list.append(labeled_tensor.axes[axis_name])
      tensors.append(labeled_tensor.tensor)

    concat_axis = core.concat_axes(concat_axis_list)
    concat_dimension = axis_names.index(axis_name)
    concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
    # Rebuild the axes list with the merged concatenation axis in place.
    values = list(axes_0.values())
    concat_axes = (values[:concat_dimension] + [concat_axis] +
                   values[concat_dimension + 1:])

    return core.LabeledTensor(concat_tensor, concat_axes)
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike),
    tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
  """Stack tensors along a freshly created axis.

  See tf.pack.

  Args:
    labeled_tensors: The input tensors, which must have identical axes.
    new_axis: The name of the new axis, or a tuple containing the name
      and coordinate labels.
    axis_position: Optional integer position at which to insert the new axis.
    name: Optional op name.

  Returns:
    The packed tensors as a single LabeledTensor, with `new_axis` in the given
    `axis_position`.

  Raises:
    ValueError: If fewer than one input tensors is provided, or if the tensors
      don't have identical axes.
  """
  with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]
    if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensors, but received %s' %
                       labeled_tensors)

    # Every input must carry exactly the axes of the first one.
    reference_axes = labeled_tensors[0].axes
    for t in labeled_tensors:
      if t.axes != reference_axes:
        raise ValueError('Non-identical axes. Expected %s but got %s' %
                         (reference_axes, t.axes))

    stacked = array_ops.stack(
        [t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
    result_axes = list(reference_axes.values())
    result_axes.insert(axis_position, new_axis)
    return core.LabeledTensor(stacked, result_axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
  """Split the tensor into a list of tensors along one axis.

  See tf.unpack.

  Args:
    labeled_tensor: The input tensor.
    axis_name: Optional name of axis to unpack. By default, the first axis is
      used.
    name: Optional op name.

  Returns:
    The list of unpacked LabeledTensors.

  Raises:
    ValueError: If `axis_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    all_names = list(labeled_tensor.axes.keys())
    # Default to splitting along the leading axis.
    if axis_name is None:
      axis_name = all_names[0]
    if axis_name not in all_names:
      raise ValueError('%s not in %s' % (axis_name, all_names))
    split_dim = all_names.index(axis_name)

    tensors = array_ops.unstack(
        labeled_tensor.tensor, axis=split_dim, name=scope)
    # Each output keeps every axis except the one that was split away.
    remaining_axes = [
        axis for position, axis in enumerate(labeled_tensor.axes.values())
        if position != split_dim
    ]
    return [core.LabeledTensor(t, remaining_axes) for t in tensors]
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Collection(string_types),
            tc.Collection(tc.Union(string_types, core.AxisLike)),
            tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
  """Reshape specific axes of a LabeledTensor.

  Non-indicated axes remain in their original locations.

  Args:
    labeled_tensor: The input tensor.
    existing_axes: List of axis names found on the input tensor. These must
      appear sequentially in the list of axis names on the input. In other
      words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
    new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
      providing new axes with which to replace `existing_axes` in the reshaped
      result. At most one element of `new_axes` may be a string, indicating an
      axis with unknown size.
    name: Optional op name.

  Returns:
    The reshaped LabeledTensor.

  Raises:
    ValueError: If `existing_axes` are not all axes on the input, or if more
      than one of `new_axes` has unknown size.
    AxisOrderError: If `existing_axes` are not a slice of axis names on the
      input.
  """
  with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    original_axis_names = list(labeled_tensor.axes.keys())
    existing_axes = list(existing_axes)
    if not set(existing_axes) <= set(original_axis_names):
      raise ValueError('existing_axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_axes, original_axis_names))

    # The axes being reshaped must form a contiguous run [start, stop) of the
    # input's axis order.
    start = original_axis_names.index(existing_axes[0])
    stop = original_axis_names.index(existing_axes[-1]) + 1

    if existing_axes != original_axis_names[start:stop]:
      # We could support existing_axes that aren't a slice by using transpose,
      # but that could lead to unpredictable performance consequences because
      # transposes are not free in TensorFlow. If we did transpose
      # automatically, the user might never realize that their data is being
      # produced with the wrong order. (The later will occur with some frequency
      # because of how broadcasting automatically choose axis order.)
      # So for now we've taken the strict approach.
      raise core.AxisOrderError(
          'existing_axes %r are not a slice of axis names %r on the input '
          'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
          'axes on the input explicitly.' %
          (existing_axes, original_axis_names))

    if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
      raise ValueError(
          'at most one axis in new_axes can have unknown size. All other '
          'axes must have an indicated integer size or labels: %r' % new_axes)

    original_values = list(labeled_tensor.axes.values())
    # Unknown sizes become -1, which tf.reshape infers from the total size.
    axis_size = lambda axis: -1 if axis.size is None else axis.size
    shape = [axis_size(axis) for axis in original_values[:start]]
    for axis_ref in new_axes:
      if isinstance(axis_ref, string_types):
        shape.append(-1)
      else:
        axis = core.as_axis(axis_ref)
        shape.append(axis_size(axis))
    shape.extend(axis_size(axis) for axis in original_values[stop:])

    reshaped_tensor = array_ops.reshape(
        labeled_tensor.tensor, shape, name=scope)
    axes = original_values[:start] + list(new_axes) + original_values[stop:]
    return core.LabeledTensor(reshaped_tensor, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
            tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
  """Rename an axis of LabeledTensor.

  Args:
    labeled_tensor: The input tensor.
    existing_name: Name for an existing axis on the input.
    new_name: Desired replacement name.
    name: Optional op name.

  Returns:
    LabeledTensor with renamed axis.

  Raises:
    ValueError: If `existing_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
    if existing_name not in labeled_tensor.axes:
      # FIX: corrected the error message grammar ("are" -> "is"); it reports
      # a single missing axis name.
      raise ValueError('existing_name %r is not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_name, labeled_tensor.axes.keys()))
    # Renaming is implemented as a trivial reshape that swaps in an Axis with
    # the same value (labels or size) under the new name.
    new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
    return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
            tc.Collection(core.LabeledTensorLike), bool,
            tc.Optional(string_types))
def _batch_helper(default_name,
                  batch_fn,
                  batch_size,
                  enqueue_many,
                  labeled_tensors,
                  allow_smaller_final_batch,
                  name=None):
  # Shared implementation for batch() and shuffle_batch(): applies `batch_fn`
  # to the raw tensors and rebuilds LabeledTensors with a 'batch' axis.
  with ops.name_scope(name, default_name, labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]

    batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
    # TODO(shoyer): Remove this when they sanitize the TF API.
    if not isinstance(batch_ops, list):
      assert isinstance(batch_ops, ops.Tensor)
      batch_ops = [batch_ops]

    if allow_smaller_final_batch:
      # The final batch may be short, so the batch axis size is unknown.
      batch_size = None

    @tc.returns(core.Axes)
    @tc.accepts(core.Axes)
    def output_axes(axes):
      # Build the output axes: a (possibly resized) leading 'batch' axis
      # followed by the input's remaining axes.
      if enqueue_many:
        if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
          raise ValueError(
              'When enqueue_many is True, input tensors must have an axis '
              'called "batch" as their first dimension, '
              'but axes were %s' % axes)
        culled_axes = axes.remove('batch')
        return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
      else:
        return core.Axes([('batch', batch_size)] + list(axes.values()))

    output_labeled_tensors = []
    for i, tensor in enumerate(batch_ops):
      axes = output_axes(labeled_tensors[i].axes)
      output_labeled_tensors.append(core.LabeledTensor(tensor, axes))

    return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
    tc.Optional(string_types))
def batch(labeled_tensors,
          batch_size,
          num_threads=1,
          capacity=32,
          enqueue_many=False,
          allow_smaller_final_batch=False,
          name=None):
  """Rebatch a tensor.

  See tf.batch.

  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.batch.
    capacity: See tf.batch.
    enqueue_many: If true, the input tensors must contain a 'batch' axis as
      their first axis.
      If false, the input tensors must not contain a 'batch' axis.
      See tf.batch.
    allow_smaller_final_batch: See tf.batch.
    name: Optional op name.

  Returns:
    The rebatched tensors.
    If enqueue_many is false, the output tensors will have a new 'batch' axis
    as their first axis.

  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """

  # Adapter passed to _batch_helper: runs the plain tf.train.batch op on the
  # unlabeled tensors under the helper's name scope.
  def fn(tensors, scope):
    return input.batch(
        tensors,
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=capacity,
        enqueue_many=enqueue_many,
        allow_smaller_final_batch=allow_smaller_final_batch,
        name=scope)

  return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
                       labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
    tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
                  batch_size,
                  num_threads=1,
                  capacity=32,
                  enqueue_many=False,
                  min_after_dequeue=0,
                  seed=None,
                  allow_smaller_final_batch=False,
                  name=None):
  """Rebatch labeled tensors, shuffling examples between batches.

  See tf.batch.

  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.batch.
    capacity: See tf.batch.
    enqueue_many: If true, each input tensor must carry a 'batch' axis as its
      first axis; if false, inputs must not have a 'batch' axis. See tf.batch.
    min_after_dequeue: Minimum number of elements in the queue after a dequeue,
      used to ensure mixing.
    seed: Optional random seed.
    allow_smaller_final_batch: See tf.batch.
    name: Optional op name.

  Returns:
    The rebatched tensors. When enqueue_many is false, the outputs gain a new
    'batch' axis as their first axis.

  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """
  def make_shuffle_batch(tensors, scope):
    # Delegate the queueing, shuffling, and batching to the TF input op.
    return input.shuffle_batch(
        tensors,
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=capacity,
        enqueue_many=enqueue_many,
        min_after_dequeue=min_after_dequeue,
        seed=seed,
        allow_smaller_final_batch=allow_smaller_final_batch,
        name=scope)

  return _batch_helper('lt_shuffle_batch', make_shuffle_batch, batch_size,
                       enqueue_many, labeled_tensors,
                       allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types, int),
            tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
  """Randomly crop a tensor down to the sizes given in `shape_map`.

  See tf.random_crop.

  Args:
    labeled_tensor: The input tensor.
    shape_map: A dictionary mapping axis names to the size of the random crop
      for that dimension.
    seed: An optional random seed.
    name: An optional op name.

  Returns:
    A tensor of the same rank as `labeled_tensor`, cropped randomly in the
    selected dimensions.

  Raises:
    ValueError: If the shape map contains an axis name not in the input tensor.
  """
  with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    # Reject crop axes that the input does not actually have.
    for axis_name in shape_map:
      if axis_name not in labeled_tensor.axes:
        raise ValueError('Selection axis %s not in axes %s' %
                         (axis_name, labeled_tensor.axes))

    crop_shape = []
    result_axes = []
    for axis in labeled_tensor.axes.values():
      if axis.name in shape_map:
        crop_size = shape_map[axis.name]
        crop_shape.append(crop_size)
        # We lose labels for the axes we crop, leaving just the size.
        result_axes.append((axis.name, crop_size))
      else:
        # Uncropped axes keep their full size and any tick labels.
        crop_shape.append(len(axis))
        result_axes.append(axis)

    crop_op = random_ops.random_crop(
        labeled_tensor.tensor, crop_shape, seed=seed, name=scope)
    return core.LabeledTensor(crop_op, result_axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
  """Map on the list of tensors unpacked from labeled_tensor.

  See tf.map_fn.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type LabeledTensor -> LabeledTensor.
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    A tensor that packs the results of applying fn to the list of tensors
    unpacked from labeled_tensor.
  """
  with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    unpack_lts = unpack(labeled_tensor)
    # TODO(ericmc): Fix this upstream.
    if labeled_tensor.dtype == dtypes.string:
      # We must construct the full graph here, because functional_ops.map_fn
      # doesn't work for string-valued tensors.
      # Constructing the full graph may be slow.
      map_lts = [fn(t) for t in unpack_lts]
      return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
    else:
      # Figure out what the axis labels should be, but use tf.map_fn to
      # construct the graph because it's efficient.
      # It may be slow to construct the full graph, so we infer the labels from
      # the first element.
      # TODO(ericmc): This builds a subgraph which then gets thrown away.
      # Find a more elegant solution.
      first_map_lt = fn(unpack_lts[0])
      # Output axes: the input's first axis, followed by whatever axes `fn`
      # produces on a single slice.
      final_axes = list(labeled_tensor.axes.values())[:1] + list(
          first_map_lt.axes.values())
      @tc.returns(ops.Tensor)
      @tc.accepts(ops.Tensor)
      def tf_fn(tensor):
        # Re-wrap each raw slice as a LabeledTensor (all axes except the
        # mapped-over first one), apply `fn`, and return the raw tensor so
        # functional_ops.map_fn can consume it.
        original_axes = list(labeled_tensor.axes.values())[1:]
        tensor_lt = core.LabeledTensor(tensor, original_axes)
        return fn(tensor_lt).tensor
      map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
      map_lt = core.LabeledTensor(map_op, final_axes)
      return core.identity(map_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
  """Left fold over the tensors unpacked from labeled_tensor.

  See tf.foldl.

  Args:
    fn: The combining function, applied as (accumulated_value, next_value).
      It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
    labeled_tensor: The input tensor.
    initial_value: The initial value of the accumulator.
    name: Optional op name.

  Returns:
    The accumulated value.
  """
  with ops.name_scope(name, 'lt_foldl',
                      [labeled_tensor, initial_value]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    initial_value = core.convert_to_labeled_tensor(initial_value)

    @tc.returns(ops.Tensor)
    @tc.accepts(ops.Tensor, ops.Tensor)
    def _fold_step(accumulator, next_element):
      # Re-wrap the raw tensors so `fn` sees LabeledTensors; each element
      # carries all axes of the input except the folded-over first one.
      accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
      element_lt = core.LabeledTensor(
          next_element, list(labeled_tensor.axes.values())[1:])
      return fn(accumulator_lt, element_lt).tensor

    folded_op = functional_ops.foldl(
        _fold_step, labeled_tensor.tensor, initializer=initial_value.tensor)
    folded_lt = core.LabeledTensor(folded_op, initial_value.axes)
    return core.identity(folded_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
  """Remove size-1 dimensions.

  See tf.squeeze.

  Args:
    labeled_tensor: The input tensor.
    axis_names: The names of the dimensions to remove, or None to remove
      all size-1 dimensions.
    name: Optional op name.

  Returns:
    A tensor with the specified dimensions removed.

  Raises:
    ValueError: If the named axes are not in the tensor, or if they are
      not size-1.
  """
  with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if axis_names is None:
      # Default: every axis whose size is exactly one.
      axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]

    # Validate the requested axes before touching the graph.
    for axis_name in axis_names:
      if axis_name not in labeled_tensor.axes:
        raise ValueError('axis %s is not in tensor axes %s' %
                         (axis_name, labeled_tensor.axes))
      elif len(labeled_tensor.axes[axis_name]) != 1:
        raise ValueError(
            'cannot squeeze axis with size greater than 1: (%s, %s)' %
            (axis_name, labeled_tensor.axes[axis_name]))

    dims_to_drop = [
        i for i, axis in enumerate(labeled_tensor.axes.values())
        if axis.name in axis_names
    ]
    kept_axes = [
        axis for axis in labeled_tensor.axes.values()
        if axis.name not in axis_names
    ]

    if dims_to_drop:
      squeeze_op = array_ops.squeeze(
          labeled_tensor.tensor, dims_to_drop, name=scope)
    else:
      # Nothing to squeeze; still anchor the output to this op's name scope.
      squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)

    return core.LabeledTensor(squeeze_op, kept_axes)
# pylint: disable=invalid-name
# Type-check alias for one reduction axis: either a bare axis name (the axis
# is removed by the reduction) or a (name, label) pair (the axis is kept).
ReduceAxis = tc.Union(string_types,
                      tc.Tuple(string_types, collections.Hashable))
# A single axis, a collection of axes, or None (reduce over all axes).
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def matmul(a, b, name=None):
  """Matrix multiply two tensors with rank 1 or 2.

  If both tensors have rank 2, a matrix-matrix product is performed.
  If one tensor has rank 1 and the other has rank 2, then a matrix-vector
  product is performed.
  If both tensors have rank 1, then a vector dot-product is performed.
  (This behavior matches that of `numpy.dot`.)

  Both tensors must share exactly one dimension in common, which is the
  dimension the operation is summed along. The inputs will be automatically
  transposed if necessary as part of the matmul op.

  We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number shared dimensions (via an `axis`
  argument), but neither of these features has been implemented yet.

  Args:
    a: First LabeledTensor.
    b: Second LabeledTensor.
    name: Optional op name.

  Returns:
    LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in order of appearance on the
    inputs.

  Raises:
    NotImplementedError: If inputs have rank >2 or share multiple axes.
    ValueError: If the inputs have rank 0 or do not share any axes.
  """
  with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
    a = core.convert_to_labeled_tensor(a)
    b = core.convert_to_labeled_tensor(b)

    if len(a.axes) > 2 or len(b.axes) > 2:
      # We could pass batched inputs to tf.matmul to make this work, but we
      # would also need to use tf.tile and/or tf.transpose. These are more
      # expensive than doing reshapes, so it's not clear if it's a good idea to
      # do this automatically.
      raise NotImplementedError(
          'matmul currently requires inputs with rank 2 or less, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))

    if not a.axes or not b.axes:
      raise ValueError(
          'matmul currently requires inputs with at least rank 1, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))

    shared_axes = set(a.axes) & set(b.axes)
    if len(shared_axes) > 1:
      raise NotImplementedError(
          'matmul does not yet support summing over multiple shared axes: %r. '
          'Use transpose and reshape to create a single shared axis to sum '
          'over.' % shared_axes)
    if not shared_axes:
      # NOTE: message grammar fixed ("there must have ... input" ->
      # "there must be ... inputs").
      raise ValueError('there must be exactly one axis in common between '
                       'inputs to matmul: %r, %r' %
                       (a.axes.keys(), b.axes.keys()))
    shared_axis, = shared_axes

    if a.axes[shared_axis] != b.axes[shared_axis]:
      raise ValueError('axis %r does not match on input arguments: %r vs %r' %
                       (shared_axis, a.axes[shared_axis].value,
                        b.axes[shared_axis].value))

    # Output axes: all non-shared axes, in order of appearance on the inputs.
    result_axes = []
    for axes in [a.axes, b.axes]:
      for axis in axes.values():
        if axis.name != shared_axis:
          result_axes.append(axis)

    axis_scope_order = core.get_axis_order()
    if axis_scope_order is not None:
      # Swap the operands (and hence the output order) when the natural
      # ordering would violate the enclosing axis_order_scope.
      result_axis_names = [axis.name for axis in result_axes]
      new_axis_names = [
          name for name in axis_scope_order if name in result_axis_names
      ]
      if new_axis_names != result_axis_names:
        # switch a and b
        b, a = a, b
        # result_axes is a list of length 1 or 2
        result_axes = result_axes[::-1]

    squeeze_dims = []

    if len(a.axes) == 1:
      # Promote the vector to a 1-row matrix; squeeze that row out afterwards.
      a_tensor = array_ops.reshape(a.tensor, (1, -1))
      squeeze_dims.append(0)
      transpose_a = False
    else:
      a_tensor = a.tensor
      # Transpose if the shared (summed) axis is not already a's last axis.
      transpose_a = list(a.axes.keys()).index(shared_axis) == 0

    if len(b.axes) == 1:
      # Promote the vector to a 1-column matrix; squeeze it out afterwards.
      b_tensor = array_ops.reshape(b.tensor, (-1, 1))
      squeeze_dims.append(1)
      transpose_b = False
    else:
      b_tensor = b.tensor
      # Transpose if the shared (summed) axis is not already b's first axis.
      transpose_b = list(b.axes.keys()).index(shared_axis) == 1

    result_op = math_ops.matmul(
        a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)

    if squeeze_dims:
      result_op = array_ops.squeeze(result_op, squeeze_dims)
    result_op = array_ops.identity(result_op, name=scope)

    return core.LabeledTensor(result_op, result_axes)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
  """Define a reduction op for labeled tensors.

  Factory used below to build lt_reduce_sum, lt_reduce_mean, etc.

  Args:
    op_name: string name of the TensorFlow op.
    reduce_fn: function to call to evaluate the op on a tf.Tensor.

  Returns:
    Function defining the given reduction op that acts on a LabeledTensor.
  """
  default_name = 'lt_%s' % op_name
  @tc.returns(core.LabeledTensor)
  @tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
  def op(labeled_tensor, axes=None, name=None):
    """Computes the given reduction across the given axes of a LabeledTensor.

    See `tf.{op_name}` for full details.

    Args:
      labeled_tensor: The input tensor.
      axes: A set of axes or None.
        If None, all axes will be reduced.
        Axes must all be strings, in which case those dimensions will be
        removed, or pairs of (name, None) or (name, label), in which case those
        dimensions will be kept.
      name: Optional op name.

    Returns:
      The reduced LabeledTensor.

    Raises:
      ValueError: if any of the axes to reduce over are not found on
        `labeled_tensor`.
    """
    with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
      labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
      if axes is None:
        axes = labeled_tensor.axes.keys()
      # A single axis name or a single (name, label) tuple: wrap in a list.
      if isinstance(axes, (string_types, tuple)):
        axes = [axes]
      reduction_axes = {}
      axes_to_squeeze = []
      for a in axes:
        if isinstance(a, string_types):
          # We squeeze out this axis.
          reduction_axes[a] = a
          axes_to_squeeze.append(a)
        else:
          # We keep this axis, with the user-provided labels.
          (axis_name, label) = a
          if label is not None:
            # The input was a single label, so make it a list so it can be
            # turned into an Axis.
            label = [label]
          reduction_axes[axis_name] = (axis_name, label)
      for axis_name in reduction_axes:
        if axis_name not in labeled_tensor.axes:
          raise ValueError('Axis %s not in axes %s' %
                           (axis_name, labeled_tensor.axes))
      # Reduce with keep_dims=True so ranks line up, then squeeze away the
      # axes that were requested by bare name.
      intermediate_axes = []
      reduction_dimensions = []
      for i, axis in enumerate(labeled_tensor.axes.values()):
        if axis.name in reduction_axes:
          intermediate_axes.append(reduction_axes[axis.name])
          reduction_dimensions.append(i)
        else:
          intermediate_axes.append(axis)
      reduce_op = reduce_fn(
          labeled_tensor.tensor, reduction_dimensions, keep_dims=True)
      reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
      return squeeze(reduce_lt, axes_to_squeeze, name=scope)
  # Substitute the concrete op name into the generic docstring template.
  op.__doc__ = op.__doc__.format(op_name=op_name)
  op.__name__ = op_name
  return op
# Reduction ops mirroring the tf.reduce_* family, acting on LabeledTensors.
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
                                    math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Union(int, ops.Tensor)),
            tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
  """Constructs a tensor by tiling a given tensor.

  Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
  tensors would no longer be unique.)

  See tf.tile.

  Args:
    labeled_tensor: The input tensor.
    multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes with a multiple
      different than 1 need be included.
    name: Optional op name.

  Returns:
    A tensor with the indicated axes tiled.

  Raises:
    ValueError: If the tiled axes are not axes in the input tensor, or if any
      axes in multiples have tick labels.
  """
  with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('tile axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (multiples.keys(), labeled_tensor.axes))
    # Tiling an axis with tick labels would duplicate labels, so forbid it.
    labeled_axes = [
        name for name in multiples
        if labeled_tensor.axes[name].labels is not None
    ]
    if labeled_axes:
      raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
    # tf.tile wants a multiple for every axis; default to 1 (no tiling).
    multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
    tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
    # Unlabeled axes keep only their name (their size changes); labeled axes
    # were guaranteed untiled above and carry over intact.
    new_axes = [
        axis.name if axis.labels is None else axis
        for axis in labeled_tensor.axes.values()
    ]
    return core.LabeledTensor(tile_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
            string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
  """Pads a tensor.

  See tf.pad.

  Args:
    labeled_tensor: The input tensor.
    paddings: A mapping where the keys are axis names and the values are
      tuples where the first element is the padding to insert at the beginning
      of the axis and the second is the padding to insert at the end of the
      axis.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
    name: Optional op name.

  Returns:
    A tensor with the indicated axes padded, optionally with those axes extended
    with the provided labels.

  Raises:
    ValueError: If the padded axes are not axes in the input tensor.
  """
  with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('pad axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (paddings.keys(), labeled_tensor.axes))

    new_axes = []
    padding_pairs = []
    # NOTE: the loop variable is deliberately `axis_name`, not `name`, which
    # would shadow this function's `name` op-name parameter.
    for axis_name, axis in labeled_tensor.axes.items():
      if axis_name in paddings:
        padding_before, padding_after = paddings[axis_name]
        axis_before = core.Axis(axis_name, padding_before)
        axis_after = core.Axis(axis_name, padding_after)
        # The padded axis is before-padding + original axis + after-padding.
        new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
        padding_pairs.append((len(axis_before), len(axis_after)))
      else:
        new_axes.append(axis)
        padding_pairs.append((0, 0))

    pad_op = array_ops.pad(labeled_tensor.tensor,
                           padding_pairs,
                           mode,
                           name=scope)

    return core.LabeledTensor(pad_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Union(np.ndarray, list, tuple, core.Scalar),
    tc.Optional(dtypes.DType),
    tc.Optional(
        tc.Union(core.Axes, tc.Collection(
            tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
  """Creates a constant tensor.

  If `axes` includes any strings, shape is inferred from `value`. Otherwise,
  the sizes of the given `axes` are used to set `shape` for `tf.constant`.

  See tf.constant for more details.

  Args:
    value: The input tensor.
    dtype: The type of the returned tensor.
    axes: Optional Axes, list of strings or list of objects coercible to Axis
      objects. By default, axes are assumed to be an empty list (i.e., `value`
      is treated as a scalar).
    name: Optional op name.

  Returns:
    The constant LabeledTensor with the given axes.
  """
  with ops.name_scope(name, 'lt_constant', [value]) as scope:

    if axes is None:
      axes = []

    if isinstance(axes, core.Axes):
      axes = axes.values()

    if any(isinstance(ax, string_types) for ax in axes):
      # need to infer shape
      shape = None
    else:
      # axes already indicate shape
      axes = [core.as_axis(a) for a in axes]
      shape = [a.size for a in axes]

    op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
  """Creates a tensor of zeros with the same shape and axes as the input.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to zero.
  """
  with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    zeros_op = array_ops.zeros_like(
        labeled_tensor.tensor, dtype=dtype, name=scope)
    # Shape is unchanged, so the input's axes carry over directly.
    return core.LabeledTensor(zeros_op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
  """Creates a tensor of ones with the same shape and axes as the input.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to one.
  """
  with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    ones_op = array_ops.ones_like(
        labeled_tensor.tensor, dtype=dtype, name=scope)
    # Shape is unchanged, so the input's axes carry over directly.
    return core.LabeledTensor(ones_op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
  """Converts a labeled tensor to a new element type.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    A labeled tensor with the new dtype.
  """
  with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    cast_op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
    # Casting does not change shape, so the input's axes carry over.
    return core.LabeledTensor(cast_op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
  """Asserts that a labeled tensor contains no NaN or Inf entries.

  See tf.verify_tensor_all_finite.

  Args:
    labeled_tensor: The input tensor.
    message: Message to log on failure.
    name: Optional op name.

  Returns:
    The input tensor.
  """
  with ops.name_scope(name, 'lt_verify_tensor_all_finite',
                      [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    verify_op = numerics.verify_tensor_all_finite(
        labeled_tensor.tensor, msg=message, name=scope)
    # The verification op passes the values through, so axes are unchanged.
    return core.LabeledTensor(verify_op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
  """Apply a boolean mask to a labeled tensor.

  Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
  The mask is applied to the first axis of `labeled_tensor`. Labels on the first
  axis are removed, because True indices in `mask` may not be known dynamically.

  Args:
    labeled_tensor: The input tensor.
    mask: A 1-dimensional boolean LabeledTensor whose single axis equals the
      first axis of `labeled_tensor`.
    name: Optional op name.

  Returns:
    The masked labeled tensor.

  Raises:
    NotImplementedError: if the mask has more than one axis.
    ValueError: if the first axis of the labeled tensor does not equal the
      axis of the mask.
  """
  with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    mask = core.convert_to_labeled_tensor(mask)
    if len(mask.axes) > 1:
      raise NotImplementedError(
          "LabeledTensor's boolean_mask currently only supports 1D masks")
    mask_axis = list(mask.axes.values())[0]
    lt_axis = list(labeled_tensor.axes.values())[0]
    if mask_axis != lt_axis:
      raise ValueError('the first axis of the labeled tensor and the mask '
                       'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
    op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
    # TODO(shoyer): attempt to infer labels for the masked values, by calling
    # tf.contrib.util.constant_value on the mask?
    # The first axis keeps only its name: its new size is dynamic.
    axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
  """Return elements from x or y depending on condition.

  See `tf.where` for more details. This function currently only implements the
  three argument version of where.

  Args:
    condition: LabeledTensor of type `bool`.
    x: LabeledTensor for values where condition is true.
    y: LabeledTensor for values where condition is false.
    name: Optional op name.

  Returns:
    The labeled tensor with values according to condition.

  Raises:
    ValueError: if `condition`, `x`, and `y` do not all have identical axes.
  """
  with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
    condition = core.convert_to_labeled_tensor(condition)
    x = core.convert_to_labeled_tensor(x)
    y = core.convert_to_labeled_tensor(y)
    if not condition.axes == x.axes == y.axes:
      raise ValueError('all inputs to `where` must have equal axes')
    op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
    return core.LabeledTensor(op, x.axes)
| apache-2.0 |
murali-munna/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to line
of separation.

A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# we create 40 separable points: two Gaussian blobs of 20 points each,
# offset by (-2, -2) and (+2, +2)
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

# figure number
fignum = 1

# fit the model: once weakly regularized (C=1), once strongly (C=0.05)
for name, penalty in (('unreg', 1), ('reg', 0.05)):

    clf = svm.SVC(kernel='linear', C=penalty)
    clf.fit(X, Y)

    # get the separating hyperplane: rewrite w . x + b = 0 as y = a*x - b/w[1]
    w = clf.coef_[0]
    a = -w[0] / w[1]
    xx = np.linspace(-5, 5)
    yy = a * xx - (clf.intercept_[0]) / w[1]

    # plot the parallels to the separating hyperplane that pass through the
    # support vectors (margin half-width is 1 / ||w||)
    margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
    yy_down = yy + a * margin
    yy_up = yy - a * margin

    # plot the line, the points, and the nearest vectors to the plane
    plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.plot(xx, yy, 'k-')
    plt.plot(xx, yy_down, 'k--')
    plt.plot(xx, yy_up, 'k--')

    # support vectors drawn as larger hollow circles under the data points
    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
                facecolors='none', zorder=10)
    plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)

    plt.axis('tight')
    x_min = -4.8
    x_max = 4.2
    y_min = -6
    y_max = 6

    # color the background by the predicted class over a dense grid
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.figure(fignum, figsize=(4, 3))
    plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)

    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)

    plt.xticks(())
    plt.yticks(())
    fignum = fignum + 1

plt.show()
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/svm/classes.py | 6 | 40597 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while ``"crammer_singer"``
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
``[x, self.intercept_scaling]``,
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
             C=1.0, multi_class='ovr', fit_intercept=True,
             intercept_scaling=1, class_weight=None, verbose=0,
             random_state=None, max_iter=1000):
    # Store every constructor argument verbatim (sklearn convention:
    # no validation or mutation in __init__; ``fit`` does the checking).
    self.penalty = penalty
    self.loss = loss
    self.dual = dual
    self.tol = tol
    self.C = C
    self.multi_class = multi_class
    self.fit_intercept = fit_intercept
    self.intercept_scaling = intercept_scaling
    self.class_weight = class_weight
    self.verbose = verbose
    self.random_state = random_state
    self.max_iter = max_iter
def fit(self, X, y):
    """Fit the model according to the given training data.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target vector relative to X.

    Returns
    -------
    self : object
        Returns self.
    """
    # FIXME Remove l1/l2 support in 1.0 -----------------------------------
    deprecation_msg = ("loss='%s' has been deprecated in favor of "
                       "loss='%s' as of 0.16. Backward compatibility"
                       " for the loss='%s' will be removed in %s")
    # FIXME change loss_l --> self.loss after 0.18
    loss_lower = self.loss.lower()
    if loss_lower in ('l1', 'l2'):
        legacy_loss = self.loss
        self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}[loss_lower]
        warnings.warn(
            deprecation_msg % (legacy_loss, self.loss, legacy_loss, '1.0'),
            DeprecationWarning)
    # ---------------------------------------------------------------------

    if self.C < 0:
        raise ValueError("Penalty term must be positive; got (C=%r)"
                         % self.C)

    X, y = check_X_y(X, y, accept_sparse='csr',
                     dtype=np.float64, order="C")
    check_classification_targets(y)
    self.classes_ = np.unique(y)

    self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
        X, y, self.C, self.fit_intercept, self.intercept_scaling,
        self.class_weight, self.penalty, self.dual, self.verbose,
        self.max_iter, self.tol, self.random_state, self.multi_class,
        self.loss)

    # crammer_singer fits one weight vector per class even in the binary
    # case; collapse that into the usual single-vector binary layout.
    if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
        self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
        if self.fit_intercept:
            self.intercept_ = np.array(
                [self.intercept_[1] - self.intercept_[0]])

    return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
    Specifies the loss function. 'epsilon_insensitive' is the
    epsilon-insensitive loss (standard SVR) while
    'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
             loss='epsilon_insensitive', fit_intercept=True,
             intercept_scaling=1., dual=True, verbose=0,
             random_state=None, max_iter=1000):
    # Plain attribute storage; all validation is deferred to ``fit``.
    self.epsilon = epsilon
    self.tol = tol
    self.C = C
    self.loss = loss
    self.fit_intercept = fit_intercept
    self.intercept_scaling = intercept_scaling
    self.dual = dual
    self.verbose = verbose
    self.random_state = random_state
    self.max_iter = max_iter
def fit(self, X, y):
    """Fit the model according to the given training data.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target vector relative to X.

    Returns
    -------
    self : object
        Returns self.
    """
    # FIXME Remove l1/l2 support in 1.0 -----------------------------------
    deprecation_msg = ("loss='%s' has been deprecated in favor of "
                       "loss='%s' as of 0.16. Backward compatibility"
                       " for the loss='%s' will be removed in %s")
    # FIXME change loss_l --> self.loss after 0.18
    loss_lower = self.loss.lower()
    if loss_lower in ('l1', 'l2'):
        legacy_loss = self.loss
        self.loss = {'l1': 'epsilon_insensitive',
                     'l2': 'squared_epsilon_insensitive'}[loss_lower]
        warnings.warn(
            deprecation_msg % (legacy_loss, self.loss, legacy_loss, '1.0'),
            DeprecationWarning)
    # ---------------------------------------------------------------------

    if self.C < 0:
        raise ValueError("Penalty term must be positive; got (C=%r)"
                         % self.C)

    X, y = check_X_y(X, y, accept_sparse='csr',
                     dtype=np.float64, order="C")
    penalty = 'l2'  # SVR only accepts the l2 penalty
    self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
        X, y, self.C, self.fit_intercept, self.intercept_scaling,
        None, penalty, self.dual, self.verbose,
        self.max_iter, self.tol, self.random_state, loss=self.loss,
        epsilon=self.epsilon)
    # Regression has a single output, so expose a flat coefficient vector.
    self.coef_ = self.coef_.ravel()
    return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
    Whether to return a one-vs-rest ('ovr') decision function of shape
    (n_samples, n_classes) as all other classifiers, or the original
    one-vs-one ('ovo') decision function of libsvm which has shape
    (n_samples, n_classes * (n_classes - 1) / 2).
    The default of None will currently behave as 'ovo' for backward
    compatibility and raise a deprecation warning, but will change to
    'ovr' in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
             coef0=0.0, shrinking=True, probability=False,
             tol=1e-3, cache_size=200, class_weight=None,
             verbose=False, max_iter=-1, decision_function_shape=None,
             random_state=None):
    # C-SVC does not use ``nu``; pass a dummy 0. to the shared base class.
    super(SVC, self).__init__(
        impl='c_svc',
        kernel=kernel,
        degree=degree,
        gamma=gamma,
        coef0=coef0,
        tol=tol,
        C=C,
        nu=0.,
        shrinking=shrinking,
        probability=probability,
        cache_size=cache_size,
        class_weight=class_weight,
        verbose=verbose,
        max_iter=max_iter,
        decision_function_shape=decision_function_shape,
        random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
    Whether to return a one-vs-rest ('ovr') decision function of shape
    (n_samples, n_classes) as all other classifiers, or the original
    one-vs-one ('ovo') decision function of libsvm which has shape
    (n_samples, n_classes * (n_classes - 1) / 2).
    The default of None will currently behave as 'ovo' for backward
    compatibility and raise a deprecation warning, but will change to
    'ovr' in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
             coef0=0.0, shrinking=True, probability=False,
             tol=1e-3, cache_size=200, class_weight=None, verbose=False,
             max_iter=-1, decision_function_shape=None, random_state=None):
    # Nu-SVC does not use ``C``; pass a dummy 0. to the shared base class.
    super(NuSVC, self).__init__(
        impl='nu_svc',
        kernel=kernel,
        degree=degree,
        gamma=gamma,
        coef0=coef0,
        tol=tol,
        C=0.,
        nu=nu,
        shrinking=shrinking,
        probability=probability,
        cache_size=cache_size,
        class_weight=class_weight,
        verbose=verbose,
        max_iter=max_iter,
        decision_function_shape=decision_function_shape,
        random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
             tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
             cache_size=200, verbose=False, max_iter=-1):
    # Epsilon-SVR: no nu, no probabilities, no class weights and no
    # random state are needed by the libsvm backend.
    super(SVR, self).__init__(
        impl='epsilon_svr',
        kernel=kernel,
        degree=degree,
        gamma=gamma,
        coef0=coef0,
        tol=tol,
        C=C,
        nu=0.,
        epsilon=epsilon,
        shrinking=shrinking,
        probability=False,
        cache_size=cache_size,
        class_weight=None,
        verbose=verbose,
        max_iter=max_iter,
        random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
             gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
             cache_size=200, verbose=False, max_iter=-1):
    # Nu-SVR: ``nu`` replaces epsilon, so a dummy 0. epsilon is passed.
    super(NuSVR, self).__init__(
        impl='nu_svr',
        kernel=kernel,
        degree=degree,
        gamma=gamma,
        coef0=coef0,
        tol=tol,
        C=C,
        nu=nu,
        epsilon=0.,
        shrinking=shrinking,
        probability=False,
        cache_size=cache_size,
        class_weight=None,
        verbose=verbose,
        max_iter=max_iter,
        random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
             tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
             verbose=False, max_iter=-1, random_state=None):
    # One-class SVM: C and epsilon are unused (dummy 0.), there are no
    # targets so probability estimates and class weights do not apply.
    super(OneClassSVM, self).__init__(
        impl='one_class',
        kernel=kernel,
        degree=degree,
        gamma=gamma,
        coef0=coef0,
        tol=tol,
        C=0.,
        nu=nu,
        epsilon=0.,
        shrinking=shrinking,
        probability=False,
        cache_size=cache_size,
        class_weight=None,
        verbose=verbose,
        max_iter=max_iter,
        random_state=random_state)
def fit(self, X, y=None, sample_weight=None, **params):
    """Detects the soft boundary of the set of samples X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Set of samples, where n_samples is the number of samples and
        n_features is the number of features.

    y : Ignored. Present only for API consistency with supervised
        estimators.

    sample_weight : array-like, shape (n_samples,)
        Per-sample weights. Rescale C per sample. Higher weights
        force the classifier to put more emphasis on these points.

    Returns
    -------
    self : object
        Returns self.

    Notes
    -----
    If X is not a C-ordered contiguous array it is copied.
    """
    # One-class estimation is unsupervised: feed the libsvm solver a
    # dummy target vector of ones.
    dummy_targets = np.ones(_num_samples(X))
    super(OneClassSVM, self).fit(X, dummy_targets,
                                 sample_weight=sample_weight, **params)
    return self
def decision_function(self, X):
    """Distance of the samples X to the separating hyperplane.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Returns
    -------
    X : array-like, shape (n_samples,)
        Returns the decision function of the samples.
    """
    # Thin public wrapper around the shared libsvm implementation.
    return self._decision_function(X)
| gpl-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/testing/jpl_units/__init__.py | 21 | 3240 | #=======================================================================
"""
This is a sample set of units for use with testing unit conversion
of matplotlib routines. These are used because they use very strict
enforcement of unitized data which will test the entire spectrum of how
unitized data might be used (it is not always meaningful to convert to
a float without specific units given).
UnitDbl is essentially a unitized floating point number. It has a
minimal set of supported units (enough for testing purposes). All
of the mathematical operation are provided to fully test any behaviour
that might occur with unitized data. Remember that unitized data has
rules as to how it can be applied to one another (a value of distance
cannot be added to a value of time). Thus we need to guard against any
accidental "default" conversion that will strip away the meaning of the
data and render it neutered.
Epoch is different than a UnitDbl of time. Time is something that can be
measured where an Epoch is a specific moment in time. Epochs are typically
referenced as an offset from some predetermined epoch.
A difference of two epochs is a Duration. The distinction between a
Duration and a UnitDbl of time is made because an Epoch can have different
frames (or units). In the case of our test Epoch class the two allowed
frames are 'UTC' and 'ET' (Note that these are rough estimates provided for
testing purposes and should not be used in production code where accuracy
of time frames is desired). As such a Duration also has a frame of
reference and therefore needs to be called out as different from a simple
measurement of time, since a delta-t in one frame may not be the same in another.
"""
#=======================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from .Duration import Duration
from .Epoch import Epoch
from .UnitDbl import UnitDbl
from .Duration import Duration
from .Epoch import Epoch
from .UnitDbl import UnitDbl
from .StrConverter import StrConverter
from .EpochConverter import EpochConverter
from .UnitDblConverter import UnitDblConverter
from .UnitDblFormatter import UnitDblFormatter
#=======================================================================
__version__ = "1.0"
__all__ = [
'register',
'Duration',
'Epoch',
'UnitDbl',
'UnitDblFormatter',
]
#=======================================================================
def register():
    """Register the unit conversion classes with matplotlib.

    Installs converters for plain strings, Epoch and UnitDbl into
    matplotlib's global unit-conversion registry.
    """
    # Imported lazily so that merely importing this package does not
    # require matplotlib to be set up.
    import matplotlib.units as mplU

    mplU.registry[str] = StrConverter()
    mplU.registry[Epoch] = EpochConverter()
    mplU.registry[UnitDbl] = UnitDblConverter()
#=======================================================================
# Some default unit instances

# Distances
m = UnitDbl(1.0, "m")
km = UnitDbl(1.0, "km")
mile = UnitDbl(1.0, "mile")
# Angles
deg = UnitDbl(1.0, "deg")
rad = UnitDbl(1.0, "rad")
# Time
sec = UnitDbl(1.0, "sec")  # (was defined twice; the duplicate is removed)
min = UnitDbl(1.0, "min")  # NOTE: intentionally shadows builtin ``min`` at module scope
hr = UnitDbl(1.0, "hour")
day = UnitDbl(24.0, "hour")
| gpl-3.0 |
cjayb/mne-python | mne/viz/_brain/tests/test_brain.py | 1 | 23280 | # -*- coding: utf-8 -*-
#
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# Oleh Kozynets <ok7mailbox@gmail.com>
#
# License: Simplified BSD
import os.path as path
import pytest
import numpy as np
from numpy.testing import assert_allclose
from mne import (read_source_estimate, SourceEstimate, MixedSourceEstimate,
VolSourceEstimate)
from mne.source_space import (read_source_spaces, vertex_to_mni,
setup_volume_source_space)
from mne.datasets import testing
from mne.utils import check_version
from mne.viz._brain import _Brain, _TimeViewer, _LinkViewer, _BrainScraper
from mne.viz._brain.colormap import calculate_lut
from matplotlib import cm, image
import matplotlib.pyplot as plt
data_path = testing.data_path(download=False)
subject_id = 'sample'
subjects_dir = path.join(data_path, 'subjects')
fname_stc = path.join(data_path, 'MEG/sample/sample_audvis_trunc-meg')
fname_label = path.join(data_path, 'MEG/sample/labels/Vis-lh.label')
src_fname = path.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-6-src.fif')
class _Collection(object):
def __init__(self, actors):
self._actors = actors
def GetNumberOfItems(self):
return len(self._actors)
def GetItemAsObject(self, ii):
return self._actors[ii]
class TstVTKPicker(object):
    """Mock VTK picker used to exercise _TimeViewer cell picking."""

    def __init__(self, mesh, cell_id, hemi, brain):
        self.mesh = mesh
        self.cell_id = cell_id
        self.point_id = None  # filled in by GetPickPosition()
        self.hemi = hemi
        self.brain = brain
        self._actors = ()

    def GetCellId(self):
        """Return the picked cell."""
        return self.cell_id

    def GetDataSet(self):
        """Return the picked mesh."""
        return self.mesh

    def GetPickPosition(self):
        """Return the coordinates of the picked position."""
        if self.hemi == 'vol':
            # Volume sources index the grid coordinates directly by cell id.
            self.point_id = self.cell_id
            return self.brain._data['vol']['grid_coords'][self.cell_id]
        # Surface: use the first point of the picked cell.
        vtk_cell = self.mesh.GetCell(self.cell_id)
        point_ids = [vtk_cell.GetPointId(idx)
                     for idx in range(vtk_cell.GetNumberOfPoints())]
        self.point_id = point_ids[0]
        return self.mesh.points[self.point_id]

    def GetProp3Ds(self):
        """Return all picked Prop3Ds."""
        return _Collection(self._actors)

    def GetRenderer(self):
        """Return the "renderer"."""
        return self  # doubles as renderer and active camera

    GetActiveCamera = GetRenderer

    def GetPosition(self):
        """Return the position."""
        return np.array(self.GetPickPosition()) - (0, 0, 100)
@testing.requires_testing_data
def test_brain_init(renderer, tmpdir, pixel_ratio):
    """Test initialization of the _Brain instance."""
    from mne.label import read_label
    hemi = 'lh'
    surf = 'inflated'
    cortex = 'low_contrast'
    title = 'test'
    size = (300, 300)
    kwargs = dict(subject_id=subject_id, subjects_dir=subjects_dir)
    # invalid constructor arguments must raise with informative messages
    with pytest.raises(ValueError, match='"size" parameter must be'):
        _Brain(hemi=hemi, surf=surf, size=[1, 2, 3], **kwargs)
    with pytest.raises(TypeError, match='figure'):
        _Brain(hemi=hemi, surf=surf, figure='foo', **kwargs)
    with pytest.raises(TypeError, match='interaction'):
        _Brain(hemi=hemi, surf=surf, interaction=0, **kwargs)
    with pytest.raises(ValueError, match='interaction'):
        _Brain(hemi=hemi, surf=surf, interaction='foo', **kwargs)
    with pytest.raises(KeyError):
        _Brain(hemi='foo', surf=surf, **kwargs)
    brain = _Brain(hemi=hemi, surf=surf, size=size, title=title,
                   cortex=cortex, units='m', **kwargs)
    assert brain.interaction == 'trackball'
    # add_data: first the argument-validation failures, then valid calls
    stc = read_source_estimate(fname_stc)
    fmin = stc.data.min()
    fmax = stc.data.max()
    for h in brain._hemis:
        if h == 'lh':
            hi = 0
        else:
            hi = 1
        hemi_data = stc.data[:len(stc.vertices[hi]), 10]
        hemi_vertices = stc.vertices[hi]
        with pytest.raises(TypeError, match='scale_factor'):
            brain.add_data(hemi_data, hemi=h, scale_factor='foo')
        with pytest.raises(TypeError, match='vector_alpha'):
            brain.add_data(hemi_data, hemi=h, vector_alpha='foo')
        with pytest.raises(ValueError, match='thresh'):
            brain.add_data(hemi_data, hemi=h, thresh=-1)
        with pytest.raises(ValueError, match='remove_existing'):
            brain.add_data(hemi_data, hemi=h, remove_existing=-1)
        with pytest.raises(ValueError, match='time_label_size'):
            brain.add_data(hemi_data, hemi=h, time_label_size=-1,
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='is positive'):
            brain.add_data(hemi_data, hemi=h, smoothing_steps=-1,
                           vertices=hemi_vertices)
        with pytest.raises(TypeError, match='int or NoneType'):
            brain.add_data(hemi_data, hemi=h, smoothing_steps='foo')
        with pytest.raises(ValueError, match='dimension mismatch'):
            brain.add_data(array=np.array([0, 1, 2]), hemi=h,
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='vertices parameter must not be'):
            brain.add_data(hemi_data, fmin=fmin, hemi=hemi,
                           fmax=fmax, vertices=None)
        with pytest.raises(ValueError, match='has shape'):
            brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=hemi,
                           fmax=fmax, vertices=None, time=[0, 1])
        # valid static data (no time axis)
        brain.add_data(hemi_data, fmin=fmin, hemi=h, fmax=fmax,
                       colormap='hot', vertices=hemi_vertices,
                       smoothing_steps='nearest', colorbar=(0, 0), time=None)
        assert brain.data['lh']['array'] is hemi_data
        assert brain.views == ['lateral']
        assert brain.hemis == ('lh',)
        # valid time-resolved data with a single time point
        brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=h, fmax=fmax,
                       colormap='hot', vertices=hemi_vertices,
                       smoothing_steps=1, initial_time=0., colorbar=False,
                       time=[0])
        brain.set_time_point(0)  # should hit _safe_interp1d
        with pytest.raises(ValueError, match='consistent with'):
            brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=h,
                           fmax=fmax, colormap='hot', vertices=hemi_vertices,
                           smoothing_steps='nearest', colorbar=False,
                           time=[1])
        with pytest.raises(ValueError, match='different from'):
            brain.add_data(hemi_data[:, np.newaxis][:, [0, 0]],
                           fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='need shape'):
            brain.add_data(hemi_data[:, np.newaxis], time=[0, 1],
                           fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='If array has 3'):
            brain.add_data(hemi_data[:, np.newaxis, np.newaxis],
                           fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
                           vertices=hemi_vertices)
    # add label
    label = read_label(fname_label)
    brain.add_label(label, scalar_thresh=0.)
    brain.remove_labels()
    brain.add_label(fname_label)
    brain.add_label('V1', borders=True)
    brain.remove_labels()
    brain.remove_labels()  # removing twice must be a no-op, not an error
    # add foci
    brain.add_foci([0], coords_as_verts=True,
                   hemi=hemi, color='blue')
    # add text
    brain.add_text(x=0, y=0, text='foo')
    # add annotation: by name and by file, with varying border/alpha/color
    annots = ['aparc', path.join(subjects_dir, 'fsaverage', 'label',
                                 'lh.PALS_B12_Lobes.annot')]
    borders = [True, 2]
    alphas = [1, 0.5]
    colors = [None, 'r']
    brain = _Brain(subject_id='fsaverage', hemi=hemi, size=size,
                   surf='inflated', subjects_dir=subjects_dir)
    for a, b, p, color in zip(annots, borders, alphas, colors):
        brain.add_annotation(a, b, p, color=color)
    brain.show_view(dict(focalpoint=(1e-5, 1e-5, 1e-5)), roll=1, distance=500)
    # image and screenshot
    fname = path.join(str(tmpdir), 'test.png')
    assert not path.isfile(fname)
    brain.save_image(fname)
    assert path.isfile(fname)
    brain.show_view(view=dict(azimuth=180., elevation=90.))
    img = brain.screenshot(mode='rgb')
    if renderer._get_3d_backend() == 'mayavi':
        pixel_ratio = 1.  # no HiDPI when using the testing backend
    want_size = np.array([size[0] * pixel_ratio, size[1] * pixel_ratio, 3])
    assert_allclose(img.shape, want_size)
    brain.close()
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_save_movie(tmpdir, renderer):
    """Test saving a movie of a _Brain instance."""
    backend_is_mayavi = renderer._get_3d_backend() == "mayavi"
    if backend_is_mayavi:
        pytest.skip('Save movie only supported on PyVista')
    brain = _create_testing_brain(hemi='lh', time_viewer=False)
    # Render into the per-test temporary directory.
    movie_path = str(path.join(tmpdir, "brain_test.mov"))
    brain.save_movie(movie_path, time_dilation=1, interpolation='nearest')
    assert path.isfile(movie_path)
    brain.close()
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_timeviewer(renderer_interactive, pixel_ratio):
    """Test _TimeViewer primitives."""
    if renderer_interactive._get_3d_backend() != 'pyvista':
        pytest.skip('TimeViewer tests only supported on PyVista')
    brain_data = _create_testing_brain(hemi='both', show_traces=False)
    # attaching a second viewer to the same brain must be rejected
    with pytest.raises(RuntimeError, match='already'):
        _TimeViewer(brain_data)
    time_viewer = brain_data.time_viewer
    # drive the UI callbacks: time slider, per-view orientation, smoothing
    time_viewer.callbacks["time"](value=0)
    time_viewer.callbacks["orientation_lh_0_0"](
        value='lat',
        update_widget=True
    )
    time_viewer.callbacks["orientation_lh_0_0"](
        value='medial',
        update_widget=True
    )
    time_viewer.callbacks["time"](
        value=0.0,
        time_as_index=False,
    )
    time_viewer.callbacks["smoothing"](value=1)
    # colormap limits set deliberately out of order (fmin > fmax etc.) to
    # exercise the re-ordering logic inside the fmin/fmid/fmax callbacks
    time_viewer.callbacks["fmin"](value=12.0)
    time_viewer.callbacks["fmax"](value=4.0)
    time_viewer.callbacks["fmid"](value=6.0)
    time_viewer.callbacks["fmid"](value=4.0)
    time_viewer.callbacks["fscale"](value=1.1)
    time_viewer.callbacks["fmin"](value=12.0)
    time_viewer.callbacks["fmid"](value=4.0)
    time_viewer.toggle_interface()
    time_viewer.callbacks["playback_speed"](value=0.1)
    time_viewer.toggle_playback()
    time_viewer.apply_auto_scaling()
    time_viewer.restore_user_scaling()
    time_viewer.reset()
    plt.close('all')
    time_viewer.help()
    assert len(plt.get_fignums()) == 1  # help() opens exactly one figure
    plt.close('all')
    # screenshot dimensions must match the 300x300 size * HiDPI pixel ratio
    brain_data.show_view(view=dict(azimuth=180., elevation=90.))
    img = brain_data.screenshot(mode='rgb')
    want_shape = np.array([300 * pixel_ratio, 300 * pixel_ratio, 3])
    assert_allclose(img.shape, want_shape)
@testing.requires_testing_data
@pytest.mark.parametrize('hemi', [
    'lh',
    pytest.param('rh', marks=pytest.mark.slowtest),
    pytest.param('split', marks=pytest.mark.slowtest),
    pytest.param('both', marks=pytest.mark.slowtest),
])
@pytest.mark.parametrize('src', [
    'surface',
    pytest.param('volume', marks=pytest.mark.slowtest),
    pytest.param('mixed', marks=pytest.mark.slowtest),
])
@pytest.mark.slowtest
def test_brain_timeviewer_traces(renderer_interactive, hemi, src, tmpdir):
    """Test _TimeViewer traces."""
    if renderer_interactive._get_3d_backend() != 'pyvista':
        pytest.skip('Only PyVista supports traces')
    brain_data = _create_testing_brain(
        hemi=hemi, surf='white', src=src, show_traces=0.5, initial_time=0,
        volume_options=None,  # for speed, don't upsample
    )
    with pytest.raises(RuntimeError, match='already'):
        _TimeViewer(brain_data)
    time_viewer = brain_data.time_viewer
    assert time_viewer.show_traces
    assert hasattr(time_viewer, "picked_points")
    assert hasattr(time_viewer, "_spheres")
    # test points picked by default: one per displayed hemisphere/volume
    picked_points = brain_data.get_picked_points()
    spheres = time_viewer._spheres
    hemi_str = list()
    if src in ('surface', 'mixed'):
        hemi_str.extend([hemi] if hemi in ('lh', 'rh') else ['lh', 'rh'])
    if src in ('mixed', 'volume'):
        hemi_str.extend(['vol'])
    for current_hemi in hemi_str:
        assert len(picked_points[current_hemi]) == 1
    n_spheres = len(hemi_str)
    if hemi == 'split' and src in ('mixed', 'volume'):
        # split view shows the volume sphere in both subplots
        n_spheres += 1
    assert len(spheres) == n_spheres
    # test removing points
    time_viewer.clear_points()
    assert len(spheres) == 0
    for key in ('lh', 'rh', 'vol'):
        assert len(picked_points[key]) == 0
    # test picking a cell at random (fixed seed for reproducibility)
    rng = np.random.RandomState(0)
    for idx, current_hemi in enumerate(hemi_str):
        assert len(spheres) == 0
        if current_hemi == 'vol':
            # pick the cell with the largest absolute value in the volume
            current_mesh = brain_data._data['vol']['grid']
            vertices = brain_data._data['vol']['vertices']
            values = current_mesh.cell_arrays['values'][vertices]
            cell_id = vertices[np.argmax(np.abs(values))]
        else:
            current_mesh = brain_data._hemi_meshes[current_hemi]
            cell_id = rng.randint(0, current_mesh.n_cells)
        # picking with no mesh must be a silent no-op
        test_picker = TstVTKPicker(None, None, current_hemi, brain_data)
        assert time_viewer.on_pick(test_picker, None) is None
        test_picker = TstVTKPicker(
            current_mesh, cell_id, current_hemi, brain_data)
        assert cell_id == test_picker.cell_id
        assert test_picker.point_id is None
        time_viewer.on_pick(test_picker, None)
        assert test_picker.point_id is not None
        assert len(picked_points[current_hemi]) == 1
        assert picked_points[current_hemi][0] == test_picker.point_id
        assert len(spheres) > 0
        sphere = spheres[-1]
        vertex_id = sphere._vertex_id
        assert vertex_id == test_picker.point_id
        line = sphere._line
        # the trace label must encode hemisphere, vertex and MNI position
        hemi_prefix = current_hemi[0].upper()
        if current_hemi == 'vol':
            assert hemi_prefix + ':' in line.get_label()
            assert 'MNI' in line.get_label()
            continue  # the MNI conversion is more complex
        hemi_int = 0 if current_hemi == 'lh' else 1
        mni = vertex_to_mni(
            vertices=vertex_id,
            hemis=hemi_int,
            subject=brain_data._subject_id,
            subjects_dir=brain_data._subjects_dir
        )
        label = "{}:{} MNI: {}".format(
            hemi_prefix, str(vertex_id).ljust(6),
            ', '.join('%5.1f' % m for m in mni))
        assert line.get_label() == label
        # remove the sphere by clicking in its vicinity
        old_len = len(spheres)
        test_picker._actors = sum((s._actors for s in spheres), [])
        time_viewer.on_pick(test_picker, None)
        assert len(spheres) < old_len
    # and the scraper for it (will close the instance)
    if not check_version('sphinx_gallery'):
        return
    screenshot = brain_data.screenshot()
    fnames = [str(tmpdir.join('temp.png'))]
    block_vars = dict(image_path_iterator=iter(fnames),
                      example_globals=dict(brain=brain_data))
    gallery_conf = dict(src_dir=str(tmpdir))
    scraper = _BrainScraper()
    rst = scraper(None, block_vars, gallery_conf)
    assert 'temp.png' in rst
    assert path.isfile(fnames[0])
    img = image.imread(fnames[0])
    assert img.shape[1] == screenshot.shape[1]  # same width
    assert img.shape[0] > screenshot.shape[0]  # larger height
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_linkviewer(renderer_interactive):
    """Test _LinkViewer primitives."""
    if renderer_interactive._get_3d_backend() != 'pyvista':
        pytest.skip('Linkviewer only supported on PyVista')
    brain1 = _create_testing_brain(hemi='lh', show_traces=False)
    brain2 = _create_testing_brain(hemi='lh', show_traces=True)
    # mismatched time axes should emit a warning, not fail
    brain1._times = brain1._times * 2
    with pytest.warns(RuntimeWarning, match='linking time'):
        link_viewer = _LinkViewer(
            [brain1, brain2],
            time=True,
            camera=False,
            colorbar=False,
            picking=False,
        )
    # link a fully-featured pair (camera/colorbar/picking all shared)
    brain_data = _create_testing_brain(hemi='split', show_traces=True)
    link_viewer = _LinkViewer(
        [brain2, brain_data],
        time=True,
        camera=True,
        colorbar=True,
        picking=True,
    )
    # drive the linked controls once each
    link_viewer.set_time_point(value=0)
    link_viewer.time_viewers[0].mpl_canvas.time_func(0)
    link_viewer.set_fmin(0)
    link_viewer.set_fmid(0.5)
    link_viewer.set_fmax(1)
    link_viewer.set_playback_speed(value=0.1)
    link_viewer.toggle_playback()
def test_brain_colormap():
    """Test brain's colormap functions."""
    colormap = "coolwarm"
    alpha = 1.0
    fmin = 0.0
    fmid = 0.5
    fmax = 1.0
    center = None
    # smoke test: sequential and divergent (center=0.0) LUTs build cleanly,
    # both from a colormap name and from a Colormap instance
    calculate_lut(colormap, alpha=alpha, fmin=fmin,
                  fmid=fmid, fmax=fmax, center=center)
    center = 0.0
    colormap = cm.get_cmap(colormap)
    calculate_lut(colormap, alpha=alpha, fmin=fmin,
                  fmid=fmid, fmax=fmax, center=center)
    cmap = cm.get_cmap(colormap)
    zero_alpha = np.array([1., 1., 1., 0])
    half_alpha = np.array([1., 1., 1., 0.5])
    atol = 1.5 / 256.  # about one LUT quantization step of slack
    # fmin < fmid < fmax
    lut = calculate_lut(colormap, alpha, 1, 2, 3)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 0, 1, 2, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[63], cmap(0.25), atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[192], cmap(0.75), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # fmin == fmid == fmax (degenerate limits must still yield a valid LUT)
    lut = calculate_lut(colormap, alpha, 1, 1, 1)
    zero_alpha = np.array([1., 1., 1., 0])
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
    assert_allclose(lut[1], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 0, 0, 0, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # fmin == fmid < fmax
    lut = calculate_lut(colormap, alpha, 1, 1, 2)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0.) * zero_alpha, atol=atol)
    assert_allclose(lut[1], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 1, 1, 2, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[62], cmap(0.245), atol=atol)
    assert_allclose(lut[64], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[191], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[193], cmap(0.755), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    lut = calculate_lut(colormap, alpha, 0, 0, 1, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[126], cmap(0.25), atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[129], cmap(0.75), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # fmin < fmid == fmax
    lut = calculate_lut(colormap, alpha, 1, 2, 2)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
    assert_allclose(lut[-2], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 1, 2, 2, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[1], cmap(0.25), atol=2 * atol)
    assert_allclose(lut[32], cmap(0.375) * half_alpha, atol=atol)
    assert_allclose(lut[64], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[191], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[223], cmap(0.625) * half_alpha, atol=atol)
    assert_allclose(lut[-2], cmap(0.7475), atol=2 * atol)
    assert_allclose(lut[-1], cmap(1.), atol=2 * atol)
    lut = calculate_lut(colormap, alpha, 0, 1, 1, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[1], cmap(0.25), atol=2 * atol)
    assert_allclose(lut[64], cmap(0.375) * half_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[191], cmap(0.625) * half_alpha, atol=atol)
    assert_allclose(lut[-2], cmap(0.75), atol=2 * atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # ordering violations must raise
    with pytest.raises(ValueError, match=r'.*fmin \(1\) <= fmid \(0\) <= fma'):
        calculate_lut(colormap, alpha, 1, 0, 2)
def _create_testing_brain(hemi, surf='inflated', src='surface', size=300,
                          **kwargs):
    """Build a _Brain figure from sparse random source data.

    Parameters: ``hemi`` is forwarded to the plot call; ``src`` selects the
    source-estimate class ('surface' -> SourceEstimate, 'mixed' ->
    MixedSourceEstimate, 'volume' -> VolSourceEstimate); extra ``kwargs``
    are forwarded to stc.plot/plot_3d.  Returns the resulting brain figure.
    """
    assert src in ('surface', 'mixed', 'volume')
    meth = 'plot'
    if src in ('surface', 'mixed'):
        sample_src = read_source_spaces(src_fname)
        klass = MixedSourceEstimate if src == 'mixed' else SourceEstimate
    if src in ('volume', 'mixed'):
        # small cerebellar volume source space, no interpolator for speed
        vol_src = setup_volume_source_space(
            subject_id, 7., mri='aseg.mgz',
            volume_label='Left-Cerebellum-Cortex',
            subjects_dir=subjects_dir, add_interpolator=False)
        assert len(vol_src) == 1
        assert vol_src[0]['nuse'] == 150
        if src == 'mixed':
            sample_src = sample_src + vol_src
        else:
            sample_src = vol_src
            klass = VolSourceEstimate
            meth = 'plot_3d'
    assert sample_src.kind == src
    # dense version: ~5% of entries get a random value, rest stay zero
    rng = np.random.RandomState(0)
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_size = stc_data.size
    stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = \
        rng.rand(stc_data.size // 20)
    stc_data.shape = (n_verts, n_time)
    stc = klass(stc_data, vertices, 1, 1)
    fmin = stc.data.min()
    fmax = stc.data.max()
    fmid = (fmin + fmax) / 2.
    brain_data = getattr(stc, meth)(
        subject=subject_id, hemi=hemi, surface=surf, size=size,
        subjects_dir=subjects_dir, colormap='hot',
        clim=dict(kind='value', lims=(fmin, fmid, fmax)), src=sample_src,
        **kwargs)
    return brain_data
| bsd-3-clause |
wazeerzulfikar/scikit-learn | benchmarks/bench_saga.py | 45 | 8474 | """Author: Arthur Mensch
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in term of learning time.
"""
import json
import time
from os.path import expanduser
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_rcv1, load_iris, load_digits, \
fetch_20newsgroups_vectorized
from sklearn.externals.joblib import delayed, Parallel, Memory
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
               max_iter=10, skip_slow=False):
    """Fit one logistic-regression model, recording metrics every 2 iters.

    Parameters
    ----------
    solver : str
        'saga'/'liblinear' (scikit-learn) or 'lightning'.
    X, y : array-like
        Full design matrix and targets; split 75/25 internally.
    penalty : 'l2' | 'l1'
        Regularization type.
    single_target : bool
        If True, binary (OVR) classification; otherwise multinomial.
    C : float
        Inverse regularization strength.
    max_iter : int
        Upper bound on training iterations (swept as 1, 3, 5, ...).
    skip_slow : bool
        Skip the (slow) lightning + l1 combination.

    Returns
    -------
    tuple or None
        (lr, times, train_scores, test_scores, accuracies), or None when the
        combination was skipped.
    """
    if skip_slow and solver == 'lightning' and penalty == 'l1':
        # BUG FIX: message read 'skip_slowping' (botched search/replace).
        print('skipping l1 logistic regression with solver lightning.')
        return
    print('Solving %s logistic regression with penalty %s, solver %s.'
          % ('binary' if single_target else 'multinomial',
             penalty, solver))

    if solver == 'lightning':
        from lightning.classification import SAGAClassifier
    if single_target or solver not in ['sag', 'saga']:
        multi_class = 'ovr'
    else:
        multi_class = 'multinomial'

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
                                                        stratify=y)
    n_samples = X_train.shape[0]
    n_classes = np.unique(y_train).shape[0]
    # Seed the curves with "untrained" values (loss 1, chance accuracy).
    test_scores = [1]
    train_scores = [1]
    accuracies = [1 / n_classes]
    times = [0]

    # Translate (C, penalty) into the alpha/beta parametrization lightning
    # uses (alpha = l2 strength, beta = l1 strength).
    if penalty == 'l2':
        alpha = 1. / (C * n_samples)
        beta = 0
        lightning_penalty = None
    else:
        alpha = 0.
        beta = 1. / (C * n_samples)
        lightning_penalty = 'l1'

    for this_max_iter in range(1, max_iter + 1, 2):
        print('[%s, %s, %s] Max iter: %s' %
              ('binary' if single_target else 'multinomial',
               penalty, solver, this_max_iter))
        if solver == 'lightning':
            lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
                                penalty=lightning_penalty,
                                tol=-1, max_iter=this_max_iter)
        else:
            lr = LogisticRegression(solver=solver,
                                    multi_class=multi_class,
                                    C=C,
                                    penalty=penalty,
                                    fit_intercept=False, tol=1e-24,
                                    max_iter=this_max_iter,
                                    random_state=42,
                                    )
        # BUG FIX: time.clock() was deprecated in Python 3.3 and removed in
        # 3.8; time.perf_counter() is the documented replacement for
        # interval timing.
        t0 = time.perf_counter()
        lr.fit(X_train, y_train)
        train_time = time.perf_counter() - t0

        scores = []
        for (X, y) in [(X_train, y_train), (X_test, y_test)]:
            try:
                y_pred = lr.predict_proba(X)
            except NotImplementedError:
                # Lightning predict_proba is not implemented for n_classes > 2
                y_pred = _predict_proba(lr, X)
            # Regularized objective: mean log-loss + penalty terms.
            score = log_loss(y, y_pred, normalize=False) / n_samples
            score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
                      beta * np.sum(np.abs(lr.coef_)))
            scores.append(score)
        train_score, test_score = tuple(scores)

        y_pred = lr.predict(X_test)
        accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
        test_scores.append(test_score)
        train_scores.append(train_score)
        accuracies.append(accuracy)
        times.append(train_time)
    return lr, times, train_scores, test_scores, accuracies
def _predict_proba(lr, X):
    """Class probabilities via softmax over the decision function.

    Fallback for lightning estimators whose predict_proba is not
    implemented for more than two classes.
    """
    decision = safe_sparse_dot(X, lr.coef_.T)
    if hasattr(lr, "intercept_"):
        decision = decision + lr.intercept_
    return softmax(decision)
def exp(solvers, penalties, single_target, n_samples=30000, max_iter=20,
        dataset='rcv1', n_jobs=1, skip_slow=False):
    """Run the benchmark grid (solver x penalty) on one dataset.

    Fits are cached with joblib Memory and the collected curves are dumped
    to 'bench_saga.json' for later rendering by plot().
    """
    mem = Memory(cachedir=expanduser('~/cache'), verbose=0)

    # Load the requested dataset; when single_target, binarize the labels.
    if dataset == 'rcv1':
        rcv1 = fetch_rcv1()
        lbin = LabelBinarizer()
        lbin.fit(rcv1.target_names)
        X = rcv1.data
        y = rcv1.target
        y = lbin.inverse_transform(y)
        le = LabelEncoder()
        y = le.fit_transform(y)
        if single_target:
            y_n = y.copy()
            y_n[y > 16] = 1
            y_n[y <= 16] = 0
            y = y_n
    elif dataset == 'digits':
        digits = load_digits()
        X, y = digits.data, digits.target
        if single_target:
            y_n = y.copy()
            y_n[y < 5] = 1
            y_n[y >= 5] = 0
            y = y_n
    elif dataset == 'iris':
        iris = load_iris()
        X, y = iris.data, iris.target
    elif dataset == '20newspaper':
        ng = fetch_20newsgroups_vectorized()
        X = ng.data
        y = ng.target
        if single_target:
            y_n = y.copy()
            y_n[y > 4] = 1
            # BUG FIX: was 'y_n[y <= 16] = 0', which re-zeroed classes 5-16
            # that the previous line had just marked positive.  The intended
            # split (mirroring the other datasets) is classes > 4 vs <= 4.
            y_n[y <= 4] = 0
            y = y_n
    X = X[:n_samples]
    y = y[:n_samples]

    # One fit per (solver, penalty) combination, cached across runs.
    cached_fit = mem.cache(fit_single)
    out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
        delayed(cached_fit)(solver, X, y,
                            penalty=penalty, single_target=single_target,
                            C=1, max_iter=max_iter, skip_slow=skip_slow)
        for solver in solvers
        for penalty in penalties)

    # Collect results in the same order they were submitted; skipped combos
    # return None but still occupy a slot in `out`, hence the running idx.
    res = []
    idx = 0
    for solver in solvers:
        for penalty in penalties:
            if not (skip_slow and solver == 'lightning' and penalty == 'l1'):
                lr, times, train_scores, test_scores, accuracies = out[idx]
                this_res = dict(solver=solver, penalty=penalty,
                                single_target=single_target,
                                times=times, train_scores=train_scores,
                                test_scores=test_scores,
                                accuracies=accuracies)
                res.append(this_res)
            idx += 1

    with open('bench_saga.json', 'w+') as f:
        json.dump(res, f)
def plot():
    """Render the benchmark curves stored in 'bench_saga.json'.

    Produces one 3-panel figure (train objective, test objective, test
    accuracy, all vs. wall time) per (single_target, penalty) group and
    saves each as a PNG named after the group.
    """
    import pandas as pd
    with open('bench_saga.json', 'r') as f:
        # NOTE(review): the parsed payload is rebound to the name 'f',
        # shadowing the file handle -- confusing but harmless here.
        f = json.load(f)
    res = pd.DataFrame(f)
    res.set_index(['single_target', 'penalty'], inplace=True)

    grouped = res.groupby(level=['single_target', 'penalty'])

    colors = {'saga': 'blue', 'liblinear': 'orange', 'lightning': 'green'}

    for idx, group in grouped:
        single_target, penalty = idx
        fig = plt.figure(figsize=(12, 4))

        # Panel 1: training objective relative to the best value seen
        # (the 0.999 factor keeps the log-scale argument strictly positive).
        ax = fig.add_subplot(131)
        train_scores = group['train_scores'].values
        ref = np.min(np.concatenate(train_scores)) * 0.999
        for scores, times, solver in zip(group['train_scores'], group['times'],
                                         group['solver']):
            scores = scores / ref - 1
            ax.plot(times, scores, label=solver, color=colors[solver])
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Training objective (relative to min)')
        ax.set_yscale('log')

        # Panel 2: test objective, same normalization
        ax = fig.add_subplot(132)
        test_scores = group['test_scores'].values
        ref = np.min(np.concatenate(test_scores)) * 0.999
        for scores, times, solver in zip(group['test_scores'], group['times'],
                                         group['solver']):
            scores = scores / ref - 1
            ax.plot(times, scores, label=solver, color=colors[solver])
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Test objective (relative to min)')
        ax.set_yscale('log')

        # Panel 3: raw test accuracy
        ax = fig.add_subplot(133)
        for accuracy, times, solver in zip(group['accuracies'], group['times'],
                                           group['solver']):
            ax.plot(times, accuracy, label=solver, color=colors[solver])
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Test accuracy')
        ax.legend()

        name = 'single_target' if single_target else 'multi_target'
        name += '_%s' % penalty
        plt.suptitle(name)
        name += '.png'
        fig.tight_layout()
        fig.subplots_adjust(top=0.9)
        plt.savefig(name)
        plt.close(fig)
if __name__ == '__main__':
    # Benchmark all three solvers with both penalties on the binarized
    # 20newsgroups set, then render the comparison plots.
    solvers = ['saga', 'liblinear', 'lightning']
    penalties = ['l1', 'l2']
    single_target = True
    # n_samples=None keeps the full dataset; n_jobs=1 for comparable timings
    exp(solvers, penalties, single_target, n_samples=None, n_jobs=1,
        dataset='20newspaper', max_iter=20)
    plot()
| bsd-3-clause |
nielmishra/eSIM | python code backup/__main__.py | 1 | 1828 | import os
import sys
import numpy as np
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def get_plot_files(file):
    """Split a simulator plot dump into one parsed data file per section.

    The input file consists of sections separated by '* ' markers.  For
    each section, the first three header lines are dropped; a line starting
    with 'Index' supplies the column names (recorded once per section),
    '--' ruler lines are skipped, and every remaining line is written to
    ``parsed_<i>.txt`` in the current directory.

    Parameters
    ----------
    file : str
        Path to the raw plot dump.

    Returns
    -------
    parameters : list of list of str
        Tokenized 'Index ...' header line for each section.
    parsed_files : list of str
        Names of the 'parsed_<i>.txt' files written.
    """
    # BUG FIX: both the input and the per-section output files were opened
    # without ever being closed; context managers guarantee cleanup.
    with open(os.path.realpath(file), 'r') as raw:
        sections = raw.read().split('* ')
    sections.pop(0)  # text before the first '* ' marker is preamble

    parameters = []
    parsed_files = []
    for i, section in enumerate(sections):
        out_name = 'parsed_%s.txt' % (i,)
        with open(out_name, 'w') as out:
            for line in section.split('\n')[3:]:
                if line.startswith('Index'):
                    # Record the column-header tokens once per section.
                    if len(parameters) < i + 1:
                        parameters.append(line.split())
                    continue
                if line.startswith('--'):
                    continue
                out.write(line + '\n')
        parsed_files.append(out_name)
    return parameters, parsed_files
def main():
    """Plot every data column of each parsed section against column 1.

    Reads the dump path from sys.argv[1], parses it with get_plot_files(),
    then saves one PNG per (section, column >= 2) pair next to the parsed
    file.  Values in the dump carry trailing commas, which are stripped.
    """
    file = sys.argv[1]  # path to the raw plot dump (required argument)
    parameters, parsed_files = get_plot_files(file)
    for plot_file, parameter in zip(parsed_files, parameters):
        data = np.loadtxt(plot_file, unpack=True, dtype=str)
        # Column 1 is the shared x axis for every curve in this section.
        xs = [x.strip(',') for x in data[1]]
        for i in range(2, len(data)):
            ys = [y.strip(',') for y in data[i]]
            plt.plot(xs, ys)
            # Refactor: the duplicated label/title/savefig branches are
            # collapsed; only the y-label lookup can raise IndexError when
            # the header row is shorter than the data.
            try:
                ylabel = parameter[i]
            except IndexError:
                ylabel = 'missing y label'
            plt.xlabel(parameter[1])
            plt.ylabel(ylabel)
            plt.title('%s vs %s' % (parameter[1], ylabel))
            plt.savefig(plot_file + str(i) + '.png')
            plt.clf()


if __name__ == '__main__':
    main()
| gpl-3.0 |
MLWave/auto-sklearn | source/conf.py | 5 | 8715 | # -*- coding: utf-8 -*-
#
# AutoSklearn documentation build configuration file, created by
# sphinx-quickstart on Thu May 21 13:40:42 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions used for this build: autodoc/autosummary pull API docs
# from docstrings, numpydoc parses NumPy-style docstring sections.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'numpydoc'
]
# Configure the extensions
numpydoc_show_class_members = False  # avoid duplicated member listings
autosummary_generate = True  # generate stub pages for autosummary entries
autodoc_default_flags = ['members', 'inherited-members',
                         'show-inheritance']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AutoSklearn'
copyright = u'2015, Matthias Feurer, Aaron Klein, Katharina Eggensperger'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1dev'
# The full version, including alpha/beta/rc tags.
release = '0.0.1dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AutoSklearndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'AutoSklearn.tex', u'AutoSklearn Documentation',
u'Matthias Feurer, Aaron Klein, Katharina Eggensperger', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'autosklearn', u'AutoSklearn Documentation',
[u'Matthias Feurer, Aaron Klein, Katharina Eggensperger'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'AutoSklearn', u'AutoSklearn Documentation',
u'Matthias Feurer, Aaron Klein, Katharina Eggensperger', 'AutoSklearn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
ptonner/GPy | GPy/examples/regression.py | 8 | 18746 | # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Gaussian Processes regression examples
"""
try:
from matplotlib import pyplot as pb
except:
pass
import numpy as np
import GPy
def olympic_marathon_men(optimize=True, plot=True):
    """Fit a plain GP regression to the Olympic marathon winning times.

    :param optimize: if True, tune the hyperparameters with BFGS.
    :param plot: if True, plot the posterior over the years 1850-2050.
    :returns: the fitted GPRegression model, or None when pods is missing.
    """
    try:
        import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    dataset = pods.datasets.olympic_marathon_men()
    model = GPy.models.GPRegression(dataset['X'], dataset['Y'])
    # the default lengthscale of 1 (one year) is far too short for this data
    model.kern.lengthscale = 10.
    if optimize:
        model.optimize('bfgs', max_iters=200)
    if plot:
        model.plot(plot_limits=(1850, 2050))
    return model
def coregionalization_toy(optimize=True, plot=True):
    """
    A simple demonstration of coregionalization on two sinusoidal functions.

    :param optimize: if True, optimize the hyperparameters with BFGS.
    :param plot: if True, plot the predictions for each output on shared axes.
    :returns: the fitted GPCoregionalizedRegression model.
    """
    #build a design matrix with a column of integers indicating the output
    X1 = np.random.rand(50, 1) * 8
    X2 = np.random.rand(30, 1) * 5
    #build a suitable set of observed variables
    Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
    # second output: the same sinusoid shifted up by 2
    Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
    m = GPy.models.GPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2])
    if optimize:
        m.optimize('bfgs', max_iters=100)
    if plot:
        # plot each output restricted to its own data rows; the second call
        # reuses the current axes so both outputs appear in one figure
        slices = GPy.util.multioutput.get_slices([X1,X2])
        m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],Y_metadata={'output_index':0})
        m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],Y_metadata={'output_index':1},ax=pb.gca())
    return m
def coregionalization_sparse(optimize=True, plot=True):
    """
    A simple demonstration of coregionalization on two sinusoidal functions using sparse approximations.

    :param optimize: if True, optimize the hyperparameters with BFGS.
    :param plot: if True, plot the predictions for each output on shared axes.
    :returns: the fitted SparseGPCoregionalizedRegression model.
    """
    #build a design matrix with a column of integers indicating the output
    X1 = np.random.rand(50, 1) * 8
    X2 = np.random.rand(30, 1) * 5
    #build a suitable set of observed variables
    Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
    # second output: the same sinusoid shifted up by 2
    Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
    m = GPy.models.SparseGPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2])
    if optimize:
        m.optimize('bfgs', max_iters=100)
    if plot:
        # plot each output restricted to its own data rows on one figure
        slices = GPy.util.multioutput.get_slices([X1,X2])
        m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],Y_metadata={'output_index':0})
        m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],Y_metadata={'output_index':1},ax=pb.gca())
        pb.ylim(-3,)
    return m
def epomeo_gpx(max_iters=200, optimize=True, plot=True):
    """
    Perform Gaussian process regression on the latitude and longitude data
    from the Mount Epomeo runs. Requires gpxpy to be installed on your system
    to load in the data.

    :param max_iters: maximum number of optimization iterations.
    :param optimize: if True, optimize the model hyperparameters.
    :param plot: currently unused; kept for signature consistency with the
        other examples in this module.
    :returns: the fitted sparse coregionalized GP model, or None when the
        pods package is not available.
    """
    try:import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    data = pods.datasets.epomeo_gpx()
    # total number of samples over all recorded runs
    num_data_list = []
    for Xpart in data['X']:
        num_data_list.append(Xpart.shape[0])
    num_data_array = np.array(num_data_list)
    num_data = num_data_array.sum()
    # Stack all runs: inputs t are (time, run index), outputs Y are the
    # two position columns of each run.
    Y = np.zeros((num_data, 2))
    t = np.zeros((num_data, 2))
    start = 0
    for Xpart, index in zip(data['X'], range(len(data['X']))):
        end = start + Xpart.shape[0]
        t[start:end, :] = np.hstack((Xpart[:, 0:1],
                                     index*np.ones((Xpart.shape[0], 1))))
        Y[start:end, :] = Xpart[:, 1:3]
        # advance the write offset; without this every run overwrote the
        # first slice of t and Y
        start = end
    num_inducing = 200
    # inducing inputs: evenly spaced in time, with a random run index
    Z = np.hstack((np.linspace(t[:,0].min(), t[:, 0].max(), num_inducing)[:, None],
                   np.random.randint(0, 4, num_inducing)[:, None]))
    k1 = GPy.kern.RBF(1)
    k2 = GPy.kern.Coregionalize(output_dim=5, rank=5)
    k = k1**k2
    m = GPy.models.SparseGPRegression(t, Y, kernel=k, Z=Z, normalize_Y=True)
    m.constrain_fixed('.*variance', 1.)
    m.inducing_inputs.constrain_fixed()
    m.Gaussian_noise.variance.constrain_bounded(1e-3, 1e-1)
    if optimize:
        # honour the optimize flag like the other examples in this module
        m.optimize(max_iters=max_iters, messages=True)
    return m
def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, max_iters=300, optimize=True, plot=True):
    """
    Show an example of a multimodal error surface for Gaussian process
    regression. Gene 939 has bimodal behaviour where the noisy mode is
    higher.

    :param gene_number: which gene of the della Gatta dataset to model.
    :param resolution: grid resolution (per axis) for the contour plot.
    :param model_restarts: number of random restarts of the optimizer.
    :param seed: numpy random seed used for the restarts.
    :param max_iters: maximum SCG iterations per restart.
    :returns: the model from the last restart (see NOTE at the return).
    """
    # Contour over a range of length scales and signal/noise ratios.
    length_scales = np.linspace(0.1, 60., resolution)
    log_SNRs = np.linspace(-3., 4., resolution)
    try:import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    data = pods.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number)
    # data['Y'] = data['Y'][0::2, :]
    # data['X'] = data['X'][0::2, :]
    # centre the targets so a zero-mean GP prior is appropriate
    data['Y'] = data['Y'] - np.mean(data['Y'])
    lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.RBF)
    if plot:
        pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet)
        ax = pb.gca()
        pb.xlabel('length scale')
        pb.ylabel('log_10 SNR')
        # remember the contour's limits so the arrows below don't rescale them
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
    # Now run a few optimizations
    models = []
    # (start, end) points of each optimization trajectory in the
    # (lengthscale, log10 SNR) plane
    optim_point_x = np.empty(2)
    optim_point_y = np.empty(2)
    np.random.seed(seed=seed)
    for i in range(0, model_restarts):
        # kern = GPy.kern.RBF(1, variance=np.random.exponential(1.), lengthscale=np.random.exponential(50.))
        kern = GPy.kern.RBF(1, variance=np.random.uniform(1e-3, 1), lengthscale=np.random.uniform(5, 50))
        m = GPy.models.GPRegression(data['X'], data['Y'], kernel=kern)
        m.likelihood.variance = np.random.uniform(1e-3, 1)
        optim_point_x[0] = m.rbf.lengthscale
        optim_point_y[0] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance);
        # optimize
        if optimize:
            m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)
        optim_point_x[1] = m.rbf.lengthscale
        optim_point_y[1] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance);
        if plot:
            # draw an arrow from the random initialisation to the optimum found
            pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
        models.append(m)
    if plot:
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    # NOTE(review): only the model from the last restart is returned; the
    # commented tuple suggests (models, lls) was once the intended return.
    return m # (models, lls)
def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.RBF):
    """
    Evaluate the GP objective function for a given data set for a range of
    signal to noise ratios and a range of lengthscales.

    :data_set: A data set from the utils.datasets director.
    :length_scales: a list of length scales to explore for the contour plot.
    :log_SNRs: a list of base 10 logarithm signal to noise ratios to explore for the contour plot.
    :kernel: a kernel to use for the 'signal' portion of the data.
    :returns: 2D array of log-likelihoods indexed as [SNR, lengthscale].
    """
    lls = []
    total_var = np.var(data['Y'])
    kernel = kernel_call(1, variance=1., lengthscale=1.)
    model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel)
    for log_SNR in log_SNRs:
        SNR = 10.**log_SNR
        # split the empirical variance into noise and signal parts so that
        # signal_var / noise_var == SNR while their sum stays fixed
        noise_var = total_var / (1. + SNR)
        signal_var = total_var - noise_var
        model.kern['.*variance'] = signal_var
        model.likelihood.variance = noise_var
        length_scale_lls = []
        for length_scale in length_scales:
            model['.*lengthscale'] = length_scale
            length_scale_lls.append(model.log_likelihood())
        lls.append(length_scale_lls)
    return np.array(lls)
def olympic_100m_men(optimize=True, plot=True):
    """Fit a standard GP regression to the Rogers and Girolami olympics data.

    :param optimize: if True, tune the hyperparameters with BFGS.
    :param plot: if True, plot the posterior over the years 1850-2050.
    :returns: the fitted GPRegression model, or None when pods is missing.
    """
    try:
        import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    dataset = pods.datasets.olympic_100m_men()
    model = GPy.models.GPRegression(dataset['X'], dataset['Y'])
    # widen the lengthscale from its default of 1 (one year) to something sensible
    model.rbf.lengthscale = 10
    if optimize:
        model.optimize('bfgs', max_iters=200)
    if plot:
        model.plot(plot_limits=(1850, 2050))
    return model
def toy_rbf_1d(optimize=True, plot=True):
    """Fit a standard GP to 1D toy data sampled from an RBF covariance.

    :param optimize: if True, tune the hyperparameters with BFGS.
    :param plot: if True, plot the fitted model.
    :returns: the fitted GPRegression model, or None when pods is missing.
    """
    try:
        import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    dataset = pods.datasets.toy_rbf_1d()
    model = GPy.models.GPRegression(dataset['X'], dataset['Y'])
    if optimize:
        model.optimize('bfgs')
    if plot:
        model.plot()
    return model
def toy_rbf_1d_50(optimize=True, plot=True):
    """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance.

    Uses the 50-point variant of the toy_rbf_1d dataset.

    :param optimize: if True, tune the hyperparameters with BFGS.
    :param plot: if True, plot the fitted model.
    :returns: the fitted GPRegression model, or None when pods is missing.
    """
    try:import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    data = pods.datasets.toy_rbf_1d_50()
    # create simple GP Model
    m = GPy.models.GPRegression(data['X'], data['Y'])
    if optimize:
        m.optimize('bfgs')
    if plot:
        m.plot()
    return m
def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
    """Fit a GP with a Poisson likelihood to toy count data, using the
    Laplace approximation for inference.

    The latent rate is a GP sample with RBF covariance; counts are drawn
    from a Poisson with rate exp(f).

    :param optimize: if True, tune the hyperparameters with SCG.
    :param plot: if True, plot the fit and the true rate function.
    :returns: the fitted GPy.core.GP model.
    """
    optimizer='scg'
    x_len = 30
    X = np.linspace(0, 10, x_len)[:, None]
    # draw a latent function from the GP prior
    f_true = np.random.multivariate_normal(np.zeros(x_len), GPy.kern.RBF(1).K(X))
    # observed counts: Poisson with log-link
    Y = np.array([np.random.poisson(np.exp(f)) for f in f_true])[:,None]
    kern = GPy.kern.RBF(1)
    poisson_lik = GPy.likelihoods.Poisson()
    laplace_inf = GPy.inference.latent_function_inference.Laplace()
    # create simple GP Model
    m = GPy.core.GP(X, Y, kernel=kern, likelihood=poisson_lik, inference_method=laplace_inf)
    if optimize:
        m.optimize(optimizer)
    if plot:
        m.plot()
        # plot the real underlying rate function
        pb.plot(X, np.exp(f_true), '--k', linewidth=2)
    return m
def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
    """Demonstrate automatic relevance determination (ARD) on synthetic data.

    :param max_iters: maximum SCG iterations.
    :param kernel_type: 'linear', 'rbf_inv' or anything else for RBF.
    :param num_samples: number of data points.
    :param D: output dimensionality after random mixing.
    :param optimize: if True, optimize the hyperparameters.
    :param plot: if True, plot the learned ARD weights.
    :returns: the fitted GPRegression model.
    """
    # Create an artificial dataset where the values in the targets (Y)
    # only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
    # see if this dependency can be recovered
    X1 = np.sin(np.sort(np.random.rand(num_samples, 1) * 10, 0))
    X2 = np.cos(np.sort(np.random.rand(num_samples, 1) * 10, 0))
    X3 = np.exp(np.sort(np.random.rand(num_samples, 1), 0))
    X4 = np.log(np.sort(np.random.rand(num_samples, 1), 0))
    X = np.hstack((X1, X2, X3, X4))
    # targets depend only on input dimensions 0 and 2
    Y1 = np.asarray(2 * X[:, 0] + 3).reshape(-1, 1)
    Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0])).reshape(-1, 1)
    Y = np.hstack((Y1, Y2))
    # mix into D output dimensions and add noise
    Y = np.dot(Y, np.random.rand(2, D));
    Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
    # standardize the targets
    Y -= Y.mean()
    Y /= Y.std()
    if kernel_type == 'linear':
        kernel = GPy.kern.Linear(X.shape[1], ARD=1)
    elif kernel_type == 'rbf_inv':
        kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
    else:
        kernel = GPy.kern.RBF(X.shape[1], ARD=1)
    kernel += GPy.kern.White(X.shape[1]) + GPy.kern.Bias(X.shape[1])
    m = GPy.models.GPRegression(X, Y, kernel)
    # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
    # m.set_prior('.*lengthscale',len_prior)
    if optimize:
        m.optimize(optimizer='scg', max_iters=max_iters)
    if plot:
        m.kern.plot_ARD()
    return m
def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
    """Sparse-GP variant of :func:`toy_ARD` with uncertain inputs.

    :param max_iters: maximum SCG iterations.
    :param kernel_type: 'linear', 'rbf_inv' or anything else for RBF.
    :param num_samples: number of data points.
    :param D: output dimensionality after random mixing.
    :param optimize: if True, optimize the hyperparameters.
    :param plot: if True, plot the learned ARD weights.
    :returns: the fitted SparseGPRegression model.
    """
    # Create an artificial dataset where the values in the targets (Y)
    # only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
    # see if this dependency can be recovered
    X1 = np.sin(np.sort(np.random.rand(num_samples, 1) * 10, 0))
    X2 = np.cos(np.sort(np.random.rand(num_samples, 1) * 10, 0))
    X3 = np.exp(np.sort(np.random.rand(num_samples, 1), 0))
    X4 = np.log(np.sort(np.random.rand(num_samples, 1), 0))
    X = np.hstack((X1, X2, X3, X4))
    # targets depend only on input dimensions 0 and 2
    Y1 = np.asarray(2 * X[:, 0] + 3)[:, None]
    Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0]))[:, None]
    Y = np.hstack((Y1, Y2))
    # mix into D output dimensions and add noise
    Y = np.dot(Y, np.random.rand(2, D));
    Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
    # standardize the targets
    Y -= Y.mean()
    Y /= Y.std()
    if kernel_type == 'linear':
        kernel = GPy.kern.Linear(X.shape[1], ARD=1)
    elif kernel_type == 'rbf_inv':
        kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
    else:
        kernel = GPy.kern.RBF(X.shape[1], ARD=1)
    #kernel += GPy.kern.Bias(X.shape[1])
    # isotropic input uncertainty for every point
    X_variance = np.ones(X.shape) * 0.5
    m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance=X_variance)
    # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
    # m.set_prior('.*lengthscale',len_prior)
    if optimize:
        m.optimize(optimizer='scg', max_iters=max_iters)
    if plot:
        m.kern.plot_ARD()
    return m
def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
    """Predict the location of a robot given wireless signal strength readings.

    :param max_iters: maximum number of optimization iterations.
    :param kernel: optional kernel to use; None for the GPy default.
    :param optimize: if True, optimize the hyperparameters.
    :param plot: if True, plot true vs predicted trajectories.
    :returns: the fitted GPRegression model, or None when pods is missing.
    """
    try:import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    data = pods.datasets.robot_wireless()
    # create simple GP Model
    # NOTE: inputs/targets are deliberately swapped -- we regress from the
    # signal strengths (data['Y']) to the robot positions (data['X'])
    m = GPy.models.GPRegression(data['Y'], data['X'], kernel=kernel)
    # optimize
    if optimize:
        m.optimize(max_iters=max_iters)
    # predict positions from the held-out signal readings
    Xpredict = m.predict(data['Ytest'])[0]
    if plot:
        pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')
        pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')
        pb.axis('equal')
        pb.title('WiFi Localization with Gaussian Processes')
        pb.legend(('True Location', 'Predicted Location'))
    # report test error regardless of plotting
    sse = ((data['Xtest'] - Xpredict)**2).sum()
    print(('Sum of squares error on test data: ' + str(sse)))
    return m
def silhouette(max_iters=100, optimize=True, plot=True):
    """Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper.

    :param max_iters: maximum number of optimization iterations.
    :param optimize: if True, optimize the hyperparameters.
    :param plot: currently unused in this example.
    :returns: the fitted GPRegression model, or None when pods is missing.
    """
    try:import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        return
    data = pods.datasets.silhouette()
    # create simple GP Model
    m = GPy.models.GPRegression(data['X'], data['Y'])
    # optimize
    if optimize:
        m.optimize(messages=True, max_iters=max_iters)
    print(m)
    return m
def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True, checkgrad=False):
    """Run a 1D example of a sparse GP regression.

    :param num_samples: number of noisy sine observations to generate.
    :param num_inducing: number of inducing points for the sparse model.
    :param max_iters: maximum number of TNC iterations.
    :param optimize: if True, optimize the hyperparameters.
    :param plot: if True, plot the fitted model.
    :param checkgrad: if True, run a gradient check before optimizing.
    :returns: the SparseGPRegression model.
    """
    # noisy observations of a sine wave on [-3, 3]
    inputs = np.random.uniform(-3., 3., (num_samples, 1))
    targets = np.sin(inputs) + np.random.randn(num_samples, 1) * 0.05
    kernel = GPy.kern.RBF(1)
    model = GPy.models.SparseGPRegression(inputs, targets, kernel=kernel, num_inducing=num_inducing)
    if checkgrad:
        model.checkgrad()
    if optimize:
        model.optimize('tnc', max_iters=max_iters)
    if plot:
        model.plot()
    return model
def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100, optimize=True, plot=True, nan=False):
    """Run a 2D example of a sparse GP regression.

    :param num_samples: number of training points.
    :param num_inducing: number of inducing points for the sparse model.
    :param max_iters: maximum number of TNC iterations.
    :param optimize: if True, optimize the hyperparameters.
    :param plot: if True, plot the fitted model.
    :param nan: if True, mark roughly 20% of the outputs as missing (NaN).
    :returns: the SparseGPRegression model.
    """
    np.random.seed(1234)
    X = np.random.uniform(-3., 3., (num_samples, 2))
    Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(num_samples, 1) * 0.05
    if nan:
        # Use a boolean mask to drop ~20% of the entries. The previous 0/1
        # integer array was interpreted as fancy row indices, so it only
        # ever set rows 0 and 1 to NaN instead of a random 20%.
        inan = np.random.binomial(1, .2, size=Y.shape).astype(bool)
        Y[inan] = np.nan
    # construct kernel
    rbf = GPy.kern.RBF(2)
    # create simple GP Model
    m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
    # contrain all parameters to be positive (but not inducing inputs)
    m['.*len'] = 2.
    m.checkgrad()
    # optimize
    if optimize:
        m.optimize('tnc', messages=1, max_iters=max_iters)
    # plot
    if plot:
        m.plot()
    print(m)
    return m
def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
    """Run a 1D example of a sparse GP regression with uncertain inputs.

    Fits the same data twice -- once treating the inputs as exact, once with
    unit input variance -- and plots the two fits side by side.

    :param max_iters: maximum SCG iterations per model.
    :param optimize: if True, optimize each model's hyperparameters.
    :param plot: if True, plot both fits.
    :returns: the model with input uncertainty (the second one fitted).
    """
    fig, axes = pb.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True)
    # sample inputs and outputs
    S = np.ones((20, 1))  # per-point input variance used by the second model
    X = np.random.uniform(-3., 3., (20, 1))
    Y = np.sin(X) + np.random.randn(20, 1) * 0.05
    # likelihood = GPy.likelihoods.Gaussian(Y)
    Z = np.random.uniform(-3., 3., (7, 1))
    k = GPy.kern.RBF(1)
    # create simple GP Model - no input uncertainty on this one
    m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
    if optimize:
        m.optimize('scg', messages=1, max_iters=max_iters)
    if plot:
        m.plot(ax=axes[0])
        axes[0].set_title('no input uncertainty')
    print(m)
    # the same Model with uncertainty
    m = GPy.models.SparseGPRegression(X, Y, kernel=GPy.kern.RBF(1), Z=Z, X_variance=S)
    if optimize:
        m.optimize('scg', messages=1, max_iters=max_iters)
    if plot:
        m.plot(ax=axes[1])
        axes[1].set_title('with input uncertainty')
        fig.canvas.draw()
    print(m)
    return m
def simple_mean_function(max_iters=100, optimize=True, plot=True):
    """
    The simplest possible mean function. No parameters, just a simple Sinusoid.

    :param max_iters: maximum number of optimization iterations.
    :param optimize: if True, optimize the hyperparameters.
    :param plot: if True, plot the fit on an extended range.
    :returns: the fitted GPy.core.GP model.
    """
    #create simple mean function
    mf = GPy.core.Mapping(1,1)
    mf.f = np.sin
    # the sine mean function has no parameters, so gradients are a no-op
    mf.update_gradients = lambda a,b: None
    X = np.linspace(0,10,50).reshape(-1,1)
    Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape)
    k =GPy.kern.RBF(1)
    lik = GPy.likelihoods.Gaussian()
    m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
    if optimize:
        m.optimize(max_iters=max_iters)
    if plot:
        # plot beyond the data range to show the mean function extrapolating
        m.plot(plot_limits=(-10,15))
    return m
def parametric_mean_function(max_iters=100, optimize=True, plot=True):
    """
    A linear mean function with parameters that we'll learn alongside the kernel.

    :param max_iters: maximum number of optimization iterations.
    :param optimize: if True, optimize mean-function and kernel parameters.
    :param plot: if True, plot the fitted model.
    :returns: the fitted GPy.core.GP model.
    """
    # data: sinusoid plus a strong linear trend for the mean to capture
    X = np.linspace(0,10,50).reshape(-1,1)
    Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + 3*X
    # linear mean function whose parameters are optimized with the kernel's
    # (the original code first built an unused GPy.core.Mapping that was
    # immediately overwritten here -- that dead code has been removed)
    mf = GPy.mappings.Linear(1,1)
    k =GPy.kern.RBF(1)
    lik = GPy.likelihoods.Gaussian()
    m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
    if optimize:
        m.optimize(max_iters=max_iters)
    if plot:
        m.plot()
    return m
| bsd-3-clause |
jakobj/UP-Tasks | NEST/single_neuron_task/single_neuron.py | 3 | 1344 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import nest # import NEST module
def single_neuron(spike_times, sim_duration):
    """Simulate a LIF neuron driven by predefined input spikes and plot V_m.

    :param spike_times: list of input spike times in ms.
    :param sim_duration: total simulation time in ms.

    Saves the membrane-potential trace to 'single_neuron.png'.
    """
    nest.set_verbosity('M_WARNING')  # reduce NEST output
    nest.ResetKernel()  # reset simulation kernel
    # create LIF neuron with exponential synaptic currents
    neuron = nest.Create('iaf_psc_exp')
    # create a voltmeter
    voltmeter = nest.Create('voltmeter', params={'interval': 0.1})
    # create a spike generator
    spikegenerator = nest.Create('spike_generator')
    # ... and let it spike at predefined times
    nest.SetStatus(spikegenerator, {'spike_times': spike_times})
    # connect spike generator and voltmeter to the neuron
    nest.Connect(spikegenerator, neuron)
    nest.Connect(voltmeter, neuron)
    # run simulation for sim_duration
    nest.Simulate(sim_duration)
    # read out recording time and voltage from voltmeter
    times = nest.GetStatus(voltmeter)[0]['events']['times']
    voltage = nest.GetStatus(voltmeter)[0]['events']['V_m']
    # plot results and save them to disk
    plt.plot(times, voltage)
    plt.xlabel('Time (ms)')
    plt.ylabel('Membrane potential (mV)')
    filename = 'single_neuron.png'
    plt.savefig(filename, dpi=300)
if __name__ == '__main__':
    # demo: two input spikes at 10 ms and 50 ms, simulated for 100 ms
    spike_times = [10., 50.]
    sim_duration = 100.
    single_neuron(spike_times, sim_duration)
| gpl-2.0 |
jmschrei/scikit-learn | sklearn/utils/tests/test_class_weight.py | 90 | 12846 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
    # Test (and demo) compute_class_weight.
    y = np.asarray([2, 2, 2, 3, 3, 4])
    classes = np.unique(y)
    # the deprecated "auto" heuristic should still work, with a warning
    cw = assert_warns(DeprecationWarning,
                      compute_class_weight, "auto", classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    # rarer classes get larger weights
    assert_true(cw[0] < cw[1] < cw[2])
    cw = compute_class_weight("balanced", classes, y)
    # total effect of samples is preserved
    class_counts = np.bincount(y)[2:]
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
    # Raise error when y does not contain all class labels
    classes = np.arange(4)
    y = np.asarray([0, 0, 0, 1, 1, 2])
    # class 3 never appears in y, so both presets must reject it
    for preset in ("auto", "balanced"):
        assert_raises(ValueError, compute_class_weight, preset, classes, y)
def test_compute_class_weight_dict():
    classes = np.arange(3)
    class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
    y = np.asarray([0, 0, 1, 2])
    cw = compute_class_weight(class_weights, classes, y)
    # When the user specifies class weights, compute_class_weights should just
    # return them.
    assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
    # When a class weight is specified that isn't in classes, a ValueError
    # should get raised
    msg = 'Class label 4 not present.'
    class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
    assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
                         classes, y)
    # the same applies to labels below the class range
    msg = 'Class label -1 not present.'
    class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
    assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
                         classes, y)
def test_compute_class_weight_invariance():
    # Test that results with class_weight="balanced" is invariant wrt
    # class imbalance if the number of samples is identical.
    # The test uses a balanced two class dataset with 100 datapoints.
    # It creates three versions, one where class 1 is duplicated
    # resulting in 150 points of class 1 and 50 of class 0,
    # one where there are 50 points in class 1 and 150 in class 0,
    # and one where there are 100 points of each class (this one is balanced
    # again).
    # With balancing class weights, all three should give the same model.
    X, y = make_blobs(centers=2, random_state=0)
    # create dataset where class 1 is duplicated twice
    X_1 = np.vstack([X] + [X[y == 1]] * 2)
    y_1 = np.hstack([y] + [y[y == 1]] * 2)
    # create dataset where class 0 is duplicated twice
    X_0 = np.vstack([X] + [X[y == 0]] * 2)
    y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
    X_ = np.vstack([X] * 2)
    y_ = np.hstack([y] * 2)
    # results should be identical
    logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
    logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
    logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
    assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
    assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
    # Test compute_class_weight when labels are negative
    # Test with balanced class labels.
    classes = np.array([-2, -1, 0])
    y = np.asarray([-1, -1, 0, 0, -2, -2])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    # balanced labels -> uniform weights
    assert_array_almost_equal(cw, np.array([1., 1., 1.]))
    cw = compute_class_weight("balanced", classes, y)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([1., 1., 1.]))
    # Test with unbalanced class labels.
    y = np.asarray([-1, 0, 0, -2, -2, -2])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
    cw = compute_class_weight("balanced", classes, y)
    assert_equal(len(cw), len(classes))
    # shift labels to be non-negative before counting
    class_counts = np.bincount(y + 2)
    # weighted sample count equals the raw sample count
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
    # Test compute_class_weight when classes are unordered
    classes = np.array([1, 0, 3])
    y = np.asarray([1, 0, 0, 3, 3, 3])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
    cw = compute_class_weight("balanced", classes, y)
    # counts reordered to match the (unordered) classes array
    class_counts = np.bincount(y)[classes]
    # weighted sample count equals the raw sample count
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
    # Test (and demo) compute_sample_weight.
    # Test with balanced classes
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with user-defined weights
    sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
    # Test with column vector of balanced classes
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with unbalanced classes
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
    assert_array_almost_equal(sample_weight, expected_auto)
    sample_weight = compute_sample_weight("balanced", y)
    expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
    assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
    # Test with `None` weights
    sample_weight = compute_sample_weight(None, y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
    # Test with multi-output of balanced classes
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with multi-output with user-defined weights
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
    # Test with multi-output of unbalanced classes
    # (per-output weights multiply, hence the squared expectations)
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, expected_auto ** 2)
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
    # Test compute_sample_weight with subsamples specified.
    # Test with balanced classes and all samples present
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with column vector of balanced classes and all samples present
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with a subsample (only the first four samples are counted)
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y, range(4))
    assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
    sample_weight = compute_sample_weight("balanced", y, range(4))
    assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
                                              2. / 3, 2., 2., 2.])
    # Test with a bootstrap subsample (indices may repeat)
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, [0, 1, 1, 2, 2, 3])
    expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
    assert_array_almost_equal(sample_weight, expected_auto)
    sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
    expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
    assert_array_almost_equal(sample_weight, expected_balanced)
    # Test with a bootstrap subsample for multi-output
    # (per-output weights multiply, hence the squared expectations)
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(sample_weight, expected_auto ** 2)
    sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(sample_weight, expected_balanced ** 2)
    # Test with a missing class (class 3 absent from the subsample gets weight 0)
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    # Test with a missing class for multi-output
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
    """Check that compute_sample_weight raises ValueError on invalid input."""
    y_single = np.asarray([1, 1, 1, 2, 2, 2])
    y_multi = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    # An unknown preset string is rejected, with and without a subsample,
    # for single-output and multi-output targets alike.
    for target in (y_single, y_multi):
        assert_raises(ValueError, compute_sample_weight, "ni", target)
        assert_raises(ValueError, compute_sample_weight, "ni", target,
                      range(4))
    # An explicit class-weight dict cannot be combined with a subsample;
    # only the "auto"/"balanced" presets are allowed there.
    assert_raises(ValueError,
                  compute_sample_weight, {1: 2, 2: 1}, y_single, range(4))
    # Multi-output targets need a preset string or a list of dicts,
    # never a single dict.
    assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_multi)
    # A list of dicts whose length does not match the number of outputs
    # is rejected as well.
    assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_multi)
| bsd-3-clause |
kernc/scikit-learn | sklearn/linear_model/least_angle.py | 11 | 57260 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..model_selection import check_cv
from ..exceptions import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..externals.six import string_types
import scipy
# Extra keyword arguments forwarded to scipy.linalg.solve_triangular.
# From scipy 0.12 onwards the function accepts ``check_finite``; disabling
# that check skips an extra pass over the data on every triangular solve.
# Older scipy versions do not know the keyword, so we pass nothing there.
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
              alpha_min=0, method='lar', copy_X=True,
              eps=np.finfo(np.float).eps,
              copy_Gram=True, verbose=0, return_path=True,
              return_n_iter=False, positive=False):
    """Compute Least Angle Regression or Lasso path using LARS algorithm [1]

    The optimization objective for the case method='lasso' is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    in the case of method='lars', the objective function is only known in
    the form of an implicit equation (see discussion in [1])

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    -----------
    X : array, shape: (n_samples, n_features)
        Input data.

    y : array, shape: (n_samples)
        Input targets.

    Xy : array-like, shape (n_samples,), optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    positive : boolean (default=False)
        Restrict coefficients to be >= 0.
        When using this option together with method 'lasso' the model
        coefficients will not converge to the ordinary-least-squares solution
        for small values of alpha (neither will they when using method 'lar'
        ..). Only coefficients up to the smallest alpha value
        (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the
        stepwise Lars-Lasso algorithm are typically in congruence with the
        solution of the coordinate descent lasso_path function.

    max_iter : integer, optional (default=500)
        Maximum number of iterations to perform, set to infinity for no limit.

    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features.

    alpha_min : float, optional (default=0)
        Minimum correlation along the path. It corresponds to the
        regularization parameter alpha parameter in the Lasso.

    method : {'lar', 'lasso'}, optional (default='lar')
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.

    eps : float, optional (default=``np.finfo(np.float).eps``)
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    copy_X : bool, optional (default=True)
        If ``False``, ``X`` is overwritten.

    copy_Gram : bool, optional (default=True)
        If ``False``, ``Gram`` is overwritten.

    verbose : int (default=0)
        Controls output verbosity.

    return_path : bool, optional (default=True)
        If ``return_path==True`` returns the entire path, else returns only the
        last point of the path.

    return_n_iter : bool, optional (default=False)
        Whether to return the number of iterations.

    Returns
    --------
    alphas : array, shape: [n_alphas + 1]
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter``, ``n_features`` or the
        number of nodes in the path with ``alpha >= alpha_min``, whichever
        is smaller.

    active : array, shape [n_alphas]
        Indices of active variables at the end of the path.

    coefs : array, shape (n_features, n_alphas + 1)
        Coefficients along the path

    n_iter : int
        Number of iterations run. Returned only if return_n_iter is set
        to True.

    See also
    --------
    lasso_path
    LassoLars
    Lars
    LassoLarsCV
    LarsCV
    sklearn.decomposition.sparse_encode

    References
    ----------
    .. [1] "Least Angle Regression", Efron et al.
           http://www-stat.stanford.edu/~tibs/ftp/lars.pdf

    .. [2] `Wikipedia entry on the Least-angle regression
           <http://en.wikipedia.org/wiki/Least-angle_regression>`_

    .. [3] `Wikipedia entry on the Lasso
           <http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_

    """
    n_features = X.shape[1]
    n_samples = y.size
    # The path can hold at most min(max_iter, n_features) active regressors.
    max_features = min(max_iter, n_features)

    if return_path:
        coefs = np.zeros((max_features + 1, n_features))
        alphas = np.zeros(max_features + 1)
    else:
        # Only the current and the previous point on the path are tracked.
        coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
        alpha, prev_alpha = np.array([0.]), np.array([0.])  # better ideas?

    n_iter, n_active = 0, 0
    active, indices = list(), np.arange(n_features)
    # holds the sign of covariance
    sign_active = np.empty(max_features, dtype=np.int8)
    drop = False

    # will hold the cholesky factorization. Only lower part is
    # referenced.
    # We are initializing this to "zeros" and not empty, because
    # it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
    # get errors raised.
    # Once we support only scipy > 0.12 we can use check_finite=False and
    # go back to "empty"
    L = np.zeros((max_features, max_features), dtype=X.dtype)
    swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
    solve_cholesky, = get_lapack_funcs(('potrs',), (X,))

    if Gram is None:
        if copy_X:
            # force copy. setting the array to be fortran-ordered
            # speeds up the calculation of the (partial) Gram matrix
            # and allows to easily swap columns
            X = X.copy('F')
    elif isinstance(Gram, string_types) and Gram == 'auto':
        Gram = None
        if X.shape[0] > X.shape[1]:
            Gram = np.dot(X.T, X)
    elif copy_Gram:
        Gram = Gram.copy()

    # Cov holds the covariance of the (not yet active) regressors with y.
    if Xy is None:
        Cov = np.dot(X.T, y)
    else:
        Cov = Xy.copy()

    if verbose:
        if verbose > 1:
            print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
        else:
            sys.stdout.write('.')
            sys.stdout.flush()

    tiny = np.finfo(np.float).tiny  # to avoid division by 0 warning
    tiny32 = np.finfo(np.float32).tiny  # to avoid division by 0 warning
    equality_tolerance = np.finfo(np.float32).eps

    # Main loop: at each iteration one regressor enters the active set
    # (and, for the lasso variant, one may leave when its coefficient
    # crosses zero).
    while True:
        if Cov.size:
            if positive:
                C_idx = np.argmax(Cov)
            else:
                C_idx = np.argmax(np.abs(Cov))
            C_ = Cov[C_idx]
            if positive:
                C = C_
            else:
                C = np.fabs(C_)
        else:
            C = 0.

        if return_path:
            alpha = alphas[n_iter, np.newaxis]
            coef = coefs[n_iter]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
            prev_coef = coefs[n_iter - 1]

        alpha[0] = C / n_samples
        if alpha[0] <= alpha_min + equality_tolerance:  # early stopping
            if abs(alpha[0] - alpha_min) > equality_tolerance:
                # interpolation factor 0 <= ss < 1
                if n_iter > 0:
                    # In the first iteration, all alphas are zero, the formula
                    # below would make ss a NaN
                    ss = ((prev_alpha[0] - alpha_min) /
                          (prev_alpha[0] - alpha[0]))
                    coef[:] = prev_coef + ss * (coef - prev_coef)
                alpha[0] = alpha_min
            if return_path:
                coefs[n_iter] = coef
            break

        if n_iter >= max_iter or n_active >= n_features:
            break

        if not drop:

            ##########################################################
            # Append x_j to the Cholesky factorization of (Xa * Xa') #
            #                                                        #
            #            ( L   0 )                                   #
            #     L  ->  (       )  , where L * w = Xa' x_j          #
            #            ( w   z )    and z = ||x_j||                #
            #                                                        #
            ##########################################################

            if positive:
                sign_active[n_active] = np.ones_like(C_)
            else:
                sign_active[n_active] = np.sign(C_)
            m, n = n_active, C_idx + n_active

            Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
            indices[n], indices[m] = indices[m], indices[n]
            Cov_not_shortened = Cov
            Cov = Cov[1:]  # remove Cov[0]

            if Gram is None:
                X.T[n], X.T[m] = swap(X.T[n], X.T[m])
                c = nrm2(X.T[n_active]) ** 2
                L[n_active, :n_active] = \
                    np.dot(X.T[n_active], X.T[:n_active].T)
            else:
                # swap does only work inplace if matrix is fortran
                # contiguous ...
                Gram[m], Gram[n] = swap(Gram[m], Gram[n])
                Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
                c = Gram[n_active, n_active]
                L[n_active, :n_active] = Gram[n_active, :n_active]

            # Update the cholesky decomposition for the Gram matrix
            if n_active:
                linalg.solve_triangular(L[:n_active, :n_active],
                                        L[n_active, :n_active],
                                        trans=0, lower=1,
                                        overwrite_b=True,
                                        **solve_triangular_args)

            v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
            diag = max(np.sqrt(np.abs(c - v)), eps)
            L[n_active, n_active] = diag

            if diag < 1e-7:
                # The system is becoming too ill-conditioned.
                # We have degenerate vectors in our active set.
                # We'll 'drop for good' the last regressor added.
                # Note: this case is very rare. It is no longer triggered by
                # the test suite. The `equality_tolerance` margin added in 0.16
                # to get early stopping to work consistently on all versions of
                # Python including 32 bit Python under Windows seems to make it
                # very difficult to trigger the 'drop for good' strategy.
                warnings.warn('Regressors in active set degenerate. '
                              'Dropping a regressor, after %i iterations, '
                              'i.e. alpha=%.3e, '
                              'with an active set of %i regressors, and '
                              'the smallest cholesky pivot element being %.3e'
                              % (n_iter, alpha, n_active, diag),
                              ConvergenceWarning)
                # XXX: need to figure a 'drop for good' way
                Cov = Cov_not_shortened
                Cov[0] = 0
                Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
                continue

            active.append(indices[n_active])
            n_active += 1

            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
                                                      n_active, C))

        if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # than the remaining correlation with the
            # regressors. Time to bail out
            warnings.warn('Early stopping the lars path, as the residues '
                          'are small and the current value of alpha is no '
                          'longer well controlled. %i iterations, alpha=%.3e, '
                          'previous alpha=%.3e, with an active set of %i '
                          'regressors.'
                          % (n_iter, alpha, prev_alpha, n_active),
                          ConvergenceWarning)
            break

        # least squares solution
        least_squares, info = solve_cholesky(L[:n_active, :n_active],
                                             sign_active[:n_active],
                                             lower=True)

        if least_squares.size == 1 and least_squares == 0:
            # This happens because sign_active[:n_active] = 0
            least_squares[...] = 1
            AA = 1.
        else:
            # is this really needed ?
            AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))

            if not np.isfinite(AA):
                # L is too ill-conditioned
                i = 0
                L_ = L[:n_active, :n_active].copy()
                while not np.isfinite(AA):
                    L_.flat[::n_active + 1] += (2 ** i) * eps
                    least_squares, info = solve_cholesky(
                        L_, sign_active[:n_active], lower=True)
                    tmp = max(np.sum(least_squares * sign_active[:n_active]),
                              eps)
                    AA = 1. / np.sqrt(tmp)
                    i += 1
            least_squares *= AA

        if Gram is None:
            # equiangular direction of variables in the active set
            eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and
            # equiangular vector
            corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
        else:
            # if huge number of features, this takes 50% of time, I
            # think could be avoided if we just update it using an
            # orthogonal (QR) decomposition of X
            corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
                                 least_squares)

        # gamma_ is the step length along the equiangular direction.
        g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
        if positive:
            gamma_ = min(g1, C / AA)
        else:
            g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
            gamma_ = min(g1, g2, C / AA)

        # TODO: better names for these variables: z
        drop = False
        z = -coef[active] / (least_squares + tiny32)
        z_pos = arrayfuncs.min_pos(z)
        if z_pos < gamma_:
            # some coefficients have changed sign
            idx = np.where(z == z_pos)[0][::-1]

            # update the sign, important for LAR
            sign_active[idx] = -sign_active[idx]

            if method == 'lasso':
                gamma_ = z_pos
            drop = True

        n_iter += 1

        if return_path:
            if n_iter >= coefs.shape[0]:
                del coef, alpha, prev_alpha, prev_coef
                # resize the coefs and alphas array
                add_features = 2 * max(1, (max_features - n_active))
                coefs = np.resize(coefs, (n_iter + add_features, n_features))
                alphas = np.resize(alphas, n_iter + add_features)
            coef = coefs[n_iter]
            prev_coef = coefs[n_iter - 1]
            alpha = alphas[n_iter, np.newaxis]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
        else:
            # mimic the effect of incrementing n_iter on the array references
            prev_coef = coef
            prev_alpha[0] = alpha[0]
            coef = np.zeros_like(coef)

        coef[active] = prev_coef[active] + gamma_ * least_squares

        # update correlations
        Cov -= gamma_ * corr_eq_dir

        # See if any coefficient has changed sign
        if drop and method == 'lasso':

            # handle the case when idx is not length of 1
            [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
                idx]

            n_active -= 1
            m, n = idx, n_active
            # handle the case when idx is not length of 1
            drop_idx = [active.pop(ii) for ii in idx]

            if Gram is None:
                # propagate dropped variable
                for ii in idx:
                    for i in range(ii, n_active):
                        X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # yeah this is stupid
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]

                # TODO: this could be updated
                residual = y - np.dot(X[:, :n_active], coef[active])
                temp = np.dot(X.T[n_active], residual)

                Cov = np.r_[temp, Cov]
            else:
                for ii in idx:
                    for i in range(ii, n_active):
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                        Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
                        Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
                                                          Gram[:, i + 1])

                # Cov_n = Cov_j + x_j * X + increment(betas) TODO:
                # will this still work with multiple drops ?

                # recompute covariance. Probably could be done better
                # wrong as Xy is not swapped with the rest of variables

                # TODO: this could be updated
                residual = y - np.dot(X, coef)
                temp = np.dot(X.T[drop_idx], residual)
                Cov = np.r_[temp, Cov]

            sign_active = np.delete(sign_active, idx)
            sign_active = np.append(sign_active, 0.)  # just to maintain size
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
                                                      n_active, abs(temp)))

    if return_path:
        # resize coefs in case of early stop
        alphas = alphas[:n_iter + 1]
        coefs = coefs[:n_iter + 1]

        if return_n_iter:
            return alphas, active, coefs.T, n_iter
        else:
            return alphas, active, coefs.T
    else:
        if return_n_iter:
            return alpha, active, coef, n_iter
        else:
            return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
    """Least Angle Regression model a.k.a. LAR

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Target number of non-zero coefficients. Use ``np.inf`` for no limit.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    fit_path : boolean
        If True the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.

    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
        whichever is smaller.

    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.

    coef_path_ : array, shape (n_features, n_alphas + 1) \
        | list of n_targets such arrays
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : array-like or int
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lars(n_nonzero_coefs=1)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
       n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
       verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0. -1.11...]

    See also
    --------
    lars_path, LarsCV
    sklearn.decomposition.sparse_encode

    """
    def __init__(self, fit_intercept=True, verbose=False, normalize=True,
                 precompute='auto', n_nonzero_coefs=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
                 positive=False):
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        self.method = 'lar'
        self.precompute = precompute
        self.n_nonzero_coefs = n_nonzero_coefs
        self.positive = positive
        self.eps = eps
        self.copy_X = copy_X
        self.fit_path = fit_path

    def _get_gram(self):
        # Resolve the ``precompute`` parameter into the ``Gram`` argument
        # understood by lars_path: an explicit array is forwarded as-is,
        # 'auto' lets lars_path decide, anything else disables precomputation.
        # precompute if n_samples > n_features
        precompute = self.precompute
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute == 'auto':
            Gram = 'auto'
        else:
            Gram = None
        return Gram

    def fit(self, X, y, Xy=None):
        """Fit the model using X, y as training data.

        parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
                optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.

        returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
        n_features = X.shape[1]

        X, y, X_offset, y_offset, X_scale = self._preprocess_data(X, y,
                                                                  self.fit_intercept,
                                                                  self.normalize,
                                                                  self.copy_X)

        # Internally always work with a 2d target (one column per target).
        if y.ndim == 1:
            y = y[:, np.newaxis]

        n_targets = y.shape[1]

        # This fit is shared with subclasses (e.g. LassoLars): ``alpha`` only
        # exists on the lasso variants, while ``n_nonzero_coefs`` — present on
        # plain Lars — takes priority and turns off regularization.
        alpha = getattr(self, 'alpha', 0.)
        if hasattr(self, 'n_nonzero_coefs'):
            alpha = 0.  # n_nonzero_coefs parametrization takes priority
            max_iter = self.n_nonzero_coefs
        else:
            max_iter = self.max_iter

        precompute = self.precompute
        if not hasattr(precompute, '__array__') and (
                precompute is True or
                (precompute == 'auto' and X.shape[0] > X.shape[1]) or
                (precompute == 'auto' and y.shape[1] > 1)):
            Gram = np.dot(X.T, X)
        else:
            Gram = self._get_gram()

        self.alphas_ = []
        self.n_iter_ = []

        if self.fit_path:
            self.coef_ = []
            self.active_ = []
            self.coef_path_ = []
            # One lars_path call per target column.
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, active, coef_path, n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=True,
                    return_n_iter=True, positive=self.positive)
                self.alphas_.append(alphas)
                self.active_.append(active)
                self.n_iter_.append(n_iter_)
                self.coef_path_.append(coef_path)
                self.coef_.append(coef_path[:, -1])

            # For a single target, unwrap the per-target lists to plain
            # arrays/scalars for a friendlier API.
            if n_targets == 1:
                self.alphas_, self.active_, self.coef_path_, self.coef_ = [
                    a[0] for a in (self.alphas_, self.active_, self.coef_path_,
                                   self.coef_)]
                self.n_iter_ = self.n_iter_[0]
        else:
            self.coef_ = np.empty((n_targets, n_features))
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, _, self.coef_[k], n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=False, return_n_iter=True,
                    positive=self.positive)
                self.alphas_.append(alphas)
                self.n_iter_.append(n_iter_)
            if n_targets == 1:
                self.alphas_ = self.alphas_[0]
                self.n_iter_ = self.n_iter_[0]

        self._set_intercept(X_offset, y_offset, X_scale)
        return self
class LassoLars(Lars):
    """Lasso model fit with Least Angle Regression a.k.a. Lars

    It is a Linear Model trained with an L1 prior as regularizer.

    The optimization objective for Lasso is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by :class:`LinearRegression`. For numerical reasons, using
        ``alpha = 0`` with the LassoLars object is not advised and you
        should prefer the LinearRegression object.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to
        false, no intercept will be used in calculations (the data is
        then expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default. Under the
        positive restriction the model coefficients will not converge to
        the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value
        (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
        the stepwise Lars-Lasso algorithm are typically in congruence
        with the solution of the coordinate descent Lasso estimator.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        If you wish to standardize instead, use
        `preprocessing.StandardScaler` before calling `fit` on an
        estimator with `normalize=False`.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    fit_path : boolean
        If ``True`` the full path is stored in the ``coef_path_``
        attribute. Setting ``fit_path`` to ``False`` speeds up fitting,
        especially with a small alpha.

    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter``, ``n_features``, or the number
        of nodes in the path with correlation greater than ``alpha``,
        whichever is smaller.

    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.

    coef_path_ : array, shape (n_features, n_alphas + 1) or list
        If a list is passed it's expected to be one of n_targets such
        arrays. The varying values of the coefficients along the path.
        Not present if ``fit_path`` is ``False``.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : array-like or int.
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.

    See also
    --------
    lars_path
    lasso_path
    Lasso
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    """
    def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
                 positive=False):
        # All the fitting machinery lives in Lars.fit; this subclass only
        # selects the 'lasso' variant of the path and stores ``alpha`` and
        # ``max_iter``. Deliberately do NOT call Lars.__init__: it would set
        # ``n_nonzero_coefs``, whose mere presence makes Lars.fit ignore
        # ``alpha``.
        self.method = 'lasso'
        self.alpha = alpha
        self.max_iter = max_iter
        self.fit_intercept = fit_intercept
        self.positive = positive
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        self.eps = eps
        self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
    """Return ``array`` itself when it may be written to, else a copy.

    A copy is made either when explicitly requested via ``copy=True`` or
    when the array is flagged read-only, so that callers can safely apply
    in-place operations to the result.
    """
    needs_copy = copy or not array.flags.writeable
    return array.copy() if needs_copy else array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
                        copy=True, method='lars', verbose=False,
                        fit_intercept=True, normalize=True, max_iter=500,
                        eps=np.finfo(np.float).eps, positive=False):
    """Compute the residues on left-out data for a full LARS path.

    A LARS (or lasso) path is fitted on ``(X_train, y_train)`` and the
    prediction error on ``(X_test, y_test)`` is evaluated at every knot
    of the path. Used by the cross-validated estimators below.

    Parameters
    -----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on.

    y_train : array, shape (n_samples)
        The target variable to fit LARS on.

    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on.

    y_test : array, shape (n_samples)
        The target variable to compute the residues on.

    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X); with ``'auto'`` it is computed
        from X when there are more samples than features.

    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied;
        if False, they may be overwritten.

    method : 'lar' | 'lasso'
        Specifies the returned model: ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.

    verbose : integer, optional
        Sets the amount of verbosity.

    fit_intercept : boolean
        Whether to center the data (train statistics are applied to both
        splits) before fitting.

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. See the reservations about
        combining this with method 'lasso' in the doc of LassoLarsCV and
        LassoLarsIC.

    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
        Ignored when `fit_intercept` is False. To standardize instead,
        use `preprocessing.StandardScaler` before calling `fit` with
        `normalize=False`.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    Returns
    --------
    alphas : array, shape (n_alphas,)
        Maximum of covariances (in absolute value) at each iteration;
        ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
        is smaller.

    active : list
        Indices of active variables at the end of the path.

    coefs : array, shape (n_features, n_alphas)
        Coefficients along the path.

    residues : array, shape (n_alphas, n_samples)
        Residues of the prediction on the test data.
    """
    # Ensure every array may be modified in place (copying on demand).
    X_train, y_train, X_test, y_test = (
        _check_copy_and_writeable(arr, copy)
        for arr in (X_train, y_train, X_test, y_test))

    if fit_intercept:
        # Center both splits using the *training* statistics only.
        train_X_mean = X_train.mean(axis=0)
        X_train -= train_X_mean
        X_test -= train_X_mean
        train_y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= train_y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= train_y_mean

    if normalize:
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]

    alphas, active, coefs = lars_path(
        X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
        method=method, verbose=max(0, verbose - 1), max_iter=max_iter,
        eps=eps, positive=positive)

    if normalize:
        # Undo the column scaling so the coefficients apply to raw features.
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]

    residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
    return alphas, active, coefs, residues.T
class LarsCV(Lars):
    """Cross-validated Least Angle Regression model

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function

    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path

    alpha_ : float
        the estimated regularization parameter alpha

    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path

    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds

    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)

    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.

    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """

    method = 'lar'

    def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
                 normalize=True, precompute='auto', cv=None,
                 max_n_alphas=1000, n_jobs=1, eps=np.finfo(float).eps,
                 copy_X=True, positive=False):
        # np.finfo(float) replaces the deprecated np.finfo(np.float): the
        # np.float alias was removed in NumPy 1.24; the value is identical.
        self.fit_intercept = fit_intercept
        self.positive = positive
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        self.cv = cv
        self.max_n_alphas = max_n_alphas
        self.n_jobs = n_jobs
        self.eps = eps

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)
        X = as_float_array(X, copy=self.copy_X)
        y = as_float_array(y, copy=self.copy_X)

        # init cross-validation generator
        cv = check_cv(self.cv, classifier=False)

        # NOTE(review): a Gram matrix passed as an array in `precompute` is
        # collapsed to 'auto' here (each fold sees a different training set,
        # so a single precomputed Gram would be wrong) -- confirm intended.
        Gram = 'auto' if self.precompute else None

        # Compute one LARS path per CV fold, in parallel.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_lars_path_residues)(
                X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
                method=self.method, verbose=max(0, self.verbose - 1),
                normalize=self.normalize, fit_intercept=self.fit_intercept,
                max_iter=self.max_iter, eps=self.eps, positive=self.positive)
            for train, test in cv.split(X, y))

        # Build a common grid of alphas across all folds.
        all_alphas = np.concatenate(list(zip(*cv_paths))[0])
        # Unique also sorts
        all_alphas = np.unique(all_alphas)
        # Take at most max_n_alphas values
        stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
        all_alphas = all_alphas[::stride]

        # Interpolate each fold's residues onto the common alpha grid and
        # accumulate the per-fold MSE along the path.
        mse_path = np.empty((len(all_alphas), len(cv_paths)))
        for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
            alphas = alphas[::-1]
            residues = residues[::-1]
            if alphas[0] != 0:
                alphas = np.r_[0, alphas]
                residues = np.r_[residues[0, np.newaxis], residues]
            if alphas[-1] != all_alphas[-1]:
                alphas = np.r_[alphas, all_alphas[-1]]
                residues = np.r_[residues, residues[-1, np.newaxis]]
            this_residues = interpolate.interp1d(alphas,
                                                 residues,
                                                 axis=0)(all_alphas)
            this_residues **= 2
            mse_path[:, index] = np.mean(this_residues, axis=-1)

        # Drop alphas where any fold produced non-finite errors.
        mask = np.all(np.isfinite(mse_path), axis=-1)
        all_alphas = all_alphas[mask]
        mse_path = mse_path[mask]

        # Select the alpha that minimizes left-out error
        i_best_alpha = np.argmin(mse_path.mean(axis=-1))
        best_alpha = all_alphas[i_best_alpha]

        # Store our parameters
        self.alpha_ = best_alpha
        self.cv_alphas_ = all_alphas
        self.cv_mse_path_ = mse_path

        # Now compute the full model
        # it will call a lasso internally when self if LassoLarsCV
        # as self.method == 'lasso'
        Lars.fit(self, X, y)
        return self

    @property
    def alpha(self):
        # impedance matching for the above Lars.fit (should not be documented)
        return self.alpha_
class LassoLarsCV(LarsCV):
    """Cross-validated Lasso, using the LARS algorithm

    The optimization objective for Lasso is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        Under the positive restriction the model coefficients do not converge
        to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
        0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
        algorithm are typically in congruence with the solution of the
        coordinate descent Lasso estimator.
        As a consequence using LassoLarsCV only makes sense for problems where
        a sparse solution is expected and/or reached.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function.

    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path

    alpha_ : float
        the estimated regularization parameter alpha

    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path

    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds

    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)

    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.

    Notes
    -----
    The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alphas values by itself.
    In general, because of this property, it will be more stable.
    However, it is more fragile to heavily multicollinear datasets.

    It is more efficient than the LassoCV if only a small number of
    features are selected compared to the total number, for instance if
    there are very few samples compared to the number of features.

    See also
    --------
    lars_path, LassoLars, LarsCV, LassoCV
    """

    # All fitting machinery is inherited from LarsCV; switching the path
    # method to 'lasso' is the only difference.
    method = 'lasso'
class LassoLarsIC(LassoLars):
    """Lasso model fit with Lars using BIC or AIC for model selection

    The optimization objective for Lasso is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    AIC is the Akaike information criterion and BIC is the Bayes
    Information criterion. Such criteria are useful to select the value
    of the regularization parameter by making a trade-off between the
    goodness of fit and the complexity of the model. A good model should
    explain well the data while being simple.

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    criterion : 'bic' | 'aic'
        The type of criterion to use.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        Under the positive restriction the model coefficients do not converge
        to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
        0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
        algorithm are typically in congruence with the solution of the
        coordinate descent Lasso estimator.
        As a consequence using LassoLarsIC only makes sense for problems where
        a sparse solution is expected and/or reached.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform. Can be used for
        early stopping.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function.

    alpha_ : float
        the alpha parameter chosen by the information criterion

    n_iter_ : int
        number of iterations run by lars_path to find the grid of
        alphas.

    criterion_ : array, shape (n_alphas,)
        The value of the information criteria ('aic', 'bic') across all
        alphas. The alpha which has the smallest information criteria
        is chosen.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLarsIC(criterion='bic')
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
          max_iter=500, normalize=True, positive=False, precompute='auto',
          verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.  -1.11...]

    Notes
    -----
    The estimation of the number of degrees of freedom is given by:

    "On the degrees of freedom of the lasso"
    Hui Zou, Trevor Hastie, and Robert Tibshirani
    Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.

    http://en.wikipedia.org/wiki/Akaike_information_criterion
    http://en.wikipedia.org/wiki/Bayesian_information_criterion

    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """

    def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(float).eps, copy_X=True, positive=False):
        # np.finfo(float) replaces the deprecated np.finfo(np.float): the
        # np.float alias was removed in NumPy 1.24; the value is identical.
        self.criterion = criterion
        self.fit_intercept = fit_intercept
        self.positive = positive
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.copy_X = copy_X
        self.precompute = precompute
        self.eps = eps

    def fit(self, X, y, copy_X=True):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            training data.

        y : array-like, shape (n_samples,)
            target values.

        copy_X : boolean, optional, default True
            If ``True``, X will be copied; else, it may be overwritten.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)

        X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        max_iter = self.max_iter

        Gram = self._get_gram()

        # Full regularization path; each column of coef_path_ is the
        # coefficient vector at one alpha.
        alphas_, active_, coef_path_, self.n_iter_ = lars_path(
            X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
            method='lasso', verbose=self.verbose, max_iter=max_iter,
            eps=self.eps, return_n_iter=True, positive=self.positive)

        n_samples = X.shape[0]

        # Penalty weight: 2 for AIC, log(n) for BIC.
        if self.criterion == 'aic':
            K = 2  # AIC
        elif self.criterion == 'bic':
            K = log(n_samples)  # BIC
        else:
            raise ValueError('criterion should be either bic or aic')

        R = y[:, np.newaxis] - np.dot(X, coef_path_)  # residuals
        mean_squared_error = np.mean(R ** 2, axis=0)

        # Degrees of freedom: count of non-zero coefficients at each alpha
        # (Zou, Hastie & Tibshirani 2007). `int` replaces the deprecated
        # np.int alias (removed in NumPy 1.24).
        df = np.zeros(coef_path_.shape[1], dtype=int)  # Degrees of freedom
        for k, coef in enumerate(coef_path_.T):
            mask = np.abs(coef) > np.finfo(coef.dtype).eps
            if not np.any(mask):
                continue
            # get the number of degrees of freedom equal to:
            # Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
            df[k] = np.sum(mask)

        self.alphas_ = alphas_
        with np.errstate(divide='ignore'):
            self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
        n_best = np.argmin(self.criterion_)

        self.alpha_ = alphas_[n_best]
        self.coef_ = coef_path_[:, n_best]
        self._set_intercept(Xmean, ymean, Xstd)
        return self
| bsd-3-clause |
tom-f-oconnell/multi_tracker | multi_tracker_analysis/read_hdf5_file_to_pandas.py | 1 | 27945 |
import copy
import os
import imp
import pickle
import warnings
import time
import inspect
import warnings
# used?
import types
import numpy as np
import h5py
import pandas
import scipy.interpolate
def get_filenames(path, contains, does_not_contain=['~', '.pyc']):
    """Return sorted full paths of files in `path` whose names contain the
    substring `contains` and none of the substrings in `does_not_contain`.

    Replaces the previous `os.popen('ls ...')` implementation with
    os.listdir: no subshell is spawned and filenames containing quotes,
    spaces or newlines are handled correctly. Output is sorted to keep the
    deterministic ordering `ls` provided.
    """
    try:
        all_filelist = sorted(os.listdir(path))
    except OSError:
        # Missing/unreadable directory: mirror the old behavior, where a
        # failed `ls` produced empty output and thus an empty result.
        all_filelist = []
    filelist = []
    for filename in all_filelist:
        if contains in filename:
            if not any(nc in filename for nc in does_not_contain):
                filelist.append(os.path.join(path, filename))
    return filelist
def get_filename(path, contains, does_not_contain=['~', '.pyc'], verbose=False):
    """Return the unique filename in `path` matching `contains`, or None.

    Special case: for background images ('bgimg' in `contains`) several
    matches are allowed and the alphabetically last one (most recent, given
    timestamped names) is returned. Otherwise, zero or multiple matches
    print a diagnostic and return None.

    NOTE(review): `verbose` is currently unused -- presumably meant to gate
    the diagnostic print below; confirm before wiring it up, as callers may
    rely on the message.
    """
    filelist = get_filenames(path, contains, does_not_contain)
    if len(filelist) == 1:
        return filelist[0]
    elif len(filelist) > 0 and 'bgimg' in contains:
        pick = sorted(filelist)[-1]
        print('Found multiple background images, using ' + str(pick))
        return pick
    else:
        # TODO rewrite to exclude default printing, and call on check?
        # print() function form for consistency with the branch above
        # (and Python 3 compatibility).
        print('Found too many, or too few files, with ' + str(contains) +
              ' and without ' + str(does_not_contain))
        return None
# Convert a ROS bag file to hdf5 (skipped if the hdf5 already exists) and
# return the opened h5py File.
# NOTE(review): `mta` is never imported in this module -- the conversion
# branch will raise NameError whenever the hdf5 file does not already exist.
# Presumably `import multi_tracker_analysis as mta` is intended; confirm.
# NOTE(review): the mutable default `skip_messages=[]` is never mutated here,
# but consider `skip_messages=None` as a defensive cleanup.
def load_bag_as_hdf5(bag, skip_messages=[]):
    # '<name>.bag' -> '<name>.hdf5' alongside the bag file
    output_fname = bag.split('.')[0] + '.hdf5'
    print output_fname
    if not os.path.exists(output_fname):
        # Only convert once; subsequent calls reuse the existing hdf5.
        mta.bag2hdf5.bag2hdf5( bag,
                               output_fname,
                               max_strlen=200,
                               skip_messages=skip_messages)
    # Opened read-only; caller is responsible for closing the file.
    metadata = h5py.File(output_fname, 'r')
    return metadata
class Trajectory(object):
    """A single tracked object's slice of a trackedobjects dataframe.

    Rows with the requested `objid` are kept in ``self.pd``, and every
    dataframe column is additionally exposed as an attribute holding that
    column's numpy values (e.g. ``trajec.position_x``). Attributes can also
    be read dict-style via ``trajec['position_x']``.
    """

    def __init__(self, pd, objid, functions=None):
        # Keep only this object's rows.
        self.pd = pd[pd['objid'] == objid]
        # Mirror each column as a plain attribute of raw values.
        for column_name in self.pd.columns:
            setattr(self, column_name, self.pd[column_name].values)
        if functions is not None:
            self.__attach_analysis_functions__(functions)

    def __getitem__(self, key):
        # Dictionary-style access maps straight onto attribute access.
        return getattr(self, key)
class Dataset(object):
    # Wrapper around a trackedobjects dataframe providing per-objid
    # Trajectory access, optional pixel->unit conversion via a config file,
    # and optional conversion to a pickleable, dataframe-free form (save=True).
    def __init__(self, pd, path=None, save=False, convert_to_units=False, annotations=None):
        '''
        highly recommended to provide directory path
        convert_to_units requires that path is given, and that path contains a config file, which has attributes:
            - pixels_per_mm (or pixels_per_cm, etc)
            - position_zero = [x, y] # the x and y pixels of position zero
            - frames_per_second defined
        '''
        self.pd = pd
        self.keys = []
        self.__processed_trajecs__ = {}
        self.save = save
        self.path = path
        self.annotations = annotations
        # Default units are raw tracker units until convert_to_units runs.
        self.units = {'length': 'pixels', 'speed': 'pixels per frame'}
        if path is not None:
            if convert_to_units:
                self.load_config()
                # Discover the config attribute named 'pixels_per_<unit>';
                # it supplies both the scale factor and the unit name.
                pixels_per_unit_key = []
                for key in self.config.__dict__.keys():
                    if 'pixels_per_' in key:
                        pixels_per_unit_key = key
                        self.units['length'] = key.split('pixels_per_')[1]
                        self.units['speed'] = self.units['length'] + ' per second'
                        break
                self.pixels_per_unit = self.config.__dict__[pixels_per_unit_key]
                self.frames_per_second = self.config.frames_per_second
                self.convert_to_units()
            self.set_dataset_filename()
        if save:
            # Copy every trajectory out of the dataframe and drop the
            # dataframe itself, so the dataset can be pickled compactly.
            self.load_keys()
            self.copy_trajectory_objects_to_dataset()
            del(self.pd)
            self.pd = None
            print
            print 'Dataset loaded as a stand alone object - to save your dataset, use: '
            print 'dataset.save_dataset()'
            print
            print ' -- OR -- '
            print
            print 'del (dataset.config)'
            print 'import pickle'
            print 'f = open(dataset.dataset_filename, "w+")'
            print 'pickle.dump(dataset, f)'
            print 'f.close()'
    def set_dataset_filename(self):
        # Cache filename lives beside the raw trackedobjects.hdf5 file.
        raw_data_filename = get_filename(self.path, 'trackedobjects.hdf5')
        self.dataset_filename = raw_data_filename.split('trackedobjects.hdf5')[0] + 'trackedobjects_dataset.pickle'
    def convert_to_units(self):
        # In-place conversion: positions are re-referenced to config's
        # position_zero and scaled to physical units; velocities and speed
        # additionally convert per-frame values into per-second values.
        self.pd.position_x = (self.pd.position_x-self.config.position_zero[0])/float(self.pixels_per_unit)
        self.pd.position_y = (self.pd.position_y-self.config.position_zero[1])/float(self.pixels_per_unit)
        self.pd.speed = self.pd.speed/float(self.pixels_per_unit)*self.frames_per_second
        self.pd.velocity_x = self.pd.velocity_x/float(self.pixels_per_unit)*self.frames_per_second
        self.pd.velocity_y = self.pd.velocity_y/float(self.pixels_per_unit)*self.frames_per_second
    def load_config(self):
        # Loads the config module found in self.path (see load_config_from_path).
        self.config = load_config_from_path(self.path)
    def save_dataset(self):
        # Pickle self to self.dataset_filename. The config is dropped first
        # because loaded config modules are not reliably pickleable.
        try:
            del(self.config)
        except:
            pass
        f = open(self.dataset_filename, "w+")
        pickle.dump(self, f)
        f.close()
    def trajec(self, key):
        # Return the Trajectory for objid `key`: built on the fly from the
        # dataframe, or fetched from the saved per-key copies.
        if self.pd is not None:
            trajec = Trajectory(self.pd, key)
            return trajec
        else:
            return self.trajecs[key]
            #raise ValueError('This is a saved dataset, use dict access: Dataset.trajecs[key] for data')
    def framestamp_to_timestamp(self, frame):
        # Epoch time of a given frame index; handles frames with one row
        # (scalar) or several rows (Series -> first entry).
        # NOTE(review): uses the .ix indexer, removed in pandas >= 1.0.
        t = self.pd.ix[frame]['time_epoch']
        try:
            return t.iloc[0]
        except:
            return t
    def timestamp_to_framestamp(self, t):
        # Inverse mapping via linear interpolation between the first and last
        # (time, frame) pairs -- assumes a constant frame rate throughout.
        first_time = self.pd['time_epoch'].values[0]
        first_frame = self.pd['frames'].values[0]
        last_time = self.pd['time_epoch'].values[-1]
        last_frame = self.pd['frames'].values[-1]
        func = scipy.interpolate.interp1d([first_time, last_time],[first_frame, last_frame])
        return int(func(t))
    def load_keys(self, keys=None):
        # Populate self.keys: explicit list, all objids in the dataframe, or
        # (when annotations are supplied) only objids marked 'confirmed'.
        if self.annotations is None:
            if keys is None:
                self.keys = np.unique(self.pd.objid).tolist()
            else:
                self.keys = keys
        else:
            self.keys = []
            for key, note in self.annotations.items():
                if 'confirmed' in note['notes']:
                    self.keys.append(key)
    def copy_trajectory_objects_to_dataset(self):
        # Materialize a Trajectory per key so the dataframe can be dropped.
        self.trajecs = {}
        for key in self.keys:
            trajec = copy.copy( Trajectory(self.pd, key) )
            self.trajecs.setdefault(key, trajec)
    def calculate_function_for_all_trajecs(self, function):
        # Apply `function(trajec)` to every saved trajectory (side effects only).
        for key, trajec in self.trajecs.items():
            function(trajec)
    def remove_zero_length_objects(self):
        # Drop trajectories with no data points, and prune keys that no
        # longer have a trajectory. Only meaningful for saved datasets.
        if 'trajecs' in self.__dict__:
            for key, trajec in self.trajecs.items():
                if len(trajec.speed) == 0:
                    try:
                        del(self.trajecs[key])
                    except:
                        pass
                    try:
                        self.keys.remove(key)
                    except:
                        pass
            for key in self.keys:
                if key not in self.trajecs.keys():
                    self.keys.remove(key)
        else:
            warnings.warn('remove zero length objects only works on copyied datasets')
    def has_zero_length_objects(self):
        # True if any saved trajectory is empty or any key lacks a trajectory.
        if 'trajecs' in self.__dict__:
            for key, trajec in self.trajecs.items():
                if len(trajec.speed) == 0:
                    return True
            for key in self.keys:
                if key not in self.trajecs.keys():
                    return True
            return False
        else:
            warnings.warn('remove zero length objects only works on copyied datasets')
def load_dataset_from_path(path, load_saved=False, convert_to_units=True, use_annotations=True):
    '''
    load_saved only recommended for reasonably sized datasets, < 500 mb
    convert_to_units - see Dataset; converts pixels and frames to mm (or cm) and seconds, based on config

    Returns a Dataset built from the trackedobjects.hdf5 file in `path`.
    When load_saved is True, a cached pickled dataset is used if present and
    not older than the delete/cut/join instructions file; otherwise the
    dataset is rebuilt from raw data and cached.
    '''
    if load_saved:
        data_filename = get_filename(path, 'trackedobjects_dataset.pickle')
        if data_filename is not None:
            print data_filename
            # The cache is stale if the edit instructions were modified after
            # the cached dataset was written.
            delete_cut_join_instructions_filename = get_filename(path, 'delete_cut_join_instructions.pickle')
            epoch_time_when_dcjif_modified = os.path.getmtime(delete_cut_join_instructions_filename)
            epoch_time_when_dataset_modified = os.path.getmtime(data_filename)
            if epoch_time_when_dcjif_modified > epoch_time_when_dataset_modified:
                print 'Delete cut join instructions modified - recalculating new dataset'
            else:
                f = open(data_filename)
                dataset = pickle.load(f)
                f.close()
                # check path
                # an issue if the files get moved around
                if dataset.path != path:
                    dataset.path = path
                    dataset.set_dataset_filename()
                # Clean and re-cache if stale zero-length objects are present.
                if dataset.has_zero_length_objects():
                    dataset.remove_zero_length_objects()
                    dataset.save_dataset()
                print 'Loaded cached dataset last modified: '
                print time.localtime(epoch_time_when_dataset_modified)
                print
                return dataset
        else:
            print 'Could not find cached dataset in path: '
            print path
            print ' Loading dataset from raw data now...'
    # Fall through: build the dataset from the raw hdf5 data.
    data_filename = get_filename(path, 'trackedobjects.hdf5')
    pd, config = load_and_preprocess_data(data_filename)
    if use_annotations:
        annotations_file = open(get_filename(path, 'annotations'))
        annotations = pickle.load(annotations_file)
        annotations_file.close()
    else:
        annotations = None
    # if load_saved is True, copy the dataset, so it can be cached
    dataset = Dataset(pd, path=path,
                      save=load_saved,
                      convert_to_units=convert_to_units,
                      annotations=annotations)
    if load_saved:
        dataset.remove_zero_length_objects()
        dataset.save_dataset()
    return dataset
# TODO make methods like this "private". anywhere else accidentally calling it?
# (gui v2 was)
# TODO fix / suppress FutureWarning generated in here (was it here or gui in
# background?)
def load_data_as_pandas_dataframe_from_hdf5_file(filename, attributes=None):
    # TODO
    """Load a trackedobjects hdf5 file (or an already-pickled dataframe)
    into a pandas DataFrame indexed by frame id.

    attributes maps output column names to hdf5 dataset field names; when
    None, the standard tracker fields are used. Derived columns
    (time_epoch, speed) are added via calc_additional_columns.
    """
    # A pickled dataframe can be loaded directly, bypassing hdf5 parsing.
    if '.pickle' in filename:
        pd = pandas.read_pickle(filename)
        return pd
    # Prefer single-writer/multiple-reader mode so a live tracker can keep
    # writing; fall back if the hdf5 file/library does not support SWMR.
    try:
        data = h5py.File(filename, 'r', swmr=True)['data']
    except ValueError:
        data = h5py.File(filename, 'r', swmr=False)['data']
    if attributes is None:
        attributes = {
            'objid' : 'objid',
            'time_epoch_secs' : 'header.stamp.secs',
            'time_epoch_nsecs': 'header.stamp.nsecs',
            'position_x' : 'position.x',
            'position_y' : 'position.y',
            'measurement_x' : 'measurement.x',
            'measurement_y' : 'measurement.y',
            'velocity_x' : 'velocity.x',
            'velocity_y' : 'velocity.y',
            'angle' : 'angle',
            'frames' : 'header.frame_id',
            'area' : 'size',
        }
    # Frame id doubles as the dataframe index.
    index = data['header.frame_id'].flat
    d = {}
    for attribute, name in attributes.items():
        d.setdefault(attribute, data[name].flat)
    pd = pandas.DataFrame(d, index=index)
    # delete 0 frames (frames with no data)
    # TODO is this really working as expected?
    # NOTE(review): drop() is given a boolean array, not labels -- presumably
    # `pd[pd.index != 0]` or `pd.drop(0)` was intended; verify on modern
    # pandas before changing, as callers may depend on current behavior.
    pd = pd.drop(pd.index == [0])
    pd = calc_additional_columns(pd)
    return pd
# TODO TODO clarify exactly when any functions convert units. I don't really
# want it to do the conversion silently just based on whether a config file
# exists and has a certain value. At least include option to never convert
# units / another function.
def load_and_preprocess_data(hdf5_filename):
    '''
    Requires that a configuration file be found in the same directory as the
    hdf5 file, with the same prefix.

    Returns: pandas dataframe, processed according to configuration file,
    and the configuration file instance.
    '''
    # Accept a directory (or wrong file) and locate the trackedobjects file.
    if 'trackedobjects' not in hdf5_filename:
        print 'File is not a trackedobjects file, looking for a trackedobjects file in this directory'
        fname = get_filename(hdf5_filename, 'trackedobjects.hdf5')
        if fname is not None:
            hdf5_filename = fname
            print 'Found: ', fname
        else:
            raise ValueError('Could not find trackedobjects.hdf5 file')
    print 'in func w/ long name'
    pd = load_data_as_pandas_dataframe_from_hdf5_file(hdf5_filename, attributes=None)
    print 'done with long named function'
    # The config file shares the hdf5 file's prefix: config_<identifiercode>.py
    hdf5_basename = os.path.basename(hdf5_filename)
    directory = os.path.dirname(hdf5_filename)
    identifiercode = hdf5_basename.split('_trackedobjects')[0]
    config_filename = 'config_' + identifiercode + '.py'
    print 'looking for', config_filename, 'in', directory
    config_filename = get_filename(directory, config_filename)
    print 'done looking for config file'
    # TODO shouldn't this be calling load_config_from_path?
    if config_filename is not None:
        Config = imp.load_source('Config', config_filename)
        print 'creating config object'
        config = Config.Config(directory, identifiercode)
        print 'done creating config object'
        # Config may define an optional hook to preprocess the dataframe.
        if config.__dict__.has_key('preprocess_data_function'):
            pd = config.__getattribute__('preprocess_data_function')(pd)
            print 'done preprocessing data'
    else:
        config = None
    return pd, config
def load_config_from_path(path):
    """Locate and instantiate the analysis Config object for a directory.

    The identifier code is normally derived from the trackedobjects.hdf5
    filename; if that file is absent we fall back to parsing it out of the
    config filename itself. Returns None when no config file is present
    (previously this path could crash inside the fallback branch).
    """
    config_filename = get_filename(path, 'config')
    if config_filename is None:
        # No config file at all: nothing to load.
        return None
    try:
        hdf5_file = os.path.basename(get_filename(path, 'trackedobjects.hdf5'))
        identifiercode = hdf5_file.split('_trackedobjects')[0]
    except (TypeError, AttributeError):
        # get_filename returned None (no trackedobjects file); derive the
        # identifier from 'config_<identifiercode>.py' instead. The narrow
        # except replaces a bare `except:` that hid unrelated errors.
        config_file_basename = os.path.basename(config_filename)
        identifiercode = config_file_basename.split('config_')[1].split('.py')[0]
    print('identifiercode: ' + str(identifiercode))
    Config = imp.load_source('Config', config_filename)
    config = Config.Config(path, identifiercode)
    return config
def load_data_selection_from_path(path):
    """Load a pickled dataframe selection ('dataframe_*' file) from `path`.

    The matching config is looked up in the parent directory of `path`.
    Returns the tuple (dataframe, config).
    """
    selection_filename = get_filename(path, contains='dataframe_')
    dataframe = pandas.read_pickle(selection_filename)
    parent_directory = os.path.dirname(path)
    config = load_config_from_path(parent_directory)
    return dataframe, config
def find_instructions_related_to_objid(instructions, objid):
    """Find delete/cut/join instructions that reference `objid`.

    An instruction matches if `objid` equals its 'new_objid' or appears in
    its 'objids' list; an instruction satisfying both is reported twice,
    matching the original behavior. Matching indices are printed (as
    before) and now also returned as a list, so callers no longer have to
    scrape stdout; the previous return value was None, so returning a list
    is backward-compatible.
    """
    matches = []
    for i, instruction in enumerate(instructions):
        # `in instruction` replaces `in instruction.keys()` (same semantics).
        if 'new_objid' in instruction:
            if objid == instruction['new_objid']:
                print(i)
                matches.append(i)
        if 'objids' in instruction:
            if objid in instruction['objids']:
                print(i)
                matches.append(i)
    return matches
def delete_cut_join_trajectories_according_to_instructions(pd, instructions, interpolate_joined_trajectories=True):
    """Apply manual delete / cut / join edits to a trackedobjects dataframe.

    instructions may be a pickle filename, a single instruction dict, or a
    list of dicts. Each dict has an 'action' of 'delete', 'cut' or 'join';
    joins optionally interpolate the gap between trajectories (and can
    incorporate hand-clicked points via 'data_to_add'). Returns the edited
    dataframe (the input dataframe is also mutated in places).
    """
    # Normalize `instructions` to a list of dicts.
    if type(instructions) is str:
        f = open(instructions)
        instructions = pickle.load(f)
        f.close()
    elif type(instructions) is not list:
        instructions = [instructions]
    def get_proper_order_of_objects(dataset, keys):
        # Sort keys chronologically by each trajectory's first timestamp,
        # dropping empty trajectories.
        trajecs = []
        ts = []
        goodkeys = []
        for key in keys:
            trajec = dataset.trajec(key)
            if len(trajec.speed) > 0:
                trajecs.append(trajec)
                ts.append(trajec.time_epoch[0])
                goodkeys.append(key)
        order = np.argsort(ts)
        return np.array(goodkeys)[order]
    def get_indices_to_use_for_interpolation(key1, key2):
        # Use up to the last 4 points of key1 and first 4 points of key2 as
        # anchors for interpolating across the gap.
        length_key1 = len(dataset.trajec(key1).position_x)
        first_index_key1 = np.max( [length_key1-4, 0] )
        indices_key1 = np.arange( first_index_key1, length_key1 )
        length_key2 = len(dataset.trajec(key2).position_x)
        last_index_key2 = np.min( [length_key2, 0+4] )
        indices_key2 = np.arange( 0, last_index_key2 )
        return indices_key1, indices_key2
    for instruction in instructions:
        if instruction['action'] == 'delete':
            #pass
            # Remove every row belonging to this objid.
            pd = pd[pd.objid!=instruction['objid']]
        elif instruction['action'] == 'cut':
            # Split a trajectory: rows after the cut frame get a new objid.
            mask = (pd['objid']==instruction['objid']) & (pd['frames']>instruction['cut_frame_global'])
            pd.loc[mask,'objid'] = instruction['new_objid']
        elif instruction['action'] == 'join':
            if interpolate_joined_trajectories is False:
                # Simple join: just relabel all listed objids to one id.
                for key in instruction['objids']:
                    mask = pd['objid']==key
                    if 'new_objid' in instruction.keys():
                        print '*** ASSIGNING NEW OBJID: ', instruction['new_objid']
                        pd.loc[mask,'objid'] = instruction['new_objid']
                    else:
                        warnings.warn("Warning: using old join method; not using unique objid numbers")
                        pd.loc[mask,'objid'] = instruction['objids'][0]
            elif interpolate_joined_trajectories is True:
                # Interpolating join: fill the frame gap between successive
                # trajectories with synthesized rows, then relabel.
                dataset = Dataset(pd)
                keys = get_proper_order_of_objects(dataset, instruction['objids'])
                for k, key in enumerate(keys[0:-1]):
                    # Dataset is rebuilt each pass because pd changes below.
                    dataset = Dataset(pd)
                    last_frame = dataset.trajec(keys[k]).frames[-1]
                    first_frame = dataset.trajec(keys[k+1]).frames[0]
                    if first_frame < last_frame: # overlap between objects, keep the second object's data, since the first is likely bad kalman projections
                        mask = np.invert( (pd['objid']==keys[k]) & (pd['frames']>=first_frame) )
                        pd = pd[mask]
                    else:
                        frames_to_interpolate = np.arange(last_frame+1, first_frame)
                        if len(frames_to_interpolate) > 0:
                            indices_key1, indices_key2 = get_indices_to_use_for_interpolation(keys[k], keys[k+1])
                            # x: anchor frames from the end of the first and
                            # start of the second trajectory.
                            x = np.hstack((dataset.trajec(keys[k]).frames[indices_key1], dataset.trajec(keys[k+1]).frames[indices_key2]))
                            new_pd_dict = {attribute: None for attribute in pd.columns}
                            new_pd_dict.setdefault('interpolated', None)
                            index = frames_to_interpolate
                            if 'data_to_add' in instruction.keys():
                                # Hand-clicked (time, x, y) points falling
                                # inside the gap become extra anchors.
                                data_to_add_frames = []
                                data_to_add_x = []
                                data_to_add_y = []
                                for index, data_to_add in enumerate(instruction['data_to_add']):
                                    frame_for_data_to_add = dataset.timestamp_to_framestamp(data_to_add[0])
                                    print frame_for_data_to_add, last_frame, first_frame
                                    if frame_for_data_to_add > last_frame and frame_for_data_to_add < first_frame:
                                        data_to_add_frames.append(frame_for_data_to_add)
                                        data_to_add_x.append(data_to_add[1])
                                        data_to_add_y.append(data_to_add[2])
                                order = np.argsort(data_to_add_frames)
                                data_to_add_frames = np.array(data_to_add_frames)[order]
                                data_to_add_x = np.array(data_to_add_x)[order]
                                data_to_add_y = np.array(data_to_add_y)[order]
                            for attribute in pd.columns:
                                if attribute == 'objid':
                                    attribute_values = [keys[0] for f in frames_to_interpolate]
                                elif attribute == 'frames':
                                    attribute_values = frames_to_interpolate
                                else:
                                    # Linear interpolation of every other
                                    # column across the gap.
                                    y = np.hstack((dataset.trajec(keys[k])[attribute][indices_key1], dataset.trajec(keys[k+1])[attribute][indices_key2]))
                                    if 'data_to_add' in instruction.keys():
                                        if 'position' in attribute:
                                            # NOTE(review): clicked x feeds
                                            # position_y and vice versa --
                                            # presumably an image row/col vs
                                            # x/y convention; confirm.
                                            x_with_added_data = np.hstack((x, data_to_add_frames))
                                            if attribute == 'position_x':
                                                y_with_added_data = np.hstack((y, data_to_add_y))
                                            elif attribute == 'position_y':
                                                y_with_added_data = np.hstack((y, data_to_add_x))
                                            order = np.argsort(x_with_added_data)
                                            x_with_added_data = x_with_added_data[order]
                                            y_with_added_data = y_with_added_data[order]
                                            func = scipy.interpolate.interp1d(x_with_added_data,y_with_added_data)
                                        else:
                                            func = scipy.interpolate.interp1d(x,y)
                                    else:
                                        func = scipy.interpolate.interp1d(x,y)
                                    attribute_values = func(frames_to_interpolate)
                                new_pd_dict[attribute] = attribute_values
                            # Mark synthesized rows so they can be identified.
                            interpolated_values = np.ones_like(new_pd_dict['position_x'])
                            new_pd_dict['interpolated'] = interpolated_values
                            #return pd, new_pd_dict, frames_to_interpolate
                            new_pd = pandas.DataFrame(new_pd_dict, index=frames_to_interpolate)
                            pd = pandas.concat([pd, new_pd])
                            pd = pd.sort_index()
                # Finally relabel all joined trajectories to a single objid.
                for key in instruction['objids']:
                    mask = pd['objid']==key
                    if 'new_objid' in instruction.keys():
                        print '*** ASSIGNING NEW OBJID: ', key, ' to : ', instruction['new_objid']
                        pd.loc[mask,'objid'] = instruction['new_objid']
                    else:
                        warnings.warn("Warning: using old join method; not using unique objid numbers")
                        pd.loc[mask,'objid'] = instruction['objids'][0]
    return pd
def calc_additional_columns(pd):
    """Add derived 'time_epoch' and 'speed' columns to the frame in place.

    'time_epoch' combines the whole-second and nanosecond epoch columns;
    'speed' is the magnitude of the (velocity_x, velocity_y) vector.
    Returns the same frame for convenience.
    """
    whole_seconds = pd['time_epoch_secs']
    nano_seconds = pd['time_epoch_nsecs']
    pd['time_epoch'] = whole_seconds + nano_seconds * 1e-9
    velocity_components = [pd['velocity_x'], pd['velocity_y']]
    pd['speed'] = np.linalg.norm(velocity_components, axis=0)
    return pd
def framestamp_to_timestamp(pd, frame):
    """Return the 'time_epoch' of the first row whose index label is *frame*.

    Uses label-based .loc with a list selector so the result is always a
    Series (works whether the frame index is unique or duplicated); this
    replaces the ``.ix`` indexer, which was removed from modern pandas.
    """
    return pd.loc[[frame], 'time_epoch'].iloc[0]
def timestamp_to_framestamp(pd, t):
    """Return the frame (index label) whose 'time_epoch' is closest to *t*.

    Only rows sharing the same whole second as *t* are considered.
    ``idxmin`` on the absolute difference replaces ``np.argmin``, whose
    meaning for a Series changed between pandas versions (index label vs.
    integer position); the ``abs()`` also fixes the original code picking
    the most-negative difference (earliest frame) rather than the nearest.
    """
    pd_subset = pd[pd['time_epoch_secs'] == np.floor(t)]
    return (pd_subset['time_epoch'] - t).abs().idxmin()
def pixels_to_units(pd, pixels_per_unit, center=(0, 0)):
    """Convert pixel-based kinematic columns to physical units in place.

    Each listed column is divided by *pixels_per_unit*.  *center* is
    currently unused (kept for interface compatibility); the original
    mutable-list default is replaced with a tuple so the default value
    cannot be mutated across calls.  Returns the same frame.
    """
    attributes = ['speed',
                  'position_x',
                  'position_y',
                  'velocity_x',
                  'velocity_y',
                  ]
    for attribute in attributes:
        pd[attribute] = pd[attribute] / pixels_per_unit
    return pd
def load_multiple_datasets_into_single_pandas_data_frame(filenames, sync_frames=None):
    '''
    filenames - list of hdf5 files to load, full path name
    sync_frames - list of frames, one for each filename, these sync_frames will all be set to zero
                  defaults to using first frame for each dataset as sync
    '''
    frames = []
    for filename in filenames:
        frames.append(load_data_as_pandas_dataframe_from_hdf5_file(filename))
    if sync_frames is None:
        # Default: align every dataset on its own first frame.
        sync_frames = [np.min(df.frames) for df in frames]
    for offset, df in zip(sync_frames, frames):
        df.index -= offset
        df.frames -= offset
    return pandas.concat(frames)
def cull_short_trajectories(pd, min_length=4):
    """Drop every object whose trajectory has min_length rows or fewer."""
    lengths_by_key = get_objid_lengths(pd)
    keys = np.array(list(lengths_by_key.keys()))
    lengths = np.array(list(lengths_by_key.values()))
    # Strictly-greater comparison, as in the original implementation.
    keys_ok = keys[lengths > min_length]
    culled_pd = pd.query('objid in @keys_ok')
    print('Removing ' + str(len(keys) - len(keys_ok)) + ' trajectories below minimum length')
    return culled_pd
def compare_objids_from_two_dataframes(pd1, pd2):
    """Return (ids only in pd1, ids only in pd2), each sorted ascending."""
    first = np.unique(pd1.objid.values)
    second = np.unique(pd2.objid.values)
    second_set = set(second)
    first_set = set(first)
    unique_to_1 = [k for k in first if k not in second_set]
    unique_to_2 = [k for k in second if k not in first_set]
    return unique_to_1, unique_to_2
def cull_trajectories_that_do_not_cover_much_ground(pd, min_distance_travelled=10, print_keys=False):
    """Keep only objects whose summed per-frame speed exceeds the threshold.

    print_keys is accepted for interface compatibility but not used.
    """
    total_distance = pd.speed.groupby(pd.objid).agg('sum')
    acceptable_ids = total_distance.index[np.where(total_distance > min_distance_travelled)[0]]
    keep_mask = pd.objid.isin(acceptable_ids)
    culled_pd = pd[keep_mask]
    n_removed = len(pd.index) - len(culled_pd.index)
    print('Removing ' + str(n_removed) + ' trajectories ' + 'not covering enough distance.')
    return culled_pd
def cull_trajectories_that_do_not_cover_much_x_or_y_distance(pd, min_distance_travelled=10):
    """Keep objects whose x-range AND y-range both exceed the threshold.

    The x cull is applied first, then the y cull on the survivors; the
    printed count covers only the second (y) pass, as in the original.
    """
    def _ids_with_enough_range(frame, attribute):
        # Per-object span (max - min) of the given position column.
        lo = frame[attribute].groupby(frame.objid).agg('min')
        hi = frame[attribute].groupby(frame.objid).agg('max')
        span = hi - lo
        return span.index[np.where(span > min_distance_travelled)[0]]

    ok_x = _ids_with_enough_range(pd, 'position_x')
    pd = pd[pd.objid.isin(ok_x)]

    ok_y = _ids_with_enough_range(pd, 'position_y')
    culled_pd = pd[pd.objid.isin(ok_y)]

    print('Removing ' + str(len(pd.index) - len(culled_pd.index)) +
          ' trajectories not covering enough x or y distance.')
    return culled_pd
def get_objid_lengths(pd, objid_attribute='objid'):
    """Return a dict mapping each object id to its number of rows.

    ``value_counts`` replaces the original ``np.bincount`` construction,
    which required small non-negative integer ids and allocated an array
    of size ``max(id)``; this version works for any hashable id values
    and is equivalent for the integer-id case.
    """
    return pd[objid_attribute].value_counts().to_dict()
def remove_rows_above_speed_threshold(pd, speed_threshold=10):
    """Drop every row whose speed is at or above *speed_threshold*.

    Uses pandas' ``@``-variable reference instead of building the query
    string by concatenation, which was fragile for unusual threshold
    representations.
    """
    pd_q = pd.query('speed < @speed_threshold')
    print('Removing ' + str(len(pd.index) - len(pd_q.index)) +
          ' trajectories above speed threshold.')
    return pd_q
def remove_objects_that_never_exceed_minimum_speed(pd, speed_threshold=1):
    """Keep only objects whose peak speed exceeds *speed_threshold*."""
    peak_speeds = pd.speed.groupby(pd.objid).max()
    objidsok = peak_speeds.index[peak_speeds.values > speed_threshold]
    pd_q = pd.query('objid in @objidsok')
    print('Removing ' + str(len(pd.index) - len(pd_q.index)) + ' trajectories never exceeding ' +
          'minimum speed.')
    return pd_q
| mit |
ch3ll0v3k/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
    # Mock object for testing get_submatrix.
    def __init__(self):
        pass

    def get_indices(self, i):
        # Overridden to reproduce old get_submatrix test.
        # Always selects rows 0, 1, 4 and columns 2, 3, regardless of the
        # bicluster index i.
        return (np.where([True, True, False, False, True])[0],
                np.where([False, False, True, True])[0])
def test_get_submatrix():
    # get_submatrix must return the rows/columns selected by get_indices for
    # dense, sparse and nested-list inputs, and must return a copy: mutating
    # the submatrix must leave the original data untouched.
    data = np.arange(20).reshape(5, 4)
    model = MockBiclustering()

    for X in (data, csr_matrix(data), data.tolist()):
        submatrix = model.get_submatrix(0, X)
        if issparse(submatrix):
            submatrix = submatrix.toarray()
        # Rows (0, 1, 4) x columns (2, 3) of arange(20).reshape(5, 4).
        assert_array_equal(submatrix, [[2, 3],
                                       [6, 7],
                                       [18, 19]])
        submatrix[:] = -1
        if issparse(X):
            X = X.toarray()
        # The original input must not have been modified.
        assert_true(np.all(X != -1))
def _test_shape_indices(model):
    # Test get_shape and get_indices on fitted model.
    for i in range(model.n_clusters):
        m, n = model.get_shape(i)
        i_ind, j_ind = model.get_indices(i)
        # The index arrays must agree with the reported bicluster shape.
        assert_equal(len(i_ind), m)
        assert_equal(len(j_ind), n)
def test_spectral_coclustering():
    # Test Dhillon's Spectral CoClustering on a simple problem, over a grid
    # of solver parameters and for both dense and sparse input.
    param_grid = {'svd_method': ['randomized', 'arpack'],
                  'n_svd_vecs': [None, 20],
                  'mini_batch': [False, True],
                  'init': ['k-means++'],
                  'n_init': [10],
                  'n_jobs': [1]}
    random_state = 0
    S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
                                    random_state=random_state)
    S -= S.min()  # needs to be nonnegative before making it sparse
    S = np.where(S < 1, 0, S)  # threshold some values

    for mat in (S, csr_matrix(S)):
        for kwargs in ParameterGrid(param_grid):
            model = SpectralCoclustering(n_clusters=3,
                                         random_state=random_state,
                                         **kwargs)
            model.fit(mat)

            # Each of the 30 rows/columns belongs to exactly one bicluster.
            assert_equal(model.rows_.shape, (3, 30))
            assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
            assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
            # The planted biclusters are recovered perfectly.
            assert_equal(consensus_score(model.biclusters_,
                                         (rows, cols)), 1)

            _test_shape_indices(model)
def test_spectral_biclustering():
    # Test Kluger methods on a checkerboard dataset, varying one
    # non-default parameter at a time, for dense and sparse input.
    S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
                                      random_state=0)
    non_default_params = {'method': ['scale', 'log'],
                          'svd_method': ['arpack'],
                          'n_svd_vecs': [20],
                          'mini_batch': [True]}

    for mat in (S, csr_matrix(S)):
        for param_name, param_values in non_default_params.items():
            for param_value in param_values:
                model = SpectralBiclustering(
                    n_clusters=3,
                    n_init=3,
                    init='k-means++',
                    random_state=0,
                )
                model.set_params(**dict([(param_name, param_value)]))

                if issparse(mat) and model.get_params().get('method') == 'log':
                    # cannot take log of sparse matrix
                    assert_raises(ValueError, model.fit, mat)
                    continue
                else:
                    model.fit(mat)

                    # A 3x3 checkerboard yields 9 biclusters; each row and
                    # each column belongs to exactly 3 of them.
                    assert_equal(model.rows_.shape, (9, 30))
                    assert_equal(model.columns_.shape, (9, 30))
                    assert_array_equal(model.rows_.sum(axis=0),
                                       np.repeat(3, 30))
                    assert_array_equal(model.columns_.sum(axis=0),
                                       np.repeat(3, 30))
                    assert_equal(consensus_score(model.biclusters_,
                                                 (rows, cols)), 1)

                    _test_shape_indices(model)
def _do_scale_test(scaled):
    """Check that rows sum to one constant, and columns to another.

    Assumes a 100x100 input matrix (the expected vectors are tiled to
    length 100).
    """
    row_sum = scaled.sum(axis=1)
    col_sum = scaled.sum(axis=0)
    if issparse(scaled):
        # Sparse sums come back as np.matrix; flatten to 1-D arrays.
        row_sum = np.asarray(row_sum).squeeze()
        col_sum = np.asarray(col_sum).squeeze()
    assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
                              decimal=1)
    assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
                              decimal=1)
def _do_bistochastic_test(scaled):
    """Check that rows and columns sum to the same constant."""
    # First verify row/column sums are each constant, then that the two
    # constants agree.
    _do_scale_test(scaled)
    assert_almost_equal(scaled.sum(axis=0).mean(),
                        scaled.sum(axis=1).mean(),
                        decimal=1)
def test_scale_normalize():
    # _scale_normalize must produce constant row/column sums and preserve
    # sparsity for sparse input.
    generator = np.random.RandomState(0)
    X = generator.rand(100, 100)
    for mat in (X, csr_matrix(X)):
        scaled, _, _ = _scale_normalize(mat)
        _do_scale_test(scaled)
        if issparse(mat):
            assert issparse(scaled)
def test_bistochastic_normalize():
    # _bistochastic_normalize must make row and column sums equal and
    # preserve sparsity for sparse input.
    generator = np.random.RandomState(0)
    X = generator.rand(100, 100)
    for mat in (X, csr_matrix(X)):
        scaled = _bistochastic_normalize(mat)
        _do_bistochastic_test(scaled)
        if issparse(mat):
            assert issparse(scaled)
def test_log_normalize():
    # adding any constant to a log-scaled matrix should make it
    # bistochastic
    generator = np.random.RandomState(0)
    mat = generator.rand(100, 100)
    # +1 is the arbitrary constant; any constant should work.
    scaled = _log_normalize(mat) + 1
    _do_bistochastic_test(scaled)
def test_fit_best_piecewise():
    # The first two vectors are piecewise-constant over two blocks; the
    # third is not, so with n_best=2 the first two must be selected.
    model = SpectralBiclustering(random_state=0)
    vectors = np.array([[0, 0, 0, 1, 1, 1],
                        [2, 2, 2, 3, 3, 3],
                        [0, 1, 2, 3, 4, 5]])
    best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
    assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
    # Projecting onto the supplied vectors and clustering the projection
    # should separate the two pairs of identical rows.
    model = SpectralBiclustering(random_state=0)
    data = np.array([[1, 1, 1],
                     [1, 1, 1],
                     [3, 6, 3],
                     [3, 6, 3]])
    vectors = np.array([[1, 0],
                        [0, 1],
                        [0, 0]])
    for mat in (data, csr_matrix(data)):
        # Pass the loop variable so the sparse case is actually exercised;
        # the original passed `data`, silently skipping csr_matrix input.
        labels = model._project_and_cluster(mat, vectors,
                                            n_clusters=2)
        assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
    # The SkipTest below disables the whole test, so the assertions that
    # follow are currently unreachable.
    raise SkipTest("This test is failing on the buildbot, but cannot"
                   " reproduce. Temporarily disabling it until it can be"
                   " reproduced and fixed.")
    # Noise-free checkerboards of several shapes should be recovered
    # perfectly (consensus score of 1).
    model = SpectralBiclustering(3, svd_method="arpack", random_state=0)

    S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
                                      random_state=0)
    model.fit(S)
    assert_equal(consensus_score(model.biclusters_,
                                 (rows, cols)), 1)

    S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
                                      random_state=0)
    model.fit(S)
    assert_equal(consensus_score(model.biclusters_,
                                 (rows, cols)), 1)

    S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
                                      random_state=0)
    model.fit(S)
    assert_equal(consensus_score(model.biclusters_,
                                 (rows, cols)), 1)
def test_errors():
    # Invalid constructor parameters must raise ValueError at fit time.
    data = np.arange(25).reshape((5, 5))

    # n_clusters: wrong tuple length, wrong types.
    model = SpectralBiclustering(n_clusters=(3, 3, 3))
    assert_raises(ValueError, model.fit, data)

    model = SpectralBiclustering(n_clusters='abc')
    assert_raises(ValueError, model.fit, data)

    model = SpectralBiclustering(n_clusters=(3, 'abc'))
    assert_raises(ValueError, model.fit, data)

    # Unknown method / svd_method names.
    model = SpectralBiclustering(method='unknown')
    assert_raises(ValueError, model.fit, data)

    model = SpectralBiclustering(svd_method='unknown')
    assert_raises(ValueError, model.fit, data)

    # Degenerate component counts, and n_best > n_components.
    model = SpectralBiclustering(n_components=0)
    assert_raises(ValueError, model.fit, data)

    model = SpectralBiclustering(n_best=0)
    assert_raises(ValueError, model.fit, data)

    model = SpectralBiclustering(n_components=3, n_best=4)
    assert_raises(ValueError, model.fit, data)

    # Non-2D input is rejected.
    model = SpectralBiclustering()
    data = np.arange(27).reshape((3, 3, 3))
    assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
fcole90/nemesys-qos | nemesys/netgraph.py | 9 | 3506 | #!/usr/bin/env python
# printing_in_wx.py
#
from collections import deque
from contabyte import Contabyte
from pcapper import Pcapper
from threading import Thread
import math
import matplotlib
import numpy
import socket
import time
import wx
SECONDS = 60
POINTS_PER_SECONDS = 1
SAMPLE_INTERVAL = 0.8
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas
from matplotlib.figure import Figure
class Updater(Thread):
    # Background thread that samples network throughput with Pcapper and
    # Contabyte and pushes the rolling download/upload series to the plot
    # window via wx.CallAfter.
    def __init__(self, window, ip, nap):
        Thread.__init__(self)
        self._window = window  # Netgraph frame to update; None signals stop
        self._ip = ip          # local IP address sniffed by Pcapper
        self._nap = nap        # NAP address handed to Contabyte
        maxlen = int(math.ceil(SECONDS * POINTS_PER_SECONDS))
        # Fixed-length rolling windows, pre-filled with zeros so the plot
        # starts with a complete, flat trace.
        self._samples_down = deque(maxlen=maxlen)
        self._samples_up = deque(maxlen=maxlen)
        for i in range(0, maxlen):
            self._samples_down.append(0)
            self._samples_up.append(0)
        self._p = Pcapper(self._ip)
        self._p.start()

    def _get_sample(self):
        # Sniff for SAMPLE_INTERVAL seconds, then convert the byte counters
        # to kbit/s (bytes * 8 / (interval * 1000)); the upload figure is
        # negated so its trace plots below the x-axis.
        self._p.sniff(Contabyte(self._ip, self._nap))
        time.sleep(SAMPLE_INTERVAL)
        self._p.stop_sniff()
        stats = self._p.get_stats()
        down = stats.byte_down_all_net * 8.0 / (SAMPLE_INTERVAL * 1000.0)
        up = -stats.byte_up_all_net * 8.0 / (SAMPLE_INTERVAL * 1000.0)
        return (down, up)

    def _update_samples(self):
        # Shift the rolling windows by one sample.
        (down, up) = self._get_sample()
        self._samples_down.popleft()
        self._samples_down.append(down)
        self._samples_up.popleft()
        self._samples_up.append(up)

    def run(self):
        # Loop until stop() clears the window reference.
        while(self._window):
            try:
                self._update_samples()
                wx.CallAfter(self._window.Plot_Data, list(self._samples_down), list(self._samples_up))
                time.sleep(1.0 / POINTS_PER_SECONDS - SAMPLE_INTERVAL)
            except:
                # NOTE(review): bare except swallows every error (including
                # KeyboardInterrupt) and silently ends the thread --
                # consider narrowing to Exception.
                break
        self._p.stop()
        self._p.join()

    def stop(self):
        # Clearing the reference makes run() fall out of its loop.
        self._window = None
class Netgraph(wx.Frame):
    # Fixed-size window plotting the last SECONDS seconds of download (red)
    # and upload (blue) throughput in Kbps.
    def __init__(self):
        wx.Frame.__init__(self, None, id=wx.ID_ANY, title='Netgraph', size=wx.Size(400, 200), style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.RESIZE_BOX))
        self.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
        self.figure = Figure()
        self.axes = self.figure.add_subplot(111)
        self.axes.set_xticklabels([])
        self.axes.set_ylabel("Kbps")
        # Running y-axis extrema, widened in _check_limits as new samples
        # exceed them.
        self._min = 0
        self._max = 0
        t = numpy.arange(-SECONDS, 0, 1.0 / POINTS_PER_SECONDS)
        # NOTE(review): numpy.zeros() receives a float size here
        # (60 * 1.0 / POINTS_PER_SECONDS), which newer numpy rejects; the
        # hard-coded 60 presumably should be SECONDS -- confirm.
        self.d, = self.axes.plot(t, numpy.zeros(60 * 1.0 / POINTS_PER_SECONDS), linewidth=2, color='red')
        self.u, = self.axes.plot(t, numpy.zeros(60 * 1.0 / POINTS_PER_SECONDS), linewidth=2, color='blue')
        self.canvas = FigCanvas(self, -1, self.figure)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
        self.SetSizer(sizer)
        self.Fit()

    def onExit(self, event=None):
        self.Destroy()

    def _check_limits(self, u, d):
        # Widen the y-limits (with 50-unit padding) whenever a new extreme
        # appears.  Note: `min`/`max` shadow the builtins locally.
        min = numpy.min(u)
        max = numpy.max(d)
        if (min < self._min or max > self._max):
            self._min = min
            self._max = max
            self.axes.set_ylim(self._min - 50, self._max + 50)

    def Plot_Data(self, d, u):
        # Invoked from the Updater thread via wx.CallAfter.
        self._check_limits(u, d)
        self.d.set_ydata(d)
        self.u.set_ydata(u)
        self.canvas.draw()
if __name__ == '__main__':
    app = wx.PySimpleApp()
    fig = Netgraph()
    fig.Show()
    # Discover the local outbound IP by opening a throwaway TCP connection.
    s = socket.socket(socket.AF_INET)
    s.connect(('www.fub.it', 80))
    ip = s.getsockname()[0]
    s.close()
    nap = '193.104.137.133'
    # Start the sampling thread, run the GUI loop, then shut down cleanly.
    u = Updater(fig, ip, nap)
    u.start()
    app.MainLoop()
    u.stop()
    u.join()
| gpl-3.0 |
stelat/GoRec | train_SVM.py | 1 | 1689 | import numpy as np
import cv2
from sklearn import svm, cross_validation
import pickle
from sklearn.externals import joblib
from extract_feature import *
from os import listdir
from os.path import isfile, join
from random import shuffle
def data_shuffle(X, Y):
    """Shuffle X and Y in unison, preserving their pairwise correspondence.

    Returns two new lists; the inputs are not modified.  The index
    sequence is materialised with list() because random.shuffle cannot
    operate on a range object on Python 3 (the original
    ``shuffle(range(...))`` raised TypeError there).
    """
    indices = list(range(len(X)))
    shuffle(indices)
    shuffled_x = [X[i] for i in indices]
    shuffled_y = [Y[i] for i in indices]
    return shuffled_x, shuffled_y
def read_data(path):
    # Build the training set from the three class subfolders.  X collects
    # feature vectors, Y the class labels: 0 = empty, 1 = white, 2 = black
    # (the label is the position of the subfolder in `paths`).
    X = []
    Y = []
    empty_path = path + "/empty"
    white_path = path + "/white"
    black_path = path + "/black"
    paths = [empty_path, white_path, black_path]
    for i, path in enumerate(paths):
        # Note: the loop rebinds `path`, shadowing the function argument.
        onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
        for image in onlyfiles:
            image_array = cv2.imread(path + "/" + image)
            X.append(extract_feature(image_array))
            Y.append(i)
    # Shuffle features and labels together before converting to arrays.
    X, Y = data_shuffle(X, Y)
    return (np.asarray(X), np.asarray(Y))
# http://scikit-learn.org/stable/modules/svm.html
def trainSVM(X, y):
    """Fit and return a linear-kernel support vector classifier."""
    # try different kernel
    classifier = svm.SVC(kernel='linear')
    classifier.fit(X, y)
    return classifier
def testSVM(testDataSetpath, clf):
    """Print the classification accuracy of *clf* over a set of test images.

    NOTE(review): relies on label() and extract_feature() being defined
    elsewhere; label() is not defined in this file -- confirm its source.
    """
    # The counters were never initialised in the original, raising NameError
    # on first use.
    ok = 0
    ko = 0
    for image in testDataSetpath:
        if label(image) == clf.predict(extract_feature(image)):
            ok += 1
        else:
            ko += 1
    if ok + ko == 0:
        # Guard against division by zero on an empty test set.
        print("accuracy: undefined (no test images)")
        return
    # str() conversion fixes the original str + float TypeError.
    print("accuracy:" + str(float(ok) / (ok + ko)))
#main:
# Load features/labels, then report 5-fold cross-validation scores for a
# linear SVM.  The explicit train/save/test steps are left commented out.
(X,Y)=read_data("data")
clf = svm.SVC(kernel='linear', C=1)
scores = cross_validation.cross_val_score(clf, X, Y, cv=5)
print X.shape
print Y
print scores
#clf=trainSVM(X,Y)
#save the SVM parameters to use it later
#joblib.dump(clf, 'linearSVM.pkl')
#testSVM("dataTest",clf)
| bsd-3-clause |
peterwilletts24/Python-Scripts | plot_scripts/Rain/Diurnal/sea_diurnal_rain_plot_domain_constrain_southern_eastern_indian_ocean.py | 1 | 10337 | """
Load npy xy, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rc('font', family = 'serif', serif = 'cmr10')
import numpy as np
from datetime import timedelta
import datetime
import imp
import re
from textwrap import wrap
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
#unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py')
###############
# Things to change

# Root directory of the per-experiment diurnal rainfall .npy archives.
top_dir='/nfs/a90/eepdw/Data/Rain_Land_Sea_Diurnal'

pp_file = 'avg.5216'

# Analysis domain (degrees): southern/eastern Indian Ocean box.
lon_max = 110
lon_min = 80
lat_max= 5
lat_min=-10

# TRMM diurnal-average observation file for the same domain box.
trmm_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/'
trmm_file = "trmm_diurnal_average_lat_%s_%s_lon_%s_%s_southern_eastern_indian_ocean.npz" % (lat_min,lat_max, lon_min, lon_max)

#############

# Make own time x-axis
# Hourly timestamps from 06:30 on 21 Aug 2011 to 06:30 on 22 Aug 2011.
d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, 6,30), datetime.datetime(2011, 8, 22, 6, 30), timedelta(hours=1))

formatter = matplotlib.dates.DateFormatter('%H:%M')
def main():
    """Plot domain-averaged diurnal rainfall over the southern/eastern
    Indian Ocean for TRMM observations and for the parametrised and
    explicit model runs, then save the figure with and without a title.

    Refactors the original's three near-identical ~60-line if/elif style
    cascades into a single lookup table plus helper, removes the unused
    loop counters, and replaces Python-2-only ``print``/``except E, e``
    syntax with forms valid on Python 2.6+ and Python 3.
    """
    # experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu'] # All 12
    experiment_ids_p = ['djznw', 'djzny', 'djznq', 'dklzq', 'dkmbq', 'dkjxq']  # Most of Params
    experiment_ids_e = ['dklwu', 'dklyu', 'djzns', 'dkbhu', 'djznu', 'dkhgu']  # Most of Explicit

    NUM_COLOURS = 15
    cmap = cm.get_cmap(cm.Set1, NUM_COLOURS)

    # (colour index, linewidth, linestyle) per model run; replaces the
    # repeated per-group if/elif cascades of the original.
    line_styles = {
        'djznw': (1, 0.2, '--'),
        'djzny': (3, 0.5, '--'),
        'djznq': (5, 0.8, '--'),
        'dkjxq': (5, 0.8, ':'),
        'dklzq': (7, 1, '--'),
        'dklwu': (7, 1, '-'),
        'dkmbq': (9, 1.3, '--'),
        'dklyu': (9, 1.3, '-'),
        'djzns': (11, 1.6, '-'),
        'dkbhu': (13, 1.9, '-'),
        'dkhgu': (13, 1.9, ':'),
        'djznu': (15, 2., '-'),
    }

    def plot_group(experiment_ids, ls):
        """Plot one group of runs; return (legend handles, legend labels)."""
        entries = []
        labels = []
        for experiment_id in experiment_ids:
            print(experiment_id)
            colour_idx, linewidth, linestylez = line_styles[experiment_id]
            colour = cmap(1. * colour_idx / NUM_COLOURS)
            expmin1 = experiment_id[:-1]
            try:
                plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy'
                                 % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max))
                # x3600: per-second rate scaled for the mm/h y-axis.
                l, = plt.plot_date(d, plotnp[0] * 3600,
                                   label='%s' % (model_name_convert_legend.main(experiment_id)),
                                   linewidth=linewidth, linestyle=linestylez,
                                   marker='', markersize=2, fmt='', color=colour)
                entries.append(l)
                labels.append('%s' % (model_name_convert_legend.main(experiment_id)))
            except Exception as e:
                # A missing .npy for one run must not abort the whole figure.
                print(e)
        return entries, labels

    for ls in ['sea']:
        fig = plt.figure(figsize=(12, 6))
        ax = fig.add_subplot(111)

        # TRMM observations: map diurnal-cycle hours onto the 21/22 August
        # plotting window (06:00+ on the 21st, 00:00-06:00 on the 22nd).
        plot_trmm = np.load('%s%s_%s' % (trmm_dir, ls, trmm_file))
        dates_trmm = []
        p = []
        for dp in plot_trmm['hour']:
            print(dp)
            if ((int(dp) < 23) & (int(dp) >= 6)):
                dates_trmm.append(datetime.datetime(2011, 8, 21, int(dp), 0))
                p.append(plot_trmm['mean'][plot_trmm['hour'] == dp])
            if ((int(dp) >= 0) & (int(dp) <= 6)):
                dates_trmm.append(datetime.datetime(2011, 8, 22, int(dp), 0))
                p.append(plot_trmm['mean'][plot_trmm['hour'] == dp])

        order = np.argsort(dates_trmm, axis=0)
        d_trmm = np.array(dates_trmm)[order]
        pl = np.array(p)[order]

        l, = plt.plot_date(d_trmm, pl, label='TRMM', linewidth=2, linestyle='-',
                           marker='', markersize=2, fmt='', color='#262626')
        l0 = plt.legend([l], ['TRMM'], title='', frameon=False, prop={'size': 8},
                        loc=9, bbox_to_anchor=(0.21, 0, 1, 1))
        # Change the legend label colors to almost black
        for text in l0.texts:
            text.set_color('#262626')

        entries, labels = plot_group(experiment_ids_p, ls)
        l1 = plt.legend(entries, labels, title='Parametrised', loc=9,
                        frameon=False, prop={'size': 8}, bbox_to_anchor=(0, 0, 1, 1))
        for text in l1.texts:
            text.set_color('#262626')

        entries, labels = plot_group(experiment_ids_e, ls)
        l2 = plt.legend(entries, labels, title='Explicit', loc=9, frameon=False,
                        bbox_to_anchor=(0.11, 0, 1, 1), prop={'size': 8})
        for text in l2.texts:
            text.set_color('#262626')

        # Adding a new legend replaces the previous one, so re-add the
        # earlier legends as extra artists.
        plt.gca().add_artist(l1)
        plt.gca().add_artist(l0)
        plt.gca().xaxis.set_major_formatter(formatter)

        plt.xlabel('Time (UTC)')
        plt.ylabel('mm/h')

        title = "Domain Averaged Rainfall - %s" % ls
        # Wrap the title every ~68 characters and strip quote/paren marks.
        t = re.sub('(.{68} )', '\\1\n', str(title), 0, re.DOTALL)
        t = re.sub(r'[(\']', ' ', t)
        t = re.sub(r'[\',)]', ' ', t)

        pp_filenodot = pp_file.replace(".", "")

        # Bit of formatting
        # Set colour of axis lines
        for spine in ['bottom', 'left']:
            ax.spines[spine].set_linewidth(0.5)
            ax.spines[spine].set_color('#262626')
        # Remove top and right axes lines ("spines")
        for spine in ['top', 'right']:
            ax.spines[spine].set_visible(False)
        # Get rid of ticks. The position of the numbers is informative enough of
        # the position of the value.
        ax.xaxis.set_ticks_position('none')
        ax.yaxis.set_ticks_position('none')
        # Change the labels to the off-black
        ax.xaxis.label.set_color('#262626')
        ax.yaxis.label.set_color('#262626')

        if not os.path.exists('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/'):
            os.makedirs('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/')

        plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_southern_eastern_indian_ocean_notitle.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')

        plt.title('\n'.join(wrap('%s' % (t.title()), 1000, replace_whitespace=False)), fontsize=16, color='#262626')
        # plt.show()
        plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_southern_eastern_indian_ocean.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
        plt.close()


if __name__ == '__main__':
    main()
| mit |
marcharper/stationary | examples/entropic_equilibria_plots.py | 1 | 9181 | """Figures for the publication
"Entropic Equilibria Selection of Stationary Extrema in Finite Populations"
"""
from __future__ import print_function
import math
import os
import pickle
import sys
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import scipy.misc
import ternary
import stationary
from stationary.processes import incentives, incentive_process
## Global Font config for plots ###
font = {'size': 14}
matplotlib.rc('font', **font)
def compute_entropy_rate(N=30, n=2, m=None, incentive_func=None, beta=1.,
                         mu=None, exact=False, lim=1e-13, logspace=False):
    """Return (entropy rate, stationary distribution) for the Moran-type
    process defined by game matrix *m* on *n* types and population size *N*.

    Defaults: neutral (all-ones) game matrix, Fermi incentive, mutation
    rate 1/N.  Explicit ``is None`` tests replace the original truthiness
    checks, which raised ValueError when *m* was a numpy array and wrongly
    overwrote an explicit ``mu=0``.
    """
    if m is None:
        m = np.ones((n, n))
    if incentive_func is None:
        incentive_func = incentives.fermi
    if mu is None:
        # mu = (n-1.)/n * 1./(N+1)
        mu = 1. / N
    fitness_landscape = incentives.linear_fitness_landscape(m)
    incentive = incentive_func(fitness_landscape, beta=beta, q=1)
    edges = incentive_process.multivariate_transitions(
        N, incentive, num_types=n, mu=mu)
    s = stationary.stationary_distribution(edges, exact=exact, lim=lim,
                                           logspace=logspace)
    e = stationary.entropy_rate(edges, s)
    return e, s
# Entropy Characterization Plots
def dict_max(d):
    """Return the (key, value) pair with the largest value in *d*.

    Ties keep the first key encountered in iteration order; an empty
    dict raises IndexError, matching the original behaviour.
    """
    items = list(d.items())
    best_key, best_value = items[0]
    for key, value in items[1:]:
        if value > best_value:
            best_key, best_value = key, value
    return best_key, best_value
def plot_data_sub(domain, plot_data, gs, labels=None, sci=True, use_log=False):
    """Draw the three stacked panels in column 0 of gridspec *gs*.

    plot_data is a list of series; each series is a list of
    (entropy_rate, stationary_max, relative_trajectory_entropy) tuples
    aligned with *domain*.  Returns the three axes.
    """
    # Plot Entropy Rate (only the first series is drawn here).
    ax1 = plt.subplot(gs[0, 0])
    ax1.plot(domain, [x[0] for x in plot_data[0]], linewidth=2)

    # Plot Stationary Probabilities and entropies
    ax2 = plt.subplot(gs[1, 0])
    ax3 = plt.subplot(gs[2, 0])

    if use_log:
        transform = math.log
    else:
        transform = lambda x: x

    # Tuple index 1 (s_max) goes untransformed to ax2; index 2 (RTE) goes
    # to ax3, optionally log-transformed.
    for i, ax, t in [(1, ax2, lambda x: x), (2, ax3, transform)]:
        if labels:
            for data, label in zip(plot_data, labels):
                ys = list(map(t, [x[i] for x in data]))
                ax.plot(domain, ys, linewidth=2, label=label)
        else:
            for data in plot_data:
                ys = list(map(t, [x[i] for x in data]))
                ax.plot(domain, ys, linewidth=2)

    ax1.set_ylabel("Entropy Rate")
    ax2.set_ylabel("Stationary\nExtrema")
    if use_log:
        ax3.set_ylabel("log RTE $H_v$")
    else:
        ax3.set_ylabel("RTE $H_v$")
    if sci:
        # Force scientific notation on the probability/entropy axes.
        ax2.yaxis.get_major_formatter().set_powerlimits((0, 0))
        ax3.yaxis.get_major_formatter().set_powerlimits((0, 0))
    return ax1, ax2, ax3
def ER_figure_beta2(N, m, betas):
    """Varying Beta, two dimensional example"""
    # Beta test
    # m = [[1, 4], [4, 1]]

    # Compute the data: for each beta record the entropy rate, the maximum
    # stationary probability, and their ratio.
    ss = []
    plot_data = [[]]
    for beta in betas:
        print(beta)
        e, s = compute_entropy_rate(N=N, m=m, beta=beta, exact=True)
        ss.append(s)
        state, s_max = dict_max(s)
        plot_data[0].append((e, s_max, e / s_max))

    gs = gridspec.GridSpec(3, 2)
    ax1, ax2, ax3 = plot_data_sub(betas, plot_data, gs, sci=False)
    ax3.set_xlabel("Strength of Selection $\\beta$")

    # Plot every fourth stationary distribution over states (i, N - i).
    ax4 = plt.subplot(gs[:, 1])
    for s in ss[::4]:
        ax4.plot(range(0, N+1), [s[(i, N-i)] for i in range(0, N+1)])
    ax4.set_title("Stationary Distributions")
    ax4.set_xlabel("Population States $(i , N - i)$")
def remove_boundary(s):
    """Return a copy of distribution *s* keeping only interior states.

    A state (a, b, c) is interior when every coordinate is nonzero;
    boundary states (any coordinate zero) are dropped.
    """
    interior = dict()
    for (a, b, c), probability in s.items():
        if a != 0 and b != 0 and c != 0:
            interior[(a, b, c)] = probability
    return interior
def ER_figure_beta3(N, m, mu, betas, iss_states, labels, stationary_beta=0.35,
                    pickle_filename="figure_beta3.pickle"):
    """Varying Beta, three dimensional example"""
    # Cache the per-beta sweep in a local pickle so reruns skip the
    # expensive stationary-distribution computations.
    ss = []
    plot_data = [[] for _ in range(len(iss_states))]
    if os.path.exists(pickle_filename):
        with open(pickle_filename, 'rb') as f:
            plot_data = pickle.load(f)
    else:
        for beta in betas:
            print(beta)
            e, s = compute_entropy_rate(
                N=N, m=m, n=3, beta=beta, exact=False, mu=mu, lim=1e-10)
            ss.append(s)
            # Track the stationary probability of each supplied ISS state.
            for i, iss_state in enumerate(iss_states):
                s_max = s[iss_state]
                plot_data[i].append((e, s_max, e / s_max))
        with open(pickle_filename, 'wb') as f:
            pickle.dump(plot_data, f)

    gs = gridspec.GridSpec(3, 2)
    ax1, ax2, ax3 = plot_data_sub(betas, plot_data, gs, labels=labels,
                                  use_log=True, sci=False)
    ax3.set_xlabel("Strength of selection $\\beta$")
    ax2.legend(loc="upper right")

    # Plot example stationary distribution as a ternary heatmap at a fixed
    # illustrative beta.
    ax4 = plt.subplot(gs[:, 1])
    _, s = compute_entropy_rate(
        N=N, m=m, n=3, beta=stationary_beta, exact=False, mu=mu, lim=1e-15)
    _, tax = ternary.figure(ax=ax4, scale=N,)
    tax.heatmap(s, cmap="jet", style="triangular")
    tax.ticks(axis='lbr', linewidth=1, multiple=10, offset=0.015)
    tax.clear_matplotlib_ticks()
    ax4.set_xlabel("Population States $a_1 + a_2 + a_3 = N$")
    # tax.left_axis_label("$a_1$")
    # tax.right_axis_label("$a_2$")
    # tax.bottom_axis_label("$a_3$")
def ER_figure_N(Ns, m, beta=1, labels=None):
    """Varying population size."""
    ss = []
    plot_data = [[] for _ in range(3)]
    n = len(m[0])
    for N in Ns:
        print(N)
        mu = 1 / N
        # Number of states, used to normalise the RTE.
        # NOTE(review): scipy.misc.comb was removed in scipy >= 1.3
        # (scipy.special.comb is its replacement) -- confirm the pinned
        # scipy version.
        norm = float(scipy.misc.comb(N+n, n))
        e, s = compute_entropy_rate(
            N=N, m=m, n=3, beta=beta, exact=False, mu=mu, lim=1e-10)
        ss.append(s)
        # NOTE(review): on Python 3, N / 2 and N / 3 are floats, so these
        # tuples may not match integer state keys (and 1 / N above is 0 on
        # Python 2) -- confirm the intended Python version.
        iss_states = [(N, 0, 0), (N / 2, N / 2, 0), (N / 3, N / 3, N / 3)]
        for i, iss_state in enumerate(iss_states):
            s_max = s[iss_state]
            plot_data[i].append((e, s_max, e / (s_max * norm)))

    # Plot data
    gs = gridspec.GridSpec(3, 1)
    ax1, ax2, ax3 = plot_data_sub(Ns, plot_data, gs, labels, use_log=True, sci=False)
    ax2.legend(loc="upper right")
    ax3.set_xlabel("Population Size $N$")
def ER_figure_mu(N, mus, m, iss_states, labels, beta=1.,
                 pickle_filename="figure_mu.pickle"):
    """
    Plot entropy rates and trajectory entropies for varying mu.

    Parameters:
        N: population size
        mus: iterable of mutation rates to sweep over
        m: 3x3 game matrix
        iss_states: population states (3-tuples summing to N) to track
        labels: plot labels, one per tracked state
        beta: strength of selection
        pickle_filename: cache file; if present, the expensive sweep is
            skipped and previously computed plot data is reused
    """
    # Compute the data
    ss = []
    plot_data = [[] for _ in range(len(iss_states))]
    if os.path.exists(pickle_filename):
        with open(pickle_filename, 'rb') as f:
            plot_data = pickle.load(f)
    else:
        for mu in mus:
            print(mu)  # progress indicator
            e, s = compute_entropy_rate(
                N=N, m=m, n=3, beta=beta, exact=False, mu=mu, lim=1e-10,
                logspace=True)
            ss.append(s)
            for i, iss_state in enumerate(iss_states):
                s_max = s[iss_state]
                # Record (entropy rate, stationary prob, ratio) per state.
                plot_data[i].append((e, s_max, e / s_max))
        with open(pickle_filename, 'wb') as f:
            pickle.dump(plot_data, f)
    # Plot data
    gs = gridspec.GridSpec(3, 1)
    gs.update(hspace=0.5)
    ax1, ax2, ax3 = plot_data_sub(mus, plot_data, gs, labels, use_log=True)
    ax2.legend(loc="upper right")
    # "\m" is an invalid escape sequence (SyntaxWarning on modern Python);
    # escape the backslash explicitly — the rendered string is unchanged.
    ax3.set_xlabel("Mutation rate $\\mu$")
if __name__ == '__main__':
    # Usage: python <script>.py <figure_number>, figure_number in 1..5
    fig_num = sys.argv[1]
    if fig_num == "1":
        ## Figure 1
        # Varying beta, two dimensional
        N = 30
        m = [[1, 2], [2, 1]]
        betas = np.arange(0, 8, 0.2)
        ER_figure_beta2(N, m, betas)
        plt.tight_layout()
        plt.show()
    if fig_num == "2":
        ## Figure 2
        # Varying beta, three dimensional
        N = 60
        mu = 1. / N
        m = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
        # Integer division: state tuples must contain ints to be valid
        # keys into the stationary distribution (N / 2 is a float on Py3).
        iss_states = [(N, 0, 0), (N // 2, N // 2, 0), (N // 3, N // 3, N // 3)]
        labels = ["$v_0$", "$v_1$", "$v_2$"]
        betas = np.arange(0.02, 0.6, 0.02)
        ER_figure_beta3(N, m, mu, betas, iss_states, labels)
        plt.show()
    if fig_num == "3":
        ## Figure 3
        # Varying mutation rate figure
        N = 42
        mus = np.arange(0.0001, 0.015, 0.0005)
        m = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
        # Integer division for the same reason as Figure 2 (42 // 2 == 21,
        # 42 // 3 == 14, matching the labels below).
        iss_states = [(N, 0, 0), (N // 2, N // 2, 0), (N // 3, N // 3, N // 3)]
        labels = ["$v_0$: (42, 0, 0)", "$v_1$: (21, 21, 0)", "$v_2$: (14, 14, 14)"]
        ER_figure_mu(N, mus, m, iss_states, labels, beta=1.)
        plt.show()
    if fig_num == "4":
        ## Figure 4
        # Note: The RPS landscape takes MUCH longer to converge!
        # Consider using the C++ implementation instead for larger N.
        N = 120  # Manuscript uses 180
        mu = 1. / N
        m = incentives.rock_paper_scissors(a=-1, b=-1)
        _, s = compute_entropy_rate(
            N=N, m=m, n=3, beta=1.5, exact=False, mu=mu, lim=1e-16)
        _, tax = ternary.figure(scale=N)
        tax.heatmap(remove_boundary(s), cmap="jet", style="triangular")
        tax.ticks(axis='lbr', linewidth=1, multiple=60)
        tax.clear_matplotlib_ticks()
        plt.show()
    if fig_num == "5":
        ## Figure 5
        # Varying Population Size
        Ns = range(6, 6 * 6, 6)
        m = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
        labels = ["$v_0$", "$v_1$", "$v_2$"]
        ER_figure_N(Ns, m, beta=1, labels=labels)
        plt.show()
| mit |
Clyde-fare/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses the only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature (column 2); np.newaxis keeps the 2-D (n_samples, 1)
# shape that scikit-learn estimators expect.
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets (last 20 samples held out)
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error (the printed label says "residual sum of squares",
# but np.mean makes this the *mean* squared error)
print("Residual sum of squares: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction (this is R^2)
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs: observed points plus the fitted regression line,
# with axis ticks suppressed
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
shenzebang/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
    # Hack to detect whether we are running by the sphinx builder
    # (sphinx executes examples in a namespace without __file__).
    return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
    """Utility class to parse a SGML file and yield documents one at a time.

    Implements a small state machine over the Reuters-21578 SGML markup:
    flags track which element we are currently inside, and character data
    is accumulated into the matching buffer until the element closes.
    """
    def __init__(self, encoding='latin-1'):
        html_parser.HTMLParser.__init__(self)
        self._reset()
        self.encoding = encoding
    def handle_starttag(self, tag, attrs):
        # Dispatch <TAG> to start_<tag>(attrs) if defined; ignore others.
        method = 'start_' + tag
        getattr(self, method, lambda x: None)(attrs)
    def handle_endtag(self, tag):
        # Dispatch </TAG> to end_<tag>() if defined; ignore others.
        method = 'end_' + tag
        getattr(self, method, lambda: None)()
    def _reset(self):
        # Reset all per-document parser state before the next document.
        self.in_title = 0
        self.in_body = 0
        self.in_topics = 0
        self.in_topic_d = 0
        self.title = ""
        self.body = ""
        self.topics = []
        self.topic_d = ""
    def parse(self, fd):
        """Feed the binary file object `fd` chunk by chunk, yielding each
        finished document dict as soon as it has been parsed."""
        self.docs = []
        for chunk in fd:
            self.feed(chunk.decode(self.encoding))
            for doc in self.docs:
                yield doc
            self.docs = []
        self.close()
    def handle_data(self, data):
        # Route character data to whichever element we are currently inside.
        if self.in_body:
            self.body += data
        elif self.in_title:
            self.title += data
        elif self.in_topic_d:
            self.topic_d += data
    def start_reuters(self, attributes):
        pass
    def end_reuters(self):
        # End of one document: normalize whitespace and store the result.
        self.body = re.sub(r'\s+', r' ', self.body)
        self.docs.append({'title': self.title,
                          'body': self.body,
                          'topics': self.topics})
        self._reset()
    def start_title(self, attributes):
        self.in_title = 1
    def end_title(self):
        self.in_title = 0
    def start_body(self, attributes):
        self.in_body = 1
    def end_body(self):
        self.in_body = 0
    def start_topics(self, attributes):
        self.in_topics = 1
    def end_topics(self):
        self.in_topics = 0
    def start_d(self, attributes):
        self.in_topic_d = 1
    def end_d(self):
        # </D> closes one topic entry inside <TOPICS>.
        self.in_topic_d = 0
        self.topics.append(self.topic_d)
        self.topic_d = ""
def stream_reuters_documents(data_path=None):
    """Iterate over documents of the Reuters dataset.

    The Reuters archive will automatically be downloaded and uncompressed if
    the `data_path` directory does not exist.

    Documents are represented as dictionaries with 'body' (str),
    'title' (str), 'topics' (list(str)) keys.
    """
    DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
                    'reuters21578-mld/reuters21578.tar.gz')
    ARCHIVE_FILENAME = 'reuters21578.tar.gz'

    if data_path is None:
        data_path = os.path.join(get_data_home(), "reuters")
    if not os.path.exists(data_path):
        # Download the dataset (only on first run).
        print("downloading dataset (once and for all) into %s" %
              data_path)
        os.mkdir(data_path)

        def progress(blocknum, bs, size):
            # urlretrieve reporthook: print an in-place progress line.
            total_sz_mb = '%.2f MB' % (size / 1e6)
            current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
            if _not_in_sphinx():
                print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
                      end='')

        archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
        urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
                                   reporthook=progress)
        if _not_in_sphinx():
            print('\r', end='')
        print("untarring Reuters dataset...")
        # Fix: close the TarFile when done — the original called
        # .extractall() on an unclosed handle, leaking the file descriptor.
        with tarfile.open(archive_path, 'r:gz') as archive:
            archive.extractall(data_path)
        print("done.")

    parser = ReutersParser()
    for filename in glob(os.path.join(data_path, "*.sgm")):
        # Fix: close each SGML file once fully parsed (the original left
        # every file object open).
        with open(filename, 'rb') as sgml_file:
            for doc in parser.parse(sgml_file):
                yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum.  Hashing keeps the feature space fixed across mini-batches even
# as new words appear.
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                               non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
    'SGD': SGDClassifier(),
    'Perceptron': Perceptron(),
    'NB Multinomial': MultinomialNB(alpha=0.01),
    'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
    """Extract a minibatch of examples, return a tuple X_text, y.

    Note: size is before excluding invalid docs with no topics assigned.
    """
    batch = []
    for doc in itertools.islice(doc_iter, size):
        # Skip documents that carry no topic labels at all.
        if not doc['topics']:
            continue
        text = u'{title}\n\n{body}'.format(**doc)
        batch.append((text, pos_class in doc['topics']))
    if not batch:
        return np.asarray([], dtype=int), np.asarray([], dtype=int)
    X_text, y = zip(*batch)
    return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
    """Generator of minibatches; stops when the stream runs dry."""
    while True:
        X_text, y = get_minibatch(doc_iter, minibatch_size)
        if not len(X_text):
            break
        yield X_text, y
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
# Time how long parsing the held-out docs takes (reported in the plots).
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
# Time the hashing/vectorization of the held-out docs separately.
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
    """Report progress information, return a string."""
    elapsed = time.time() - stats['t0']
    parts = ["%20s classifier : \t" % cls_name]
    parts.append("%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats)
    parts.append("%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats)
    parts.append("accuracy: %(accuracy).3f " % stats)
    parts.append("in %.2fs (%5d docs/s)" % (elapsed, stats['n_train'] / elapsed))
    return "".join(parts)
# Per-classifier bookkeeping: counters, accuracy, and (accuracy, x) history
# pairs used by the plots below.
cls_stats = {}
for cls_name in partial_fit_classifiers:
    stats = {'n_train': 0, 'n_train_pos': 0,
             'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
             'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
    cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# NOTE(review): the call above consumes another n_test_documents from the
# stream beyond the 1000 already held out earlier — confirm this is the
# intended gap between test and training data.
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batchs of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
    # Vectorization time is shared across classifiers, so track it once.
    tick = time.time()
    X_train = vectorizer.transform(X_train_text)
    total_vect_time += time.time() - tick
    for cls_name, cls in partial_fit_classifiers.items():
        tick = time.time()
        # update estimator with examples in the current mini-batch
        cls.partial_fit(X_train, y_train, classes=all_classes)
        # accumulate test accuracy stats
        cls_stats[cls_name]['total_fit_time'] += time.time() - tick
        cls_stats[cls_name]['n_train'] += X_train.shape[0]
        cls_stats[cls_name]['n_train_pos'] += sum(y_train)
        # Score on the held-out set and time the prediction pass.
        tick = time.time()
        cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
        cls_stats[cls_name]['prediction_time'] = time.time() - tick
        # History pairs: (accuracy, #train docs) and (accuracy, runtime).
        acc_history = (cls_stats[cls_name]['accuracy'],
                       cls_stats[cls_name]['n_train'])
        cls_stats[cls_name]['accuracy_history'].append(acc_history)
        run_history = (cls_stats[cls_name]['accuracy'],
                       total_vect_time + cls_stats[cls_name]['total_fit_time'])
        cls_stats[cls_name]['runtime_history'].append(run_history)
        # Only print progress every third mini-batch to limit output.
        if i % 3 == 0:
            print(progress(cls_name, cls_stats[cls_name]))
    if i % 3 == 0:
        print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
    """Plot accuracy as a function of x."""
    xs = np.array(x)
    ys = np.array(y)
    # Label the current axes before drawing the curve.
    plt.title('Classification accuracy as a function of %s' % x_legend)
    plt.xlabel('%s' % x_legend)
    plt.ylabel('Accuracy')
    plt.grid(True)
    plt.plot(xs, ys)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with #examples
    accuracy, n_examples = zip(*stats['accuracy_history'])
    plot_accuracy(n_examples, accuracy, "training examples (#)")
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with runtime
    accuracy, runtime = zip(*stats['runtime_history'])
    plot_accuracy(runtime, accuracy, 'runtime (s)')
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats['total_fit_time'])
# Add the shared vectorization time as an extra bar.
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
                     color=bar_colors)
# Center the tick labels under the bars (bar width is 0.5).
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
    """Attach a numeric value label just above each bar rectangle."""
    for rect in rectangles:
        bar_height = rect.get_height()
        bar_center = rect.get_x() + rect.get_width() / 2.
        ax.text(bar_center, 1.05 * bar_height, '%.4f' % bar_height,
                ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
# Rebuild the name list (the previous section appended 'Vectorization').
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats['prediction_time'])
# Include the one-off parsing and vectorization costs for comparison.
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
                     color=bar_colors)
# Center the tick labels under the bars and tilt them to avoid overlap.
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
DiegoCorrea/ouvidoMusical | apps/evaluators/MRR/analyzer/benchmark.py | 1 | 11662 | import matplotlib.pyplot as plt
import numpy as np
import logging
import os
from collections import Counter
from apps.CONSTANTS import (
SET_SIZE_LIST,
INTERVAL,
AT_LIST,
GRAPH_SET_COLORS_LIST
)
from apps.data.users.models import User
from apps.evaluators.MRR.algorithm.models import MRR
logger = logging.getLogger(__name__)
def bench_gLine(songSetLimit, at=5):
    """Line plot of MRR benchmark runtimes (minutes) with running mean and
    median, for cutoff `at` and song-set size `songSetLimit`; saved under
    files/apps/evaluators/MRR/graphs/."""
    logger.info("[Start Bench MRR (Graph Line)]")
    allBenchmarks = []
    # Keep only benchmarks for the requested set size.
    for evalution in MRR.objects.filter(at=at):
        if evalution.life.setSize == songSetLimit:
            allBenchmarks.append(evalution.benchmrr)
    benchmarkTimes = []
    benchmarkMeanTimes = []
    benchmarkMedianTimes = []
    # Running statistics: one mean/median entry per run seen so far.
    for benchmark in allBenchmarks:
        timeRun = (benchmark.finished_at - benchmark.started_at)
        benchmarkTimes.append(timeRun.total_seconds() / 60.0)
        benchmarkMeanTimes.append(np.mean(benchmarkTimes))
        benchmarkMedianTimes.append(np.median(benchmarkTimes))
    # NOTE(review): [-1] raises IndexError when no benchmarks match —
    # confirm callers only invoke this after at least one run exists.
    logger.debug(
        "MRR Benchmark -> Mean (minutes): "
        + str(benchmarkMeanTimes[-1])
    )
    logger.debug(
        "MRR Benchmark -> Median (minutes): "
        + str(benchmarkMedianTimes[-1])
    )
    logger.debug(
        "MRR Benchmark -> Run Number: "
        + str(len(benchmarkTimes))
    )
    directory = str(
        'files/apps/evaluators/MRR/graphs/'
        + str(songSetLimit)
        + '/benchmark/'
        + str(at) + '/'
    )
    if not os.path.exists(directory):
        os.makedirs(directory)
    plt.figure()
    plt.grid(True)
    plt.title(
        'MRR - Mean Reciprocal Rank@'
        + str(at)
        + '\nBenchmark - set '
        + str(songSetLimit)
    )
    plt.xlabel('ID da execução')
    plt.ylabel('Tempo de execução (minutos)')
    # Three series over the same x range: raw time, running mean, median.
    plt.plot(
        [i for i in range(len(allBenchmarks))],
        [benchmark for benchmark in benchmarkTimes],
        color='red',
        label='Tempo'
    )
    plt.plot(
        [i for i in range(len(allBenchmarks))],
        [benchmark for benchmark in benchmarkMeanTimes],
        color='green',
        label='Media'
    )
    plt.plot(
        [i for i in range(len(allBenchmarks))],
        [benchmark for benchmark in benchmarkMedianTimes],
        color='blue',
        label='Mediana'
    )
    plt.legend(loc='best')
    plt.savefig(
        str(directory)
        + 'value_gLine.png'
    )
    plt.close()
    logger.info("[Finish Bench MRR (Graph Line)]")
def bench_gScatter(songSetLimit, at=5):
    """Scatter plot of MRR benchmark runtimes (minutes) for cutoff `at` and
    song-set size `songSetLimit`; the legend carries the overall mean."""
    logger.info("[Start Bench MRR (Graph Scatter)]")
    allBenchmarks = []
    for evalution in MRR.objects.filter(at=at):
        if evalution.life.setSize == songSetLimit:
            allBenchmarks.append(evalution.benchmrr)
    benchmarkTimes = []
    benchmarkMeanTimes = []
    benchmarkMedianTimes = []
    # Running statistics over the runs seen so far.
    for benchmark in allBenchmarks:
        timeRun = (benchmark.finished_at - benchmark.started_at)
        benchmarkTimes.append(timeRun.total_seconds() / 60.0)
        benchmarkMeanTimes.append(np.mean(benchmarkTimes))
        benchmarkMedianTimes.append(np.median(benchmarkTimes))
    # NOTE(review): [-1] raises IndexError when no benchmarks match.
    logger.debug(
        "MRR Benchmark -> Mean (minutes): "
        + str(benchmarkMeanTimes[-1])
    )
    logger.debug(
        "MRR Benchmark -> Median (minutes): "
        + str(benchmarkMedianTimes[-1])
    )
    logger.debug(
        "MRR Benchmark -> Run Number: "
        + str(len(benchmarkTimes))
    )
    directory = str(
        'files/apps/evaluators/MRR/graphs/'
        + str(songSetLimit)
        + '/benchmark/'
        + str(at) + '/'
    )
    if not os.path.exists(directory):
        os.makedirs(directory)
    plt.figure()
    plt.grid(True)
    plt.title(
        'MRR - Mean Reciprocal Rank@'
        + str(at)
        + '\nBenchmark - set '
        + str(songSetLimit)
    )
    plt.ylabel('Tempo de execução (minutos)')
    plt.xlabel('Tempo de execução (minutos)')
    # Times plotted against themselves: all points fall on the diagonal.
    plt.scatter(
        benchmarkTimes,
        benchmarkTimes,
        label='Media: '
        + str(float("{0:.4f}".format(benchmarkMeanTimes[-1])))
    )
    plt.legend(loc='upper left')
    plt.savefig(str(directory) + 'value_gScatter.png')
    plt.close()
    logger.info("[Finish Bench MRR (Graph Scatter)]")
def bench_gBoxPlot(songSetLimit, at=5):
    """Box plot of MRR benchmark runtimes (minutes) for cutoff `at` and
    song-set size `songSetLimit`."""
    logger.info("[Start Bench MRR (Graph BoxPlot)]")
    allBenchmarks = []
    for evalution in MRR.objects.filter(at=at):
        if evalution.life.setSize == songSetLimit:
            allBenchmarks.append(evalution.benchmrr)
    # Runtime of each benchmark, converted to minutes.
    benchmarkTimes = [
        ((benchmark.finished_at - benchmark.started_at).total_seconds() / 60.0)
        for benchmark in allBenchmarks
    ]
    logger.debug(
        "MRR Benchmark -> Run Number: "
        + str(len(benchmarkTimes))
    )
    directory = str(
        'files/apps/evaluators/MRR/graphs/'
        + str(songSetLimit)
        + '/benchmark/'
        + str(at) + '/'
    )
    if not os.path.exists(directory):
        os.makedirs(directory)
    plt.figure()
    plt.title(
        'MRR - Mean Reciprocal Rank@'
        + str(at)
        + '\nBenchmark - set '
        + str(songSetLimit)
    )
    # 'T' is a 1-element sequence, labelling the single box.
    plt.boxplot(benchmarkTimes, labels='T')
    plt.savefig(
        str(directory)
        + 'value_gBoxPlot.png'
    )
    plt.close()
    logger.info("[Finish Bench MRR (Graph BoxPlot)]")
def bench_gBar(songSetLimit, at=5):
    """Bar chart relating runtime values (y) to their frequency (x) for
    cutoff `at` and song-set size `songSetLimit`; legend shows the mode."""
    logger.info("[Start Bench MRR (Graph Bar)]")
    allBenchmarks = []
    for evalution in MRR.objects.filter(at=at):
        if evalution.life.setSize == songSetLimit:
            allBenchmarks.append(evalution.benchmrr)
    # Runtimes in minutes, rounded to 3 decimals so equal runs bucket
    # together for the Counter below.
    benchmarkTimes = [
        float("{0:.3f}".format(
            (
                benchmark.finished_at - benchmark.started_at
            ).total_seconds() / 60.0)
        )
        for benchmark in allBenchmarks
    ]
    benchmarkCountList = Counter(benchmarkTimes)
    # Mode = most frequent rounded runtime.
    mode = benchmarkCountList.most_common(1)[0][0]
    logger.debug('MRR Benchmark -> Mode: ' + str(mode))
    directory = str(
        'files/apps/evaluators/MRR/graphs/'
        + str(songSetLimit)
        + '/benchmark/'
        + str(at) + '/'
    )
    if not os.path.exists(directory):
        os.makedirs(directory)
    plt.figure()
    plt.title(
        'MRR - Mean Reciprocal Rank@'
        + str(at)
        + '\nBenchmark - set '
        + str(songSetLimit)
    )
    plt.ylabel('Tempo execução (minutos)')
    plt.xlabel('Quantidade')
    # NOTE(review): dict views are passed directly to plt.bar (counts as x,
    # times as heights) — newer matplotlib may require list(...); confirm.
    plt.bar(
        benchmarkCountList.values(),
        benchmarkCountList.keys(),
        label='Moda: '
        + str(float("{0:.3f}".format(mode)))
    )
    plt.legend(loc='best')
    plt.savefig(
        str(directory)
        + 'value_gBar.png'
    )
    plt.close()
    logger.info("[Finish Bench MRR (Graph Bar)]")
# ###################################################################### #
def all_time_gLine(at=5, size_list=SET_SIZE_LIST):
    """Line plot comparing the round times (seconds) of the most recent
    INTERVAL MRR runs for the first three set sizes in `size_list`.

    Saves mrr_all_time_gLine_<at>.png under the MRR graphs/all directory.
    """
    logger.info("[Start Bench MRR (Graph Line)]")
    allBenchmarks = {}
    for evalution in MRR.objects.filter(at=at):
        # Bug fix: the original appended only when the key already existed,
        # silently dropping the first benchmark of every set size.
        allBenchmarks.setdefault(evalution.life.setSize, []).append(
            (
                evalution.benchmrr.finished_at - evalution.benchmrr.started_at
            ).total_seconds()
        )
    directory = str(
        'files/apps/evaluators/MRR/graphs/all/'
    )
    if not os.path.exists(directory):
        os.makedirs(directory)
    plt.figure()
    plt.grid(True)
    plt.xlabel('Round Id')
    plt.ylabel('Round time (seconds)')
    # One line per set size, restricted to the most recent INTERVAL rounds.
    for idx in range(3):
        round_times = allBenchmarks[size_list[idx]][-INTERVAL:]
        plt.plot(
            [i + 1 for i in range(len(round_times))],
            round_times,
            color=GRAPH_SET_COLORS_LIST[idx],
            label=size_list[idx]
        )
    plt.legend(loc='best')
    plt.savefig(
        str(directory)
        + 'mrr_all_time_gLine_'
        + str(at)
        + '.png'
    )
    plt.close()
    logger.info("[Finish Bench MRR (Graph Line)]")
def all_time_gBoxPlot(at=5, size_list=SET_SIZE_LIST):
    """Box plot comparing the round times (seconds) of the most recent
    INTERVAL MRR runs for the first three set sizes in `size_list`.

    Saves mrr_all_time_gBoxPlot_<at>.png under the MRR graphs/all directory.
    """
    logger.info("[Start Bench MRR (Graph BoxPlot)]")
    allBenchmarks = {}
    for evalution in MRR.objects.filter(at=at):
        # Bug fix: always append — the original's if/else skipped the
        # append on first sight of a key, dropping one sample per size.
        allBenchmarks.setdefault(evalution.life.setSize, []).append(
            (
                evalution.benchmrr.finished_at - evalution.benchmrr.started_at
            ).total_seconds()
        )
    directory = str(
        'files/apps/evaluators/MRR/graphs/all/'
    )
    if not os.path.exists(directory):
        os.makedirs(directory)
    plt.figure()
    plt.grid(True)
    plt.ylabel('Round time (seconds)')
    # One box per set size, restricted to the most recent INTERVAL rounds.
    plt.boxplot(
        [allBenchmarks[size][-INTERVAL:] for size in size_list[:3]],
        labels=[size_list[0], size_list[1], size_list[2]]
    )
    plt.savefig(
        str(directory)
        + 'mrr_all_time_gBoxPlot_'
        + str(at)
        + '.png'
    )
    plt.close()
    logger.info("[Finish Bench MRR (Graph BoxPlot)]")
# ########################################################################## #
# ########################################################################## #
# ########################################################################## #
def report_MRR_time(at_list=AT_LIST, size_list=SET_SIZE_LIST):
    """Write a CSV (at,size,mean) of mean MRR benchmark runtimes in minutes
    over the most recent INTERVAL runs, and echo each row to stdout."""
    logger.info("[Start MRR Report]")
    allEvaluations = {}
    for at in at_list:
        allEvaluations_at = {}
        for evalution in MRR.objects.filter(at=at):
            # setdefault + append replaces the original's redundant
            # if/else (both branches appended the same value).
            allEvaluations_at.setdefault(evalution.life.setSize, []).append(
                evalution.benchmrr)
        allEvaluations[at] = allEvaluations_at
    directory = str(
        'files/apps/evaluators/MRR/csv/'
    )
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Use a context manager so the report file is always closed
    # (the original never closed it on error paths).
    with open(directory + 'mrr_time.csv', 'w+') as toSaveFile:
        toSaveFile.write('at,size,mean\n')
        for at in at_list:
            for size in size_list:
                meanAT = np.mean(
                    [
                        (benchmark.finished_at - benchmark.started_at).total_seconds()/60.0
                        for benchmark in allEvaluations[at][size][-INTERVAL:]]
                )
                toSaveFile.write(str(at) + ',' + str(size) + ',' + str(float("{0:.3f}".format(meanAT))) + '\n')
                print('|' + str(at) + '|' + str(size) + '|' + str(float("{0:.3f}".format(meanAT))) + "\t|")
    logger.info("[Finish MRR Report]")
| mit |
ueshin/apache-spark | python/pyspark/pandas/tests/test_series_conversion.py | 15 | 3303 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from distutils.version import LooseVersion
import pandas as pd
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class SeriesConversionTest(PandasOnSparkTestCase, SQLTestUtils):
    """Checks that pandas-on-Spark Series conversion methods (to_clipboard,
    to_latex) produce the same output as the equivalent pandas methods."""
    @property
    def pser(self):
        # Reference pandas Series.
        return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
    @property
    def psser(self):
        # The same data as a pandas-on-Spark Series.
        return ps.from_pandas(self.pser)
    @unittest.skip("Pyperclip could not find a copy/paste mechanism for Linux.")
    def test_to_clipboard(self):
        pser = self.pser
        psser = self.psser
        self.assert_eq(psser.to_clipboard(), pser.to_clipboard())
        self.assert_eq(psser.to_clipboard(excel=False), pser.to_clipboard(excel=False))
        self.assert_eq(
            psser.to_clipboard(sep=",", index=False), pser.to_clipboard(sep=",", index=False)
        )
    def test_to_latex(self):
        pser = self.pser
        psser = self.psser
        self.assert_eq(psser.to_latex(), pser.to_latex())
        self.assert_eq(psser.to_latex(col_space=2), pser.to_latex(col_space=2))
        self.assert_eq(psser.to_latex(header=True), pser.to_latex(header=True))
        self.assert_eq(psser.to_latex(index=False), pser.to_latex(index=False))
        self.assert_eq(psser.to_latex(na_rep="-"), pser.to_latex(na_rep="-"))
        self.assert_eq(psser.to_latex(float_format="%.1f"), pser.to_latex(float_format="%.1f"))
        self.assert_eq(psser.to_latex(sparsify=False), pser.to_latex(sparsify=False))
        self.assert_eq(psser.to_latex(index_names=False), pser.to_latex(index_names=False))
        self.assert_eq(psser.to_latex(bold_rows=True), pser.to_latex(bold_rows=True))
        # Can't specifying `encoding` without specifying `buf` as filename in pandas >= 1.0.0
        # https://github.com/pandas-dev/pandas/blob/master/pandas/io/formats/format.py#L492-L495
        if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
            self.assert_eq(psser.to_latex(encoding="ascii"), pser.to_latex(encoding="ascii"))
        self.assert_eq(psser.to_latex(decimal=","), pser.to_latex(decimal=","))
if __name__ == "__main__":
from pyspark.pandas.tests.test_series_conversion import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
SheffieldML/GPy | setup.py | 1 | 9929 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
# Copyright (c) 2012 - 2014, GPy authors (see AUTHORS.txt).
# Copyright (c) 2014, James Hensman, Max Zwiessele
# Copyright (c) 2015, Max Zwiessele
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
from __future__ import print_function
import os
import sys
from setuptools import setup, Extension
import codecs
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError
def read(fname):
    """Return the contents of *fname* decoded as Latin-1 text."""
    handle = codecs.open(fname, 'r', 'latin')
    try:
        return handle.read()
    finally:
        handle.close()
def read_to_rst(fname):
    """Convert the Markdown file *fname* to reST text via pypandoc,
    falling back to the raw file contents when pypandoc is missing."""
    try:
        import pypandoc
        rstname = "{}.{}".format(os.path.splitext(fname)[0], 'rst')
        pypandoc.convert(read(fname), 'rst', format='md', outputfile=rstname)
        with open(rstname, 'r') as rst_file:
            return rst_file.read()
    except ImportError:
        # pypandoc not installed: ship the Markdown as-is.
        return read(fname)
# Short long_description shown on PyPI; full docs live on the GitHub page.
desc = """
Please refer to the github homepage for detailed instructions on installation and usage.
"""
# Execute GPy/__version__.py in a throwaway namespace to obtain __version__
# without importing the (possibly not-yet-built) GPy package itself.
version_dummy = {}
exec(read('GPy/__version__.py'), version_dummy)
__version__ = version_dummy['__version__']
del version_dummy
#Mac OS X Clang doesn't support OpenMP at the current time.
#This detects if we are building on a Mac
def ismac():
    """Return True when building on macOS (``sys.platform`` is ``darwin``)."""
    return sys.platform.startswith('darwin')
if ismac():
    # Apple clang has no OpenMP support, so drop -fopenmp/-lgomp on macOS.
    compile_flags = [ '-O3', ]
    link_args = []
else:
    compile_flags = [ '-fopenmp', '-O3']
    link_args = ['-lgomp' ]
try:
    # So that we don't need numpy installed to determine it's a dependency.
    import numpy as np

    def _cython_extension(name, sources):
        # Every Cython extension shares the same include dirs and
        # compile/link flags; only name and sources vary.
        return Extension(name=name,
                         sources=sources,
                         include_dirs=[np.get_include(), '.'],
                         extra_compile_args=compile_flags,
                         extra_link_args=link_args)

    ext_mods = [
        _cython_extension('GPy.kern.src.stationary_cython',
                          ['GPy/kern/src/stationary_cython.pyx',
                           'GPy/kern/src/stationary_utils.c']),
        _cython_extension('GPy.util.choleskies_cython',
                          ['GPy/util/choleskies_cython.pyx']),
        _cython_extension('GPy.util.linalg_cython',
                          ['GPy/util/linalg_cython.pyx']),
        _cython_extension('GPy.kern.src.coregionalize_cython',
                          ['GPy/kern/src/coregionalize_cython.pyx']),
        _cython_extension('GPy.models.state_space_cython',
                          ['GPy/models/state_space_cython.pyx']),
    ]
except ModuleNotFoundError:
    # Without numpy we cannot compute include dirs; skip compiled extensions.
    ext_mods = []
# Core runtime dependencies; scipy is pinned below 1.5 on Python < 3.6
# (later scipy releases dropped support for those interpreters).
install_requirements = ['numpy>=1.7', 'six', 'paramz>=0.9.0', 'cython>=0.29']
if sys.version_info >= (3, 6):
    install_requirements.append('scipy>=1.3.0')
else:
    install_requirements.append('scipy>=1.3.0,<1.5.0')
# Package metadata and build configuration, passed straight to setuptools.
setup(name = 'GPy',
      version = __version__,
      # AUTHORS.txt is converted to reST when pypandoc is available.
      author = read_to_rst('AUTHORS.txt'),
      author_email = "gpy.authors@gmail.com",
      description = ("The Gaussian Process Toolbox"),
      long_description = desc,
      license = "BSD 3-clause",
      keywords = "machine-learning gaussian-processes kernels",
      url = "http://sheffieldml.github.com/GPy/",
      download_url='https://github.com/SheffieldML/GPy/',
      # Cython extensions built above; empty when numpy was unavailable.
      ext_modules = ext_mods,
      packages = ["GPy",
                  "GPy.core",
                  "GPy.core.parameterization",
                  "GPy.kern",
                  "GPy.kern.src",
                  "GPy.kern.src.psi_comp",
                  "GPy.models",
                  "GPy.inference",
                  "GPy.inference.optimization",
                  "GPy.inference.mcmc",
                  "GPy.inference.latent_function_inference",
                  "GPy.likelihoods",
                  "GPy.mappings",
                  "GPy.examples",
                  "GPy.testing",
                  "GPy.util",
                  "GPy.plotting",
                  "GPy.plotting.gpy_plot",
                  "GPy.plotting.matplot_dep",
                  "GPy.plotting.matplot_dep.controllers",
                  "GPy.plotting.plotly_dep",
                  ],
      package_dir={'GPy': 'GPy'},
      #package_data = {'GPy': ['defaults.cfg', 'installation.cfg',
      #                        'util/data_resources.json',
      #                        'util/football_teams.json',
      #                        'testing/plotting_tests/baseline/*.png'
      #                        ]},
      #data_files=[('GPy/testing/plotting_tests/baseline', 'testing/plotting_tests/baseline/*.png'),
      #            ('GPy/testing/', 'GPy/testing/pickle_test.pickle'),
      #            ],
      include_package_data = True,
      py_modules = ['GPy.__init__'],
      test_suite = 'GPy.testing',
      # numpy must be present at build time for the Cython include dirs.
      setup_requires = ['numpy>=1.7'],
      install_requires = install_requirements,
      # Optional feature sets, installable as e.g. ``pip install GPy[plotting]``.
      extras_require = {'docs':['sphinx'],
                        'optional':['mpi4py',
                                    'ipython>=4.0.0',
                                    ],
                        'plotting':['matplotlib >= 3.0',
                                    'plotly >= 1.8.6'],
                        'notebook':['jupyter_client >= 4.0.6',
                                    'ipywidgets >= 4.0.3',
                                    'ipykernel >= 4.1.0',
                                    'notebook >= 4.0.5',
                                    ],
                        },
      classifiers=['License :: OSI Approved :: BSD License',
                   'Natural Language :: English',
                   'Operating System :: MacOS :: MacOS X',
                   'Operating System :: Microsoft :: Windows',
                   'Operating System :: POSIX :: Linux',
                   'Programming Language :: Python :: 3.5',
                   'Programming Language :: Python :: 3.6',
                   'Programming Language :: Python :: 3.7',
                   'Programming Language :: Python :: 3.8',
                   'Programming Language :: Python :: 3.9',
                   'Framework :: IPython',
                   'Intended Audience :: Science/Research',
                   'Intended Audience :: Developers',
                   'Topic :: Software Development',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   ]
      )
# Check config files and settings:
local_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'GPy', 'installation.cfg'))
# HOME is set on POSIX, USERPROFILE on Windows.  Fall back to expanduser so
# the os.path.join below cannot raise TypeError when neither variable is set
# (the original crashed outside the try block in that case).
home = os.getenv('HOME') or os.getenv('USERPROFILE') or os.path.expanduser('~')
user_file = os.path.join(home, '.config', 'GPy', 'user.cfg')
print("")
try:
    if not os.path.exists(user_file):
        # Does an old config exist?
        old_user_file = os.path.join(home, '.gpy_user.cfg')
        if os.path.exists(old_user_file):
            # Move it to new location:
            print("GPy: Found old config file, moving to new location {}".format(user_file))
            if not os.path.exists(os.path.dirname(user_file)):
                os.makedirs(os.path.dirname(user_file))
            os.rename(old_user_file, user_file)
        else:
            # No config file exists, save informative stub to user config folder:
            print("GPy: Saving user configuration file to {}".format(user_file))
            if not os.path.exists(os.path.dirname(user_file)):
                os.makedirs(os.path.dirname(user_file))
            with open(user_file, 'w') as f:
                with open(local_file, 'r') as l:
                    f.write(l.read())
    else:
        print("GPy: User configuration file at location {}".format(user_file))
except Exception:
    # Best-effort only: failing to write the user config must not fail the
    # install.  Narrowed from a bare ``except:`` so SystemExit and
    # KeyboardInterrupt still propagate.
    print("GPy: Could not write user configuration file {}".format(user_file))
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/ensemble/tests/test_forest.py | 48 | 39224 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample: two linearly separable clusters around (-1,-1) and (1,1)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# Registries mapping estimator name -> class.  The check_* helpers below are
# parameterized over these so every forest variant is exercised.
FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}
# Union of all three registries above.
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
    """Check classification on a toy dataset."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Exercise both the default and a restricted max_features setting.
    for extra_params in ({}, {"max_features": 1}):
        clf = ForestClassifier(n_estimators=10, random_state=1,
                               **extra_params)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(10, len(clf))
    # also test apply
    leaf_indices = clf.apply(X)
    assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
    # One toy-dataset check per registered forest classifier.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_classification_toy, clf_name
def check_iris_criterion(name, criterion):
    # Check consistency on dataset iris.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Full max_features must score very well; max_features=2 merely decently.
    for extra_params, threshold in (({}, 0.9), ({"max_features": 2}, 0.5)):
        clf = ForestClassifier(n_estimators=10, criterion=criterion,
                               random_state=1, **extra_params)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        assert_greater(score, threshold,
                       "Failed with criterion %s and score = %f"
                       % (criterion, score))
def test_iris():
    # Every classifier crossed with both supported split criteria.
    for clf_name, crit in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
        yield check_iris_criterion, clf_name, crit
def check_boston_criterion(name, criterion):
    # Check consistency on dataset boston house prices.
    ForestRegressor = FOREST_REGRESSORS[name]
    # Default max_features (all features considered at each split).
    clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
    clf.fit(boston.data, boston.target)
    score = clf.score(boston.data, boston.target)
    assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
                   "and score = %f" % (criterion, score))
    # Restricted max_features must still reach the same R^2 threshold.
    clf = ForestRegressor(n_estimators=5, criterion=criterion,
                          max_features=6, random_state=1)
    clf.fit(boston.data, boston.target)
    score = clf.score(boston.data, boston.target)
    assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
                   "and score = %f" % (criterion, score))
def test_boston():
    # "mse" is the single criterion exercised for regressors here.
    for reg_name, crit in product(FOREST_REGRESSORS, ("mse", )):
        yield check_boston_criterion, reg_name, crit
def check_regressor_attributes(name):
    """Regression models should not expose classes_ / n_classes_."""
    reg = FOREST_REGRESSORS[name](random_state=0)
    # Neither before ...
    assert_false(hasattr(reg, "classes_"))
    assert_false(hasattr(reg, "n_classes_"))
    reg.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    # ... nor after fitting.
    assert_false(hasattr(reg, "classes_"))
    assert_false(hasattr(reg, "n_classes_"))
def test_regressor_attributes():
    # One attribute check per registered forest regressor.
    for reg_name in FOREST_REGRESSORS:
        yield check_regressor_attributes, reg_name
def check_probability(name):
    # Predict probabilities.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
                               max_depth=1)
        clf.fit(iris.data, iris.target)
        # Per-sample class probabilities must sum to one ...
        assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
                                  np.ones(iris.data.shape[0]))
        # ... and be consistent with the log probabilities.
        assert_array_almost_equal(clf.predict_proba(iris.data),
                                  np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
    # One probability-consistency check per forest classifier.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_probability, clf_name
def check_importances(X, y, name, criterion):
    """Check feature_importances_: shape, recovery of the 3 informative
    features, invariance under n_jobs, and invariance under a uniform
    rescaling of sample weights."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=20, criterion=criterion,
                          random_state=0)
    est.fit(X, y)
    importances = est.feature_importances_
    n_important = np.sum(importances > 0.1)
    assert_equal(importances.shape[0], 10)
    assert_equal(n_important, 3)
    X_new = est.transform(X, threshold="mean")
    assert_less(X_new.shape[1], X.shape[1])
    # Check with parallel
    importances = est.feature_importances_
    est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_  # fixed typo "parrallel"
    assert_array_almost_equal(importances, importances_parallel)
    # Check with sample weights
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    est = ForestEstimator(n_estimators=20, random_state=0,
                          criterion=criterion)
    est.fit(X, y, sample_weight=sample_weight)
    importances = est.feature_importances_
    assert_true(np.all(importances >= 0.0))
    # Importances should not change when the weights are uniformly rescaled.
    for scale in [0.5, 10, 100]:
        est = ForestEstimator(n_estimators=20, random_state=0,
                              criterion=criterion)
        est.fit(X, y, sample_weight=scale * sample_weight)
        importances_bis = est.feature_importances_
        assert_less(np.abs(importances - importances_bis).mean(), 0.001)
def test_importances():
    # Dataset with exactly 3 informative features out of 10.
    X, y = datasets.make_classification(n_samples=500, n_features=10,
                                        n_informative=3, n_redundant=0,
                                        n_repeated=0, shuffle=False,
                                        random_state=0)
    for est_name, crit in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
        yield check_importances, X, y, est_name, crit
    for est_name, crit in product(FOREST_REGRESSORS,
                                  ["mse", "friedman_mse"]):
        yield check_importances, X, y, est_name, crit
def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees
    # converge towards their theoretical values (See Louppe et al,
    # Understanding variable importances in forests of randomized trees, 2013).
    def binomial(k, n):
        # Binomial coefficient, defined as 0 outside the valid range of k.
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
    def entropy(samples):
        # Shannon entropy (in bits) of a vector of discrete labels.
        n_samples = len(samples)
        entropy = 0.
        for count in bincount(samples):
            p = 1. * count / n_samples
            if p > 0:
                entropy -= p * np.log2(p)
        return entropy
    def mdi_importance(X_m, X, y):
        # Theoretical Mean Decrease of Impurity importance of feature X_m,
        # averaging the impurity decrease over all conditioning subsets B
        # and all of their values b (per the paper cited above).
        n_samples, n_features = X.shape
        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]
        imp = 0.
        for k in range(n_features):
            # Weight of each B of size k
            coef = 1. / (binomial(k, n_features) * (n_features - k))
            # For all B of size k
            for B in combinations(features, k):
                # For all values B=b
                for b in product(*[values[B[j]] for j in range(k)]):
                    # NOTE(review): np.bool is removed in NumPy >= 1.24;
                    # use plain bool if modernizing.
                    mask_b = np.ones(n_samples, dtype=np.bool)
                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]
                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)
                    if n_samples_b > 0:
                        children = []
                        for xi in values[X_m]:
                            mask_xi = X_[:, X_m] == xi
                            children.append(y_[mask_xi])
                        imp += (coef
                                * (1. * n_samples_b / n_samples)  # P(B=b)
                                * (entropy(y_) -
                                   sum([entropy(c) * len(c) / n_samples_b
                                        for c in children])))
        return imp
    # Last column is the label; the first 7 are binary features.
    data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
                     [1, 0, 1, 1, 1, 0, 1, 2],
                     [1, 0, 1, 1, 0, 1, 1, 3],
                     [0, 1, 1, 1, 0, 1, 0, 4],
                     [1, 1, 0, 1, 0, 1, 1, 5],
                     [1, 1, 0, 1, 1, 1, 1, 6],
                     [1, 0, 1, 0, 0, 1, 0, 7],
                     [1, 1, 1, 1, 1, 1, 1, 8],
                     [1, 1, 1, 1, 0, 1, 1, 9],
                     [1, 1, 1, 0, 1, 1, 1, 0]])
    X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
    n_features = X.shape[1]
    # Compute true importances
    true_importances = np.zeros(n_features)
    for i in range(n_features):
        true_importances[i] = mdi_importance(i, X, y)
    # Estimate importances with totally randomized trees
    clf = ExtraTreesClassifier(n_estimators=500,
                               max_features=1,
                               criterion="entropy",
                               random_state=0).fit(X, y)
    importances = sum(tree.tree_.compute_feature_importances(normalize=False)
                      for tree in clf.estimators_) / clf.n_estimators
    # Check correctness
    assert_almost_equal(entropy(y), sum(importances))
    assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
    # Accessing feature_importances_ before fit must raise ValueError.
    unfitted = FOREST_ESTIMATORS[name](random_state=0)
    assert_raises(ValueError, getattr, unfitted, "feature_importances_")
def test_unfitted_feature_importances():
    # Every estimator type must raise before fitting.
    for est_name in FOREST_ESTIMATORS:
        yield check_unfitted_feature_importances, est_name
def check_oob_score(name, X, y, n_estimators=20):
    # Check that oob prediction is a good estimation of the generalization
    # error.
    # Proper behavior
    est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                  n_estimators=n_estimators, bootstrap=True)
    n_samples = X.shape[0]
    # Fit on the first half of the data, score on the second half.
    est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
    test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
    if name in FOREST_CLASSIFIERS:
        # OOB accuracy should be within 0.1 of held-out accuracy.
        assert_less(abs(test_score - est.oob_score_), 0.1)
    else:
        # For regressors the held-out R^2 should exceed the OOB score.
        assert_greater(test_score, est.oob_score_)
        assert_greater(est.oob_score_, .8)
    # Check warning if not enough estimators
    with np.errstate(divide="ignore", invalid="ignore"):
        est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                      n_estimators=1, bootstrap=True)
        assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
    for clf_name in FOREST_CLASSIFIERS:
        yield check_oob_score, clf_name, iris.data, iris.target
        # csc matrix
        yield check_oob_score, clf_name, csc_matrix(iris.data), iris.target
        # non-contiguous targets in classification
        yield check_oob_score, clf_name, iris.data, iris.target * 2 + 1
    for reg_name in FOREST_REGRESSORS:
        yield check_oob_score, reg_name, boston.data, boston.target, 50
        # csc matrix
        yield (check_oob_score, reg_name, csc_matrix(boston.data),
               boston.target, 50)
def check_oob_score_raise_error(name):
    ForestEstimator = FOREST_ESTIMATORS[name]
    if name in FOREST_TRANSFORMERS:
        # Transformers do not accept an oob_score argument at all.
        for oob_score in [True, False]:
            assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
        assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
                      X, y)
    else:
        # Unfitted / no bootstrap / no oob_score
        for oob_score, bootstrap in [(True, False), (False, True),
                                     (False, False)]:
            est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
                                  random_state=0)
            assert_false(hasattr(est, "oob_score_"))
        # No bootstrap
        assert_raises(ValueError, ForestEstimator(oob_score=True,
                                                  bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
    # Exercise the error paths for every estimator type.
    for est_name in FOREST_ESTIMATORS:
        yield check_oob_score_raise_error, est_name
def check_gridsearch(name):
    # Forests must be usable as the estimator inside GridSearchCV.
    param_grid = {'n_estimators': (1, 2), 'max_depth': (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
def test_gridsearch():
    # Check that base trees can be grid-searched.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_gridsearch, clf_name
def check_parallel(name, X, y):
    """Predictions must not depend on the number of prediction jobs."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    ensemble = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
    ensemble.fit(X, y)
    assert_equal(len(ensemble), 10)
    ensemble.set_params(n_jobs=1)
    pred_serial = ensemble.predict(X)
    ensemble.set_params(n_jobs=2)
    pred_parallel = ensemble.predict(X)
    assert_array_almost_equal(pred_serial, pred_parallel, 3)
def test_parallel():
    # Classifiers on iris, regressors on boston.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_parallel, clf_name, iris.data, iris.target
    for reg_name in FOREST_REGRESSORS:
        yield check_parallel, reg_name, boston.data, boston.target
def check_pickle(name, X, y):
    """A fitted forest must survive a pickle round-trip unchanged."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(random_state=0)
    est.fit(X, y)
    original_score = est.score(X, y)
    restored = pickle.loads(pickle.dumps(est))
    assert_equal(type(restored), est.__class__)
    restored_score = restored.score(X, y)
    assert_equal(original_score, restored_score)
def test_pickle():
    # Use every other sample to keep the fixtures small.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_pickle, clf_name, iris.data[::2], iris.target[::2]
    for reg_name in FOREST_REGRESSORS:
        yield check_pickle, reg_name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
    # Check estimators on multi-output problems.
    X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
               [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
    y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
               [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)
    if name in FOREST_CLASSIFIERS:
        with np.errstate(divide="ignore"):
            # One probability array per output: the first output has 2
            # classes, the second has 4.
            proba = est.predict_proba(X_test)
            assert_equal(len(proba), 2)
            assert_equal(proba[0].shape, (4, 2))
            assert_equal(proba[1].shape, (4, 4))
            log_proba = est.predict_log_proba(X_test)
            assert_equal(len(log_proba), 2)
            assert_equal(log_proba[0].shape, (4, 2))
            assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
    # Classifiers and regressors alike must handle multiple outputs.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_multioutput, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_multioutput, reg_name
def check_classes_shape(name):
    """n_classes_ and classes_ must have the proper shape for single- and
    multi-output classification."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Classification, single output
    forest = ForestClassifier(random_state=0).fit(X, y)
    assert_equal(forest.n_classes_, 2)
    assert_array_equal(forest.classes_, [-1, 1])
    # Classification, multi-output
    _y = np.vstack((y, np.array(y) * 2)).T
    forest = ForestClassifier(random_state=0).fit(X, _y)
    assert_array_equal(forest.n_classes_, [2, 2])
    assert_array_equal(forest.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
    # One shape check per forest classifier.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_classes_shape, clf_name
def test_random_trees_dense_type():
    # With sparse_output=False, RandomTreesEmbedding must return a dense
    # ndarray rather than a scipy sparse matrix.
    hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    X, y = datasets.make_circles(factor=0.5)
    embedded = hasher.fit_transform(X)
    assert_equal(type(embedded), np.ndarray)
def test_random_trees_dense_equal():
    # Dense and sparse outputs of RandomTreesEmbedding must be identical
    # for the same random_state.
    dense_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
                                        random_state=0)
    sparse_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
                                         random_state=0)
    X, y = datasets.make_circles(factor=0.5)
    dense_out = dense_hasher.fit_transform(X)
    sparse_out = sparse_hasher.fit_transform(X)
    assert_array_equal(sparse_out.toarray(), dense_out)
def test_random_hasher():
    # test random forest hashing on circles dataset
    # make sure that it is linearly separable.
    # even after projected to two SVD dimensions
    # Note: Not all random_states produce perfect results.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)
    # test fit and transform:
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(),
                       X_transformed.toarray())
    # one leaf active per data point per forest
    assert_equal(X_transformed.shape[0], X.shape[0])
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
    # Project to 2 dimensions and check linear separability with an SVM.
    svd = TruncatedSVD(n_components=2)
    X_reduced = svd.fit_transform(X_transformed)
    linear_clf = LinearSVC()
    linear_clf.fit(X_reduced, y)
    assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
    # Hashing must produce the same embedding for dense and sparse input.
    X, y = datasets.make_multilabel_classification(random_state=0)
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    dense_out = hasher.fit_transform(X)
    sparse_out = hasher.fit_transform(csc_matrix(X))
    assert_array_equal(sparse_out.toarray(), dense_out.toarray())
def test_parallel_train():
    # Training with different n_jobs must yield identical models.
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)
    clfs = [
        RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
                               random_state=12345).fit(X_train, y_train)
        for n_jobs in [1, 2, 3, 8, 16, 32]
    ]
    X_test = rng.randn(n_samples, n_features)
    # Adjacent pairs of predictions must agree.
    probas = [clf.predict_proba(X_test) for clf in clfs]
    for proba1, proba2 in zip(probas, probas[1:]):
        assert_array_almost_equal(proba1, proba2)
def test_distribution():
    rng = check_random_state(12321)
    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500
    clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        # Serialize each tree's structure as "feature,threshold/" tokens
        # ("-" for leaves) so identical structures can be counted.
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1
    uniques = sorted([(1. * count / n_trees, tree)
                      for tree, count in uniques.items()])
    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert_equal(len(uniques), 5)
    assert_greater(0.20, uniques[0][0])  # Rough approximation of 1/6.
    assert_greater(0.20, uniques[1][0])
    assert_greater(0.20, uniques[2][0])
    assert_greater(0.20, uniques[3][0])
    assert_greater(uniques[4][0], 0.3)
    assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
    # Two variables, one with 2 values, one with 3 values
    X = np.empty((1000, 2))
    # NOTE(review): these use the global np.random instead of the seeded
    # `rng` created above — presumably unintentional, and it makes the data
    # irreproducible; fixing it could change the exact uniques count, so
    # verify the assertion below before changing.
    X[:, 0] = np.random.randint(0, 2, 1000)
    X[:, 1] = np.random.randint(0, 3, 1000)
    y = rng.rand(1000)
    clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
                              random_state=1).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1
    uniques = [(count, tree) for tree, count in uniques.items()]
    assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
    # Test precedence of max_leaf_nodes over max_depth.
    ForestEstimator = FOREST_ESTIMATORS[name]
    # When max_leaf_nodes is set, the depth-1 cap is ignored.
    est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
                          n_estimators=1).fit(X, y)
    assert_greater(est.estimators_[0].tree_.max_depth, 1)
    # Without max_leaf_nodes, max_depth is honoured.
    est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
    assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
    data, target = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for est_name in FOREST_ESTIMATORS:
        yield check_max_leaf_nodes_max_depth, est_name, data, target
def check_min_samples_leaf(name, X, y):
    # Test if leaves contain more than leaf_count training examples
    ForestEstimator = FOREST_ESTIMATORS[name]
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        est = ForestEstimator(min_samples_leaf=5,
                              max_leaf_nodes=max_leaf_nodes,
                              random_state=0)
        est.fit(X, y)
        out = est.estimators_[0].tree_.apply(X)
        node_counts = bincount(out)
        # drop inner nodes
        leaf_count = node_counts[node_counts != 0]
        # Every leaf must hold at least min_samples_leaf (= 5) samples.
        assert_greater(np.min(leaf_count), 4,
                       "Failed with {0}".format(name))
def test_min_samples_leaf():
    data, target = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    data = data.astype(np.float32)
    for est_name in FOREST_ESTIMATORS:
        yield check_min_samples_leaf, est_name, data, target
def check_min_weight_fraction_leaf(name, X, y):
    # Test if leaves contain at least min_weight_fraction_leaf of the
    # training set
    ForestEstimator = FOREST_ESTIMATORS[name]
    rng = np.random.RandomState(0)
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        for frac in np.linspace(0, 0.5, 6):
            est = ForestEstimator(min_weight_fraction_leaf=frac,
                                  max_leaf_nodes=max_leaf_nodes,
                                  random_state=0)
            if isinstance(est, (RandomForestClassifier,
                                RandomForestRegressor)):
                # Disable bootstrap so every tree sees the full weights.
                est.bootstrap = False
            est.fit(X, y, sample_weight=weights)
            out = est.estimators_[0].tree_.apply(X)
            node_weights = bincount(out, weights=weights)
            # drop inner nodes
            leaf_weights = node_weights[node_weights != 0]
            assert_greater_equal(
                np.min(leaf_weights),
                total_weight * est.min_weight_fraction_leaf,
                "Failed with {0} "
                "min_weight_fraction_leaf={1}".format(
                    name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    data, target = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    data = data.astype(np.float32)
    for est_name in FOREST_ESTIMATORS:
        yield check_min_weight_fraction_leaf, est_name, data, target
def check_sparse_input(name, X, X_sparse, y):
    """Sparse and dense training data must yield equivalent models."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
    sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_array_almost_equal(sparse.predict(X), dense.predict(X))
        assert_array_almost_equal(sparse.feature_importances_,
                                  dense.feature_importances_)
    if name in FOREST_CLASSIFIERS:
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))
    if name in FOREST_TRANSFORMERS:
        assert_array_almost_equal(sparse.transform(X).toarray(),
                                  dense.transform(X).toarray())
        assert_array_almost_equal(sparse.fit_transform(X).toarray(),
                                  dense.fit_transform(X).toarray())
def test_sparse_input():
    X, y = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50)
    # Every estimator crossed with every sparse matrix format.
    for est_name, to_sparse in product(FOREST_ESTIMATORS,
                                       (csr_matrix, csc_matrix, coo_matrix)):
        yield check_sparse_input, est_name, X, to_sparse(X), y
def check_memory_layout(name, dtype):
    # Check that it works no matter the memory layout
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    # Nothing
    X = np.asarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # C-order
    X = np.asarray(iris.data, order="C", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # F-order
    X = np.asarray(iris.data, order="F", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # Contiguous
    X = np.ascontiguousarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # Sparse formats only when the underlying splitter supports them.
    if est.base_estimator.splitter in SPARSE_SPLITTERS:
        # csr matrix
        X = csr_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # csc_matrix
        X = csc_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # coo_matrix
        X = coo_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
    # Strided
    X = np.asarray(iris.data[::3], dtype=dtype)
    y = iris.target[::3]
    assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
    # Both float dtypes, classifiers and regressors alike.
    for est_name, dtype in product(FOREST_CLASSIFIERS,
                                   [np.float64, np.float32]):
        yield check_memory_layout, est_name, dtype
    for est_name, dtype in product(FOREST_REGRESSORS,
                                   [np.float64, np.float32]):
        yield check_memory_layout, est_name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
    # 1d feature arrays must be rejected by fit and by predict.
    ForestEstimator = FOREST_ESTIMATORS[name]
    assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
    est = ForestEstimator(random_state=0)
    est.fit(X_2d, y)
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
    # A single feature column as a flat 1d array vs. a proper 2d column.
    X_1d = iris.data[:, 0]
    X_col = iris.data[:, 0].reshape((-1, 1))
    labels = iris.target
    for est_name in FOREST_ESTIMATORS:
        yield check_1d_input, est_name, X_1d, X_col, labels
def check_class_weights(name):
    # Check class_weights resemble sample_weights behavior.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = ForestClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                          {0: 2., 1: 1., 2: 2.},
                                          {0: 1., 1: 2., 2: 2.}],
                            random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = ForestClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Check that sample_weight and class_weight are multiplicative
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    # Nose-style generator test: yields one class-weight check per forest
    # classifier registered in FOREST_CLASSIFIERS.
    for name in FOREST_CLASSIFIERS:
        yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # class_weight presets and per-output dicts must accept multi-output y.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T

    for class_weight in ('balanced',
                         [{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}]):
        clf = ForestClassifier(class_weight=class_weight, random_state=0)
        clf.fit(X, _y)

    # Smoke tests for the subsample-based variants.
    clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
    clf.fit(X, _y)
    clf = ForestClassifier(class_weight='subsample', random_state=0)
    ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
    # Nose-style generator test: one multi-output class_weight check per
    # forest classifier.
    for name in FOREST_CLASSIFIERS:
        yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
    # Invalid class_weight values should raise errors / emit warnings.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T

    # Unknown preset string.
    est = ForestClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, est.fit, X, y)
    assert_raises(ValueError, est.fit, X, _y)

    # Combining a preset with warm_start triggers a warning.
    est = ForestClassifier(class_weight='auto', warm_start=True,
                           random_state=0)
    assert_warns(UserWarning, est.fit, X, y)
    assert_warns(UserWarning, est.fit, X, _y)

    # Multi-output targets need a list of dicts (or a preset), not a scalar.
    est = ForestClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, est.fit, X, _y)

    # A list whose length does not match the number of outputs.
    est = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, est.fit, X, _y)
def test_class_weight_errors():
    # Nose-style generator test: one error-handling check per forest
    # classifier.
    for name in FOREST_CLASSIFIERS:
        yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
    # Growing a forest incrementally with warm_start must yield the same
    # forest (size, seeds, leaf assignments) as one fit with the final size.
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]

    est_ws = None
    for n_estimators in [5, 10]:
        if est_ws is None:
            est_ws = ForestEstimator(n_estimators=n_estimators,
                                     random_state=random_state,
                                     warm_start=True)
        else:
            est_ws.set_params(n_estimators=n_estimators)
        est_ws.fit(X, y)
        assert_equal(len(est_ws), n_estimators)

    est_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
                                warm_start=False)
    est_no_ws.fit(X, y)

    # Same per-tree seeds and identical leaf indices for every sample.
    assert_equal(set(tree.random_state for tree in est_ws),
                 set(tree.random_state for tree in est_no_ws))
    assert_array_equal(est_ws.apply(X), est_no_ws.apply(X),
                       err_msg="Failed with {0}".format(name))
def test_warm_start():
    # Nose-style generator test: one warm-start check per forest estimator
    # (classifiers and regressors alike).
    for name in FOREST_ESTIMATORS:
        yield check_warm_start, name
def check_warm_start_clear(name):
    # With warm_start=False, fit must discard any previous trees and regrow.
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]

    reference = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                                random_state=1)
    reference.fit(X, y)

    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
                          random_state=2)
    est.fit(X, y)  # initializes internal state
    est.set_params(warm_start=False, random_state=1)
    est.fit(X, y)  # should wipe the old state and match `reference`
    assert_array_almost_equal(est.apply(X), reference.apply(X))
def test_warm_start_clear():
    # Nose-style generator test: one state-clearing check per forest
    # estimator.
    for name in FOREST_ESTIMATORS:
        yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
    # Shrinking n_estimators between warm-start fits is an error.
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    est = FOREST_ESTIMATORS[name](n_estimators=5, max_depth=1,
                                  warm_start=True)
    est.fit(X, y)
    est.set_params(n_estimators=4)
    assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
    # Nose-style generator test: shrinking-forest error check per estimator.
    for name in FOREST_ESTIMATORS:
        yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
    # Re-fitting with an unchanged n_estimators under warm_start should be a
    # no-op (same forest) and emit a warning.
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                          random_state=1)
    est.fit(X, y)

    est_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                            random_state=1)
    est_2.fit(X, y)
    # est_2 now equals est. Changing the random state must not matter,
    # because no new trees are grown on the next fit.
    est_2.set_params(random_state=2)
    assert_warns(UserWarning, est_2.fit, X, y)
    # Had the trees been refit, the new random state would have produced a
    # different forest.
    assert_array_equal(est.apply(X), est_2.apply(X))
def test_warm_start_equal_n_estimators():
    # Nose-style generator test: equal-size warm-start no-op check per
    # estimator.
    for name in FOREST_ESTIMATORS:
        yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
    # Growing a forest via warm_start should still compute the OOB score.
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]

    # 15 estimators avoids the 'some inputs do not have OOB scores' warning.
    reference = ForestEstimator(n_estimators=15, max_depth=3,
                                warm_start=False, random_state=1,
                                bootstrap=True, oob_score=True)
    reference.fit(X, y)

    est = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
                          random_state=1, bootstrap=True, oob_score=False)
    est.fit(X, y)
    est.set_params(warm_start=True, oob_score=True, n_estimators=15)
    est.fit(X, y)
    assert_true(hasattr(est, 'oob_score_'))
    assert_equal(reference.oob_score_, est.oob_score_)

    # The OOB score must be computed even when no additional trees need to
    # be trained.
    est_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
                            random_state=1, bootstrap=True, oob_score=False)
    est_3.fit(X, y)
    assert_true(not hasattr(est_3, 'oob_score_'))
    est_3.set_params(oob_score=True)
    ignore_warnings(est_3.fit)(X, y)
    assert_equal(reference.oob_score_, est_3.oob_score_)
def test_warm_start_oob():
    # Nose-style generator test: OOB-with-warm-start check for classifiers
    # and regressors alike.
    for name in list(FOREST_CLASSIFIERS) + list(FOREST_REGRESSORS):
        yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
    # String class labels must round-trip through fit/predict unchanged.
    classifier = RandomForestClassifier(random_state=0, bootstrap=False)

    X = np.eye(n_classes)
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])

    result = classifier.fit(X, y).predict(X)
    assert_array_equal(classifier.classes_, y)
    assert_array_equal(result, y)
| bsd-3-clause |
jseabold/statsmodels | statsmodels/tsa/arima_process.py | 5 | 28643 | """ARMA process and estimation with scipy.signal.lfilter
Notes
-----
* written without textbook, works but not sure about everything
briefly checked and it looks to be standard least squares, see below
* theoretical autocorrelation function of general ARMA
Done, relatively easy to guess solution, time consuming to get
theoretical test cases, example file contains explicit formulas for
acovf of MA(1), MA(2) and ARMA(1,1)
Properties:
Judge, ... (1985): The Theory and Practise of Econometrics
Author: josefpktd
License: BSD
"""
import numpy as np
from scipy import signal, optimize, linalg
from statsmodels.compat.pandas import Appender
from statsmodels.tools.docstring import remove_parameters, Docstring
from statsmodels.tools.validation import array_like
__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample',
'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve',
'lpol2index', 'index2lpol']
NONSTATIONARY_ERROR = """\
The model's autoregressive parameters (ar) indicate that the process
is non-stationary. arma_acovf can only be used with stationary processes.
"""
def arma_generate_sample(ar, ma, nsample, scale=1, distrvs=None,
                         axis=0, burnin=0):
    """
    Simulate data from an ARMA.
    Parameters
    ----------
    ar : array_like
        The coefficient for autoregressive lag polynomial, including zero lag.
    ma : array_like
        The coefficient for moving-average lag polynomial, including zero lag.
    nsample : int or tuple of ints
        If nsample is an integer, then this creates a 1d timeseries of
        length size. If nsample is a tuple, creates a len(nsample)
        dimensional time series where time is indexed along the input
        variable ``axis``. All series are unless ``distrvs`` generates
        dependent data.
    scale : float
        The standard deviation of noise.
    distrvs : function, random number generator
        A function that generates the random numbers, and takes ``size``
        as argument. The default is np.random.standard_normal.
    axis : int
        See nsample for details.
    burnin : int
        Number of observation at the beginning of the sample to drop.
        Used to reduce dependence on initial values.
    Returns
    -------
    ndarray
        Random sample(s) from an ARMA process.
    Notes
    -----
    As mentioned above, both the AR and MA components should include the
    coefficient on the zero-lag. This is typically 1. Further, due to the
    conventions used in signal processing used in signal.lfilter vs.
    conventions in statistics for ARMA processes, the AR parameters should
    have the opposite sign of what you might expect. See the examples below.
    Examples
    --------
    >>> import numpy as np
    >>> np.random.seed(12345)
    >>> arparams = np.array([.75, -.25])
    >>> maparams = np.array([.65, .35])
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
    >>> y = sm.tsa.arma_generate_sample(ar, ma, 250)
    >>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
    >>> model.params
    array([ 0.79044189, -0.23140636,  0.70072904,  0.40608028])
    """
    if distrvs is None:
        distrvs = np.random.standard_normal
    # Normalize a scalar sample size to a one-element shape.
    if np.ndim(nsample) == 0:
        nsample = [nsample]
    if burnin:
        # Generate extra leading observations along `axis`, then slice them
        # off so the returned sample depends less on start-up transients.
        padded = list(nsample)
        padded[axis] += burnin
        newsize = tuple(padded)
        fslice = [slice(None)] * len(newsize)
        fslice[axis] = slice(burnin, None, None)
        fslice = tuple(fslice)
    else:
        newsize = tuple(nsample)
        fslice = tuple([slice(None)] * np.ndim(newsize))
    # Innovations scaled to the requested standard deviation; lfilter applies
    # the ARMA recursion ar(L) y_t = ma(L) eta_t.
    eta = scale * distrvs(size=newsize)
    return signal.lfilter(ma, ar, eta, axis=axis)[fslice]
def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None):
    """
    Theoretical autocovariances of stationary ARMA processes
    Parameters
    ----------
    ar : array_like, 1d
        The coefficients for autoregressive lag polynomial, including zero lag.
    ma : array_like, 1d
        The coefficients for moving-average lag polynomial, including zero lag.
    nobs : int
        The number of terms (lags plus zero lag) to include in returned acovf.
    sigma2 : float
        Variance of the innovation term.
    Returns
    -------
    ndarray
        The autocovariance of ARMA process given by ar, ma.
    See Also
    --------
    arma_acf : Autocorrelation function for ARMA processes.
    acovf : Sample autocovariance estimation.
    References
    ----------
    .. [*] Brockwell, Peter J., and Richard A. Davis. 2009. Time Series:
        Theory and Methods. 2nd ed. 1991. New York, NY: Springer.
    """
    # NOTE(review): `dtype` is not described in the docstring above (kept
    # unchanged); when None it is inferred from ar, ma and sigma2.
    if dtype is None:
        dtype = np.common_type(np.array(ar), np.array(ma), np.array(sigma2))
    p = len(ar) - 1  # AR order
    q = len(ma) - 1  # MA order
    # First m autocovariances are coupled and solved jointly below.
    m = max(p, q) + 1
    if sigma2.real < 0:
        raise ValueError('Must have positive innovation variance.')
    # Short-circuit for trivial corner-case
    if p == q == 0:
        # White noise: sigma2 at lag 0, zero at every other lag.
        out = np.zeros(nobs, dtype=dtype)
        out[0] = sigma2
        return out
    elif p > 0 and np.max(np.abs(np.roots(ar))) >= 1:
        # np.roots uses highest-degree-first coefficients, so these roots are
        # the inverses of the lag-polynomial roots; magnitude >= 1 means the
        # AR part is non-stationary.
        raise ValueError(NONSTATIONARY_ERROR)
    # Get the moving average representation coefficients that we need
    ma_coeffs = arma2ma(ar, ma, lags=m)
    # Solve for the first m autocovariances via the linear system
    # described by (BD, eq. 3.3.8)
    A = np.zeros((m, m), dtype=dtype)
    b = np.zeros((m, 1), dtype=dtype)
    # We need a zero-right-padded version of ar params
    tmp_ar = np.zeros(m, dtype=dtype)
    tmp_ar[:p + 1] = ar
    for k in range(m):
        # Row k encodes gamma(k) in terms of gamma(0..m-1); the right-hand
        # side collects the innovation-variance contributions.
        A[k, :(k + 1)] = tmp_ar[:(k + 1)][::-1]
        A[k, 1:m - k] += tmp_ar[(k + 1):m]
        b[k] = sigma2 * np.dot(ma[k:q + 1], ma_coeffs[:max((q + 1 - k), 0)])
    acovf = np.zeros(max(nobs, m), dtype=dtype)
    try:
        acovf[:m] = np.linalg.solve(A, b)[:, 0]
    except np.linalg.LinAlgError:
        # A singular system also indicates non-stationarity.
        raise ValueError(NONSTATIONARY_ERROR)
    # Iteratively apply (BD, eq. 3.3.9) to solve for remaining autocovariances
    if nobs > m:
        # The later autocovariances follow the pure AR recursion; lfiltic
        # seeds the filter state with the first m values computed above.
        zi = signal.lfiltic([1], ar, acovf[:m:][::-1])
        acovf[m:] = signal.lfilter([1], ar, np.zeros(nobs - m, dtype=dtype),
                                   zi=zi)[0]
    return acovf[:nobs]
def arma_acf(ar, ma, lags=10):
    """
    Theoretical autocorrelation function of an ARMA process.
    Parameters
    ----------
    ar : array_like
        Coefficients for autoregressive lag polynomial, including zero lag.
    ma : array_like
        Coefficients for moving-average lag polynomial, including zero lag.
    lags : int
        The number of terms (lags plus zero lag) to include in returned acf.
    Returns
    -------
    ndarray
        The autocorrelations of ARMA process given by ar and ma.
    See Also
    --------
    arma_acovf : Autocovariances from ARMA processes.
    acf : Sample autocorrelation function estimation.
    acovf : Sample autocovariance function estimation.
    """
    # Normalizing by the lag-0 autocovariance turns covariances into
    # correlations.
    autocov = arma_acovf(ar, ma, lags)
    return autocov / autocov[0]
def arma_pacf(ar, ma, lags=10):
    """
    Theoretical partial autocorrelation function of an ARMA process.
    Parameters
    ----------
    ar : array_like, 1d
        The coefficients for autoregressive lag polynomial, including zero lag.
    ma : array_like, 1d
        The coefficients for moving-average lag polynomial, including zero lag.
    lags : int
        The number of terms (lags plus zero lag) to include in returned pacf.
    Returns
    -------
    ndarrray
        The partial autocorrelation of ARMA process given by ar and ma.
    Notes
    -----
    Solves yule-walker equation for each lag order up to nobs lags.
    not tested/checked yet
    """
    # TODO: Should use rank 1 inverse update
    pacf_vals = np.zeros(lags)
    pacf_vals[0] = 1.0
    autocorr = arma_acf(ar, ma, lags=lags + 1)
    # Solve the Yule-Walker system at each order; the last coefficient of
    # each solution is the partial autocorrelation at that lag.
    for order in range(2, lags + 1):
        rho = autocorr[:order]
        yw_solution = linalg.solve(linalg.toeplitz(rho[:-1]), rho[1:])
        pacf_vals[order - 1] = yw_solution[-1]
    return pacf_vals
def arma_periodogram(ar, ma, worN=None, whole=0):
    """
    Periodogram for ARMA process given by lag-polynomials ar and ma.
    Parameters
    ----------
    ar : array_like
        The autoregressive lag-polynomial with leading 1 and lhs sign.
    ma : array_like
        The moving average lag-polynomial with leading 1.
    worN : {None, int}, optional
        An option for scipy.signal.freqz (read "w or N").
        If None, then compute at 512 frequencies around the unit circle.
        If a single integer, the compute at that many frequencies.
        Otherwise, compute the response at frequencies given in worN.
    whole : {0,1}, optional
        An options for scipy.signal.freqz/
        Normally, frequencies are computed from 0 to pi (upper-half of
        unit-circle. If whole is non-zero compute frequencies from 0 to 2*pi.
    Returns
    -------
    w : ndarray
        The frequencies.
    sd : ndarray
        The periodogram, also known as the spectral density.
    Notes
    -----
    Normalization ?
    This uses signal.freqz, which does not use fft. There is a fft version
    somewhere.
    """
    freqs, response = signal.freqz(ma, ar, worN=worN, whole=whole)
    power = np.abs(response) ** 2 / np.sqrt(2 * np.pi)
    if np.isnan(response).any():
        # NaNs in the frequency response typically indicate a (seasonal)
        # unit root in the AR polynomial.
        import warnings
        warnings.warn('Warning: nan in frequency response h, maybe a unit '
                      'root', RuntimeWarning)
    return freqs, power
def arma_impulse_response(ar, ma, leads=100):
    """
    Compute the impulse response function (MA representation) for ARMA process.
    Parameters
    ----------
    ar : array_like, 1d
        The auto regressive lag polynomial.
    ma : array_like, 1d
        The moving average lag polynomial.
    leads : int
        The number of observations to calculate.
    Returns
    -------
    ndarray
        The impulse response function with nobs elements.
    Notes
    -----
    This is the same as finding the MA representation of an ARMA(p,q).
    By reversing the role of ar and ma in the function arguments, the
    returned result is the AR representation of an ARMA(p,q), i.e
    ma_representation = arma_impulse_response(ar, ma, leads=100)
    ar_representation = arma_impulse_response(ma, ar, leads=100)
    Fully tested against matlab
    Examples
    --------
    AR(1)
    >>> arma_impulse_response([1.0, -0.8], [1.], leads=10)
    array([ 1.        ,  0.8       ,  0.64      ,  0.512     ,  0.4096    ,
            0.32768   ,  0.262144  ,  0.2097152 ,  0.16777216,  0.13421773])
    this is the same as
    >>> 0.8**np.arange(10)
    array([ 1.        ,  0.8       ,  0.64      ,  0.512     ,  0.4096    ,
            0.32768   ,  0.262144  ,  0.2097152 ,  0.16777216,  0.13421773])
    MA(2)
    >>> arma_impulse_response([1.0], [1., 0.5, 0.2], leads=10)
    array([ 1. ,  0.5,  0.2,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ])
    ARMA(1,2)
    >>> arma_impulse_response([1.0, -0.8], [1., 0.5, 0.2], leads=10)
    array([ 1.        ,  1.3       ,  1.24      ,  0.992     ,  0.7936    ,
            0.63488   ,  0.507904  ,  0.4063232 ,  0.32505856,  0.26004685])
    """
    # Filtering a unit impulse through the ARMA filter yields the MA
    # (psi-weight) representation directly.
    unit_impulse = np.zeros(leads)
    unit_impulse[0] = 1.0
    return signal.lfilter(ma, ar, unit_impulse)
def arma2ma(ar, ma, lags=100):
    """
    A finite-lag approximate MA representation of an ARMA process.
    Parameters
    ----------
    ar : ndarray
        The auto regressive lag polynomial.
    ma : ndarray
        The moving average lag polynomial.
    lags : int
        The number of coefficients to calculate.
    Returns
    -------
    ndarray
        The coefficients of the MA representation with `lags` elements.
    Notes
    -----
    Equivalent to ``arma_impulse_response(ar, ma, leads=100)``
    """
    # The MA representation is exactly the impulse response of the ARMA
    # filter. (Docstring fixed: it previously claimed to return AR
    # coefficients and showed the arguments swapped — a copy-paste from
    # arma2ar.)
    return arma_impulse_response(ar, ma, leads=lags)
def arma2ar(ar, ma, lags=100):
    """
    A finite-lag AR approximation of an ARMA process.
    Parameters
    ----------
    ar : array_like
        The auto regressive lag polynomial.
    ma : array_like
        The moving average lag polynomial.
    lags : int
        The number of coefficients to calculate.
    Returns
    -------
    ndarray
        The coefficients of AR lag polynomial with nobs elements.
    Notes
    -----
    Equivalent to ``arma_impulse_response(ma, ar, leads=100)``
    """
    # Reversing the roles of ar and ma in the impulse response yields the
    # AR (pi-weight) representation.
    return arma_impulse_response(ma, ar, leads=lags)
# moved from sandbox.tsa.try_fi
def ar2arma(ar_des, p, q, n=20, mse='ar', start=None):
    """
    Find arma approximation to ar process.
    This finds the ARMA(p,q) coefficients that minimize the integrated
    squared difference between the impulse_response functions (MA
    representation) of the AR and the ARMA process. This does not check
    whether the MA lag polynomial of the ARMA process is invertible, neither
    does it check the roots of the AR lag polynomial.
    Parameters
    ----------
    ar_des : array_like
        The coefficients of original AR lag polynomial, including lag zero.
    p : int
        The length of desired AR lag polynomials.
    q : int
        The length of desired MA lag polynomials.
    n : int
        The number of terms of the impulse_response function to include in the
        objective function for the approximation.
    mse : str, 'ar'
        Not used.
    start : ndarray
        Initial values to use when finding the approximation.
    Returns
    -------
    ar_app : ndarray
        The coefficients of the AR lag polynomials of the approximation.
    ma_app : ndarray
        The coefficients of the MA lag polynomials of the approximation.
    res : tuple
        The result of optimize.leastsq.
    Notes
    -----
    Extension is possible if we want to match autocovariance instead
    of impulse response function.
    """
    # TODO: convert MA lag polynomial, ma_app, to be invertible, by mirroring
    # TODO: roots outside the unit interval to ones that are inside. How to do
    # TODO: this?

    def msear_err(arma, ar_des):
        # Residuals between the desired AR representation and the one implied
        # by the candidate ARMA coefficients (zero lags re-attached here).
        ar, ma = np.r_[1, arma[:p - 1]], np.r_[1, arma[p - 1:]]
        ar_approx = arma_impulse_response(ma, ar, n)
        return (ar_des - ar_approx)  # ((ar - ar_approx)**2).sum()

    if start is None:
        arma0 = np.r_[-0.9 * np.ones(p - 1), np.zeros(q - 1)]
    else:
        arma0 = start
    res = optimize.leastsq(msear_err, arma0, ar_des, maxfev=5000)
    arma_app = np.atleast_1d(res[0])
    # BUG FIX: a stray trailing comma previously made ar_app a 1-tuple
    # wrapping the array instead of the ndarray itself.
    ar_app = np.r_[1, arma_app[:p - 1]]
    ma_app = np.r_[1, arma_app[p - 1:]]
    return ar_app, ma_app, res
# Map from representation name to the docstring of the matching converter.
_arma_docs = {'ar': arma2ar.__doc__,
              'ma': arma2ma.__doc__}
def lpol2index(ar):
    """
    Remove zeros from lag polynomial
    Parameters
    ----------
    ar : array_like
        coefficients of lag polynomial
    Returns
    -------
    coeffs : ndarray
        non-zero coefficients of lag polynomial
    index : ndarray
        index (lags) of lag polynomial with non-zero elements
    """
    ar = array_like(ar, 'ar')
    # Lags at which the polynomial has non-zero coefficients.
    nonzero_lags = np.nonzero(ar)[0]
    return ar[nonzero_lags], nonzero_lags
def index2lpol(coeffs, index):
    """
    Expand coefficients to lag poly
    Parameters
    ----------
    coeffs : ndarray
        non-zero coefficients of lag polynomial
    index : ndarray
        index (lags) of lag polynomial with non-zero elements
    Returns
    -------
    ar : array_like
        coefficients of lag polynomial
    """
    # Dense polynomial long enough to hold the highest requested lag.
    dense = np.zeros(max(index) + 1)
    dense[index] = coeffs
    return dense
def lpol_fima(d, n=20):
    """MA representation of fractional integration
    .. math:: (1-L)^{-d} for |d|<0.5 or |d|<1 (?)
    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero
    Returns
    -------
    ma : ndarray
        coefficients of lag polynomial
    """
    # hide import inside function until we use this heavily
    from scipy.special import gammaln
    lags = np.arange(n)
    # psi_j = Gamma(j + d) / (Gamma(j + 1) * Gamma(d)), evaluated in log
    # space to avoid overflow for larger j.
    return np.exp(gammaln(d + lags) - gammaln(lags + 1) - gammaln(d))
# moved from sandbox.tsa.try_fi
def lpol_fiar(d, n=20):
    """AR representation of fractional integration
    .. math:: (1-L)^{d} for |d|<0.5 or |d|<1 (?)
    Parameters
    ----------
    d : float
        fractional power
    n : int
        number of terms to calculate, including lag zero
    Returns
    -------
    ar : ndarray
        coefficients of lag polynomial
    Notes:
    first coefficient is 1, negative signs except for first term,
    ar(L)*x_t
    """
    # hide import inside function until we use this heavily
    from scipy.special import gammaln
    lags = np.arange(n)
    # Magnitudes come from Gamma(j - d) / (Gamma(j + 1) * Gamma(-d)) in log
    # space; the leading minus gives every term a negative sign, and the
    # zero-lag coefficient is then pinned to 1.
    coeffs = -np.exp(gammaln(-d + lags) - gammaln(lags + 1) - gammaln(-d))
    coeffs[0] = 1
    return coeffs
# moved from sandbox.tsa.try_fi
def lpol_sdiff(s):
    """return coefficients for seasonal difference (1-L^s)
    just a trivial convenience function
    Parameters
    ----------
    s : int
        number of periods in season
    Returns
    -------
    sdiff : list, length s+1
    """
    # All-zero polynomial of degree s, then set the two non-zero terms.
    poly = [0] * (s + 1)
    poly[0] = 1
    poly[-1] = -1
    return poly
def deconvolve(num, den, n=None):
    """Deconvolves divisor out of signal, division of polynomials for n terms
    calculates den^{-1} * num
    Parameters
    ----------
    num : array_like
        signal or lag polynomial
    denom : array_like
        coefficients of lag polynomial (linear filter)
    n : None or int
        number of terms of quotient
    Returns
    -------
    quot : ndarray
        quotient or filtered series
    rem : ndarray
        remainder
    Notes
    -----
    If num is a time series, then this applies the linear filter den^{-1}.
    If both num and den are both lag polynomials, then this calculates the
    quotient polynomial for n terms and also returns the remainder.
    This is copied from scipy.signal.signaltools and added n as optional
    parameter.
    """
    num = np.atleast_1d(num)
    den = np.atleast_1d(den)
    N = len(num)
    D = len(den)
    if D > N and n is None:
        # Divisor degree exceeds the dividend's: quotient is empty and the
        # whole dividend is the remainder.
        quot = []
        rem = num
    else:
        if n is None:
            n = N - D + 1
        # Filtering a unit impulse through num/den produces the first n
        # quotient coefficients. (Renamed from `input`, which shadowed the
        # builtin of the same name.)
        impulse = np.zeros(n, float)
        impulse[0] = 1
        quot = signal.lfilter(num, den, impulse)
        # Remainder = dividend - divisor * quotient (zero-padded to match).
        num_approx = signal.convolve(den, quot, mode='full')
        if len(num) < len(num_approx):  # 1d only ?
            num = np.concatenate((num, np.zeros(len(num_approx) - len(num))))
        rem = num - num_approx
    return quot, rem
# Shared docstring for ArmaProcess.generate_sample: strip the ar/ma
# parameters (supplied by the instance) and drop the module-level
# Notes/Examples sections from arma_generate_sample's docstring.
_generate_sample_doc = Docstring(arma_generate_sample.__doc__)
_generate_sample_doc.remove_parameters(['ar', 'ma'])
_generate_sample_doc.replace_block('Notes', [])
_generate_sample_doc.replace_block('Examples', [])
class ArmaProcess(object):
    r"""
    Theoretical properties of an ARMA process for specified lag-polynomials.
    Parameters
    ----------
    ar : array_like
        Coefficient for autoregressive lag polynomial, including zero lag.
        Must be entered using the signs from the lag polynomial representation.
        See the notes for more information about the sign.
    ma : array_like
        Coefficient for moving-average lag polynomial, including zero lag.
    nobs : int, optional
        Length of simulated time series. Used, for example, if a sample is
        generated. See example.
    Notes
    -----
    Both the AR and MA components must include the coefficient on the
    zero-lag. In almost all cases these values should be 1. Further, due to
    using the lag-polynomial representation, the AR parameters should
    have the opposite sign of what one would write in the ARMA representation.
    See the examples below.
    The ARMA(p,q) process is described by
    .. math::
        y_{t}=\phi_{1}y_{t-1}+\ldots+\phi_{p}y_{t-p}+\theta_{1}\epsilon_{t-1}
        +\ldots+\theta_{q}\epsilon_{t-q}+\epsilon_{t}
    and the parameterization used in this function uses the lag-polynomial
    representation,
    .. math::
        \left(1-\phi_{1}L-\ldots-\phi_{p}L^{p}\right)y_{t} =
        \left(1+\theta_{1}L+\ldots+\theta_{q}L^{q}\right)\epsilon_{t}
    Examples
    --------
    ARMA(2,2) with AR coefficients 0.75 and -0.25, and MA coefficients 0.65 and 0.35
    >>> import statsmodels.api as sm
    >>> import numpy as np
    >>> np.random.seed(12345)
    >>> arparams = np.array([.75, -.25])
    >>> maparams = np.array([.65, .35])
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
    >>> arma_process = sm.tsa.ArmaProcess(ar, ma)
    >>> arma_process.isstationary
    True
    >>> arma_process.isinvertible
    True
    >>> arma_process.arroots
    array([1.5-1.32287566j, 1.5+1.32287566j])
    >>> y = arma_process.generate_sample(250)
    >>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
    >>> model.params
    array([ 0.79044189, -0.23140636,  0.70072904,  0.40608028])
    The same ARMA(2,2) Using the from_coeffs class method
    >>> arma_process = sm.tsa.ArmaProcess.from_coeffs(arparams, maparams)
    >>> arma_process.arroots
    array([1.5-1.32287566j, 1.5+1.32287566j])
    """
    # TODO: Check unit root behavior
    def __init__(self, ar=None, ma=None, nobs=100):
        # Default to the trivial (white noise) lag polynomials.
        if ar is None:
            ar = np.array([1.])
        if ma is None:
            ma = np.array([1.])
        self.ar = array_like(ar, 'ar')
        self.ma = array_like(ma, 'ma')
        # Coefficients in the statistics (ARMA) convention: zero lag dropped
        # and AR signs flipped.
        self.arcoefs = -self.ar[1:]
        self.macoefs = self.ma[1:]
        self.arpoly = np.polynomial.Polynomial(self.ar)
        self.mapoly = np.polynomial.Polynomial(self.ma)
        self.nobs = nobs

    @classmethod
    def from_coeffs(cls, arcoefs=None, macoefs=None, nobs=100):
        """
        Create ArmaProcess from an ARMA representation.
        Parameters
        ----------
        arcoefs : array_like
            Coefficient for autoregressive lag polynomial, not including zero
            lag. The sign is inverted to conform to the usual time series
            representation of an ARMA process in statistics. See the class
            docstring for more information.
        macoefs : array_like
            Coefficient for moving-average lag polynomial, excluding zero lag.
        nobs : int, optional
            Length of simulated time series. Used, for example, if a sample
            is generated.
        Returns
        -------
        ArmaProcess
            Class instance initialized with arcoefs and macoefs.
        Examples
        --------
        >>> arparams = [.75, -.25]
        >>> maparams = [.65, .35]
        >>> arma_process = sm.tsa.ArmaProcess.from_coeffs(ar, ma)
        >>> arma_process.isstationary
        True
        >>> arma_process.isinvertible
        True
        """
        arcoefs = [] if arcoefs is None else arcoefs
        macoefs = [] if macoefs is None else macoefs
        # Prepend the zero-lag 1 and flip the AR signs to move from the
        # statistics convention to the lag-polynomial convention.
        return cls(np.r_[1, -np.asarray(arcoefs)],
                   np.r_[1, np.asarray(macoefs)],
                   nobs=nobs)

    @classmethod
    def from_estimation(cls, model_results, nobs=None):
        """
        Create an ArmaProcess from the results of an ARMA estimation.
        Parameters
        ----------
        model_results : ARMAResults instance
            A fitted model.
        nobs : int, optional
            If None, nobs is taken from the results.
        Returns
        -------
        ArmaProcess
            Class instance initialized from model_results.
        """
        arcoefs = model_results.arparams
        macoefs = model_results.maparams
        nobs = nobs or model_results.nobs
        return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)

    def __mul__(self, oth):
        # Multiplying two processes multiplies their lag polynomials; a pair
        # of (ar, ma) coefficient sequences is accepted as well.
        if isinstance(oth, self.__class__):
            ar = (self.arpoly * oth.arpoly).coef
            ma = (self.mapoly * oth.mapoly).coef
        else:
            try:
                aroth, maoth = oth
                arpolyoth = np.polynomial.Polynomial(aroth)
                mapolyoth = np.polynomial.Polynomial(maoth)
                ar = (self.arpoly * arpolyoth).coef
                ma = (self.mapoly * mapolyoth).coef
            except (ValueError, TypeError):
                # Narrowed from a bare `except`: only unpacking/coercion
                # failures indicate an unsupported operand.
                raise TypeError('Other type is not a valid type')
        return self.__class__(ar, ma, nobs=self.nobs)

    def __repr__(self):
        msg = 'ArmaProcess({0}, {1}, nobs={2}) at {3}'
        return msg.format(self.ar.tolist(), self.ma.tolist(),
                          self.nobs, hex(id(self)))

    def __str__(self):
        return 'ArmaProcess\nAR: {0}\nMA: {1}'.format(self.ar.tolist(),
                                                      self.ma.tolist())

    @Appender(remove_parameters(arma_acovf.__doc__, ['ar', 'ma', 'sigma2']))
    def acovf(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_acovf(self.ar, self.ma, nobs=nobs)

    @Appender(remove_parameters(arma_acf.__doc__, ['ar', 'ma']))
    def acf(self, lags=None):
        lags = lags or self.nobs
        return arma_acf(self.ar, self.ma, lags=lags)

    @Appender(remove_parameters(arma_pacf.__doc__, ['ar', 'ma']))
    def pacf(self, lags=None):
        lags = lags or self.nobs
        return arma_pacf(self.ar, self.ma, lags=lags)

    @Appender(remove_parameters(arma_periodogram.__doc__, ['ar', 'ma', 'worN',
                                                           'whole']))
    def periodogram(self, nobs=None):
        nobs = nobs or self.nobs
        return arma_periodogram(self.ar, self.ma, worN=nobs)

    @Appender(remove_parameters(arma_impulse_response.__doc__, ['ar', 'ma']))
    def impulse_response(self, leads=None):
        leads = leads or self.nobs
        return arma_impulse_response(self.ar, self.ma, leads=leads)

    @Appender(remove_parameters(arma2ma.__doc__, ['ar', 'ma']))
    def arma2ma(self, lags=None):
        # BUG FIX: previously read `self.lags`, an attribute that is never
        # set, so calling this method without `lags` raised AttributeError.
        lags = lags or self.nobs
        return arma2ma(self.ar, self.ma, lags=lags)

    @Appender(remove_parameters(arma2ar.__doc__, ['ar', 'ma']))
    def arma2ar(self, lags=None):
        # BUG FIX: same `self.lags` -> `self.nobs` fix as in arma2ma.
        lags = lags or self.nobs
        return arma2ar(self.ar, self.ma, lags=lags)

    @property
    def arroots(self):
        """Roots of autoregressive lag-polynomial"""
        return self.arpoly.roots()

    @property
    def maroots(self):
        """Roots of moving average lag-polynomial"""
        return self.mapoly.roots()

    @property
    def isstationary(self):
        """
        Arma process is stationary if AR roots are outside unit circle.
        Returns
        -------
        bool
            True if autoregressive roots are outside unit circle.
        """
        return bool(np.all(np.abs(self.arroots) > 1.0))

    @property
    def isinvertible(self):
        """
        Arma process is invertible if MA roots are outside unit circle.
        Returns
        -------
        bool
            True if moving average roots are outside unit circle.
        """
        return bool(np.all(np.abs(self.maroots) > 1))

    def invertroots(self, retnew=False):
        """
        Make MA polynomial invertible by inverting roots inside unit circle.
        Parameters
        ----------
        retnew : bool
            If False (default), then return the lag-polynomial as array.
            If True, then return a new instance with invertible MA-polynomial.
        Returns
        -------
        manew : ndarray
            A new invertible MA lag-polynomial, returned if retnew is false.
        wasinvertible : bool
            True if the MA lag-polynomial was already invertible, returned if
            retnew is false.
        armaprocess : new instance of class
            If retnew is true, then return a new instance with invertible
            MA-polynomial.
        """
        # TODO: variable returns like this?
        pr = self.maroots
        mainv = self.ma
        invertible = self.isinvertible
        if not invertible:
            # Reflect roots inside the unit circle to their reciprocals and
            # rebuild the polynomial, normalizing the zero-lag term to 1.
            pr[np.abs(pr) < 1] = 1. / pr[np.abs(pr) < 1]
            pnew = np.polynomial.Polynomial.fromroots(pr)
            mainv = pnew.coef / pnew.coef[0]
        if retnew:
            return self.__class__(self.ar, mainv, nobs=self.nobs)
        else:
            return mainv, invertible

    @Appender(str(_generate_sample_doc))
    def generate_sample(self, nsample=100, scale=1., distrvs=None, axis=0,
                        burnin=0):
        return arma_generate_sample(self.ar, self.ma, nsample, scale, distrvs,
                                    axis=axis, burnin=burnin)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.