| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
jreback/pandas
|
pandas/tests/frame/methods/test_droplevel.py
|
3
|
1236
|
import pytest
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
class TestDropLevel:
def test_droplevel(self, frame_or_series):
# GH#20342
cols = MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
mi = MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"])
df = DataFrame([[3, 4], [7, 8], [11, 12]], index=mi, columns=cols)
if frame_or_series is not DataFrame:
df = df.iloc[:, 0]
# test that dropping of a level in index works
expected = df.reset_index("a", drop=True)
result = df.droplevel("a", axis="index")
tm.assert_equal(result, expected)
if frame_or_series is DataFrame:
# test that dropping of a level in columns works
expected = df.copy()
expected.columns = Index(["c", "d"], name="level_1")
result = df.droplevel("level_2", axis="columns")
tm.assert_equal(result, expected)
else:
# test that droplevel raises ValueError on axis != 0
with pytest.raises(ValueError, match="No axis named columns"):
df.droplevel(1, axis="columns")
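# Editor's note: a minimal, hedged usage sketch of what this test
# exercises (assumes a recent pandas; the names below are illustrative):
if __name__ == "__main__":
    import pandas as pd
    mi = pd.MultiIndex.from_tuples([(1, 2), (5, 6)], names=["a", "b"])
    demo = pd.DataFrame({"x": [10, 20]}, index=mi)
    # Dropping level "a" leaves a plain Index named "b", which is
    # exactly what reset_index("a", drop=True) produces on the index axis.
    assert demo.droplevel("a").equals(demo.reset_index("a", drop=True))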
|
bsd-3-clause
|
mogeiwang/nest
|
testsuite/manualtests/test_tsodyks_depr_fac.py
|
13
|
1136
|
# -*- coding: utf-8 -*-
#
# test_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy import *
from matplotlib.pylab import *
from matplotlib.mlab import *
def plot_spikes():
dt = 0.1 # time resolution
nbins = 1000
N = 500 # number of neurons
vm = load('voltmeter-0-0-4.dat')
figure(1)
clf()
plot(vm[:,0], vm[:,1], 'r')
xlabel('time / ms')
ylabel('$V_m [mV]$')
savefig('test_tsodyks_depressing.png')
plot_spikes()
show()
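# Editor's note: matplotlib.mlab.load() has long been removed; a hedged,
# modern sketch of the same plot (same assumed voltmeter data file) would be:
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   vm = np.loadtxt('voltmeter-0-0-4.dat')
#   plt.plot(vm[:, 0], vm[:, 1], 'r')
#   plt.xlabel('time / ms')
#   plt.ylabel(r'$V_m$ [mV]')
#   plt.savefig('test_tsodyks_depressing.png')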
|
gpl-2.0
|
Vvucinic/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/tools/util.py
|
9
|
2780
|
import numpy as np
import pandas.lib as lib
import pandas as pd
from pandas.compat import reduce
from pandas.core.index import Index
from pandas.core import common as com
def match(needles, haystack):
haystack = Index(haystack)
needles = Index(needles)
return haystack.get_indexer(needles)
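# Editor's sketch (hedged): match() returns, for each needle, its position
# in the haystack, or -1 when absent, e.g.
#   >>> match(['b', 'x'], ['a', 'b', 'c'])
#   array([ 1, -1])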
def cartesian_product(X):
'''
Numpy version of itertools.product or pandas.compat.product.
Sometimes faster (for large inputs)...
Examples
--------
>>> cartesian_product([list('ABC'), [1, 2]])
[array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
array([1, 2, 1, 2, 1, 2])]
'''
lenX = np.fromiter((len(x) for x in X), dtype=int)
cumprodX = np.cumproduct(lenX)
a = np.roll(cumprodX, 1)
a[0] = 1
b = cumprodX[-1] / cumprodX
return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
np.product(a[i]))
for i, x in enumerate(X)]
def _compose2(f, g):
"""Compose 2 callables"""
return lambda *args, **kwargs: f(g(*args, **kwargs))
def compose(*funcs):
"""Compose 2 or more callables"""
assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
return reduce(_compose2, funcs)
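# Editor's sketch (hedged): compose() chains right to left, so
# compose(f, g, h)(x) evaluates f(g(h(x))), e.g.
#   >>> compose(str, abs)(-3)
#   '3'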
def to_numeric(arg, errors='raise'):
"""
Convert argument to a numeric type.
Parameters
----------
arg : list, tuple or array of objects, or Series
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaN
- If 'ignore', then invalid parsing will return the input
Returns
-------
ret : numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> import pandas as pd
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='ignore')
>>> pd.to_numeric(s, errors='coerce')
"""
index = name = None
if isinstance(arg, pd.Series):
index, name = arg.index, arg.name
elif isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
conv = arg
arg = com._ensure_object(arg)
coerce_numeric = False if errors in ('ignore', 'raise') else True
try:
conv = lib.maybe_convert_numeric(arg,
set(),
coerce_numeric=coerce_numeric)
    except Exception:
if errors == 'raise':
raise
if index is not None:
return pd.Series(conv, index=index, name=name)
else:
return conv
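# Editor's note: a hedged usage sketch of the two public helpers above,
# written against this legacy pandas API (outputs shown as comments):
#
#   >>> cartesian_product([list('AB'), [1, 2]])
#   [array(['A', 'A', 'B', 'B'], dtype='|S1'), array([1, 2, 1, 2])]
#   >>> to_numeric(pd.Series(['1.0', '2', -3]))
#   0    1.0
#   1    2.0
#   2   -3.0
#   dtype: float64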
|
artistic-2.0
|
ephes/scikit-learn
|
examples/cluster/plot_color_quantization.py
|
297
|
3443
|
# -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (which needs
# to be in the range [0, 1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
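# Editor's note: recreate_image() above is deliberately explicit; a
# vectorized equivalent (same output, no Python-level loops) is simply
# fancy indexing followed by a reshape:
def recreate_image_fast(codebook, labels, w, h):
    """Vectorized variant: look up each label's color, then reshape."""
    return codebook[labels].reshape(w, h, -1)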
|
bsd-3-clause
|
secdev/scapy
|
scapy/plist.py
|
2
|
31226
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
PacketList: holds several packets and allows operations to be performed on them.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
from collections import defaultdict
from scapy.compat import lambda_tuple_converter
from scapy.config import conf
from scapy.base_classes import (
BasePacket,
BasePacketList,
PacketList_metaclass,
SetGen,
_CanvasDumpExtended,
)
from scapy.utils import do_graph, hexdump, make_table, make_lined_table, \
make_tex_table, issubtype
from scapy.extlib import plt, Line2D, \
MATPLOTLIB_INLINED, MATPLOTLIB_DEFAULT_PLOT_KARGS
from functools import reduce
import scapy.modules.six as six
from scapy.modules.six.moves import range, zip
# typings
from scapy.compat import (
Any,
Callable,
DefaultDict,
Dict,
Generic,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
from scapy.packet import Packet
#############
# Results #
#############
QueryAnswer = NamedTuple(
"QueryAnswer",
[("query", Packet), ("answer", Packet)]
)
_Inner = TypeVar("_Inner", Packet, QueryAnswer)
@six.add_metaclass(PacketList_metaclass)
class _PacketList(Generic[_Inner]):
__slots__ = ["stats", "res", "listname"]
def __init__(self,
res=None, # type: Optional[Union[_PacketList[_Inner], List[_Inner]]] # noqa: E501
name="PacketList", # type: str
stats=None # type: Optional[List[Type[Packet]]]
):
# type: (...) -> None
"""create a packet list from a list of packets
res: the list of packets
stats: a list of classes that will appear in the stats (defaults to [TCP,UDP,ICMP])""" # noqa: E501
if stats is None:
stats = conf.stats_classic_protocols
self.stats = stats
if res is None:
self.res = [] # type: List[_Inner]
elif isinstance(res, _PacketList):
self.res = res.res
else:
self.res = res
self.listname = name
def __len__(self):
# type: () -> int
return len(self.res)
def _elt2pkt(self, elt):
# type: (_Inner) -> Packet
return elt # type: ignore
def _elt2sum(self, elt):
# type: (_Inner) -> str
return elt.summary() # type: ignore
def _elt2show(self, elt):
# type: (_Inner) -> str
return self._elt2sum(elt)
def __repr__(self):
# type: () -> str
stats = {x: 0 for x in self.stats}
other = 0
for r in self.res:
f = 0
for p in stats:
if self._elt2pkt(r).haslayer(p):
stats[p] += 1
f = 1
break
if not f:
other += 1
s = ""
ct = conf.color_theme
for p in self.stats:
s += " %s%s%s" % (ct.packetlist_proto(p._name),
ct.punct(":"),
ct.packetlist_value(stats[p]))
s += " %s%s%s" % (ct.packetlist_proto("Other"),
ct.punct(":"),
ct.packetlist_value(other))
return "%s%s%s%s%s" % (ct.punct("<"),
ct.packetlist_name(self.listname),
ct.punct(":"),
s,
ct.punct(">"))
def __getstate__(self):
# type: () -> Dict[str, Any]
"""
Creates a basic representation of the instance, used in
conjunction with __setstate__() e.g. by pickle
:returns: dict representing this instance
"""
state = {
'res': self.res,
'stats': self.stats,
'listname': self.listname
}
return state
def __setstate__(self, state):
# type: (Dict[str, Any]) -> None
"""
Sets instance attributes to values given by state, used in
conjunction with __getstate__() e.g. by pickle
:param state: dict representing this instance
"""
self.res = state['res']
self.stats = state['stats']
self.listname = state['listname']
def __iter__(self):
# type: () -> Iterator[_Inner]
return self.res.__iter__()
def __getattr__(self, attr):
# type: (str) -> Any
return getattr(self.res, attr)
def __getitem__(self, item):
# type: (Any) -> Any
if issubtype(item, BasePacket):
return self.__class__([x for x in self.res if item in self._elt2pkt(x)], # noqa: E501
name="%s from %s" % (item.__name__, self.listname)) # noqa: E501
if isinstance(item, slice):
return self.__class__(self.res.__getitem__(item),
name="mod %s" % self.listname)
return self.res.__getitem__(item)
_T = TypeVar('_T', 'SndRcvList', 'PacketList')
# Hinting hack: type self
def __add__(self, # type: _PacketList._T # type: ignore
other # type: _PacketList._T
):
# type: (...) -> _PacketList._T
return self.__class__(
self.res + other.res,
name="%s+%s" % (
self.listname,
other.listname
)
)
def summary(self,
prn=None, # type: Optional[Callable[..., Any]]
lfilter=None # type: Optional[Callable[..., bool]]
):
# type: (...) -> None
"""prints a summary of each packet
:param prn: function to apply to each packet instead of
lambda x:x.summary()
:param lfilter: truth function to apply to each packet to decide
whether it will be displayed
"""
# Python 2 backward compatibility
if prn is not None:
prn = lambda_tuple_converter(prn)
if lfilter is not None:
lfilter = lambda_tuple_converter(lfilter)
for r in self.res:
if lfilter is not None:
if not lfilter(*r):
continue
if prn is None:
print(self._elt2sum(r))
else:
print(prn(*r))
def nsummary(self,
prn=None, # type: Optional[Callable[..., Any]]
lfilter=None # type: Optional[Callable[..., bool]]
):
# type: (...) -> None
"""prints a summary of each packet with the packet's number
:param prn: function to apply to each packet instead of
lambda x:x.summary()
:param lfilter: truth function to apply to each packet to decide
whether it will be displayed
"""
# Python 2 backward compatibility
if prn is not None:
prn = lambda_tuple_converter(prn)
if lfilter is not None:
lfilter = lambda_tuple_converter(lfilter)
for i, res in enumerate(self.res):
if lfilter is not None:
if not lfilter(*res):
continue
print(conf.color_theme.id(i, fmt="%04i"), end=' ')
if prn is None:
print(self._elt2sum(res))
else:
print(prn(*res))
def show(self, *args, **kargs):
# type: (*Any, **Any) -> None
"""Best way to display the packet list. Defaults to nsummary() method""" # noqa: E501
return self.nsummary(*args, **kargs)
def filter(self, func):
# type: (Callable[..., bool]) -> _PacketList[_Inner]
"""Returns a packet list filtered by a truth function. This truth
function has to take a packet as the only argument and return
a boolean value.
"""
# Python 2 backward compatibility
func = lambda_tuple_converter(func)
return self.__class__([x for x in self.res if func(*x)],
name="filtered %s" % self.listname)
def make_table(self, *args, **kargs):
# type: (Any, Any) -> Optional[str]
"""Prints a table using a function that returns for each packet its head column value, head row value and displayed value # noqa: E501
ex: p.make_table(lambda x:(x[IP].dst, x[TCP].dport, x[TCP].sprintf("%flags%")) """ # noqa: E501
return make_table(self.res, *args, **kargs)
def make_lined_table(self, *args, **kargs):
# type: (Any, Any) -> Optional[str]
"""Same as make_table, but print a table with lines"""
return make_lined_table(self.res, *args, **kargs)
def make_tex_table(self, *args, **kargs):
# type: (Any, Any) -> Optional[str]
"""Same as make_table, but print a table with LaTeX syntax"""
return make_tex_table(self.res, *args, **kargs)
def plot(self,
f, # type: Callable[..., Any]
lfilter=None, # type: Optional[Callable[..., bool]]
plot_xy=False, # type: bool
**kargs # type: Any
):
# type: (...) -> Line2D
"""Applies a function to each packet to get a value that will be plotted
with matplotlib. A list of matplotlib.lines.Line2D is returned.
lfilter: a truth function that decides whether a packet must be plotted
"""
# Python 2 backward compatibility
f = lambda_tuple_converter(f)
if lfilter is not None:
lfilter = lambda_tuple_converter(lfilter)
# Get the list of packets
if lfilter is None:
lst_pkts = [f(*e) for e in self.res]
else:
lst_pkts = [f(*e) for e in self.res if lfilter(*e)]
# Mimic the default gnuplot output
if kargs == {}:
kargs = MATPLOTLIB_DEFAULT_PLOT_KARGS
if plot_xy:
lines = plt.plot(*zip(*lst_pkts), **kargs)
else:
lines = plt.plot(lst_pkts, **kargs)
# Call show() if matplotlib is not inlined
if not MATPLOTLIB_INLINED:
plt.show()
return lines
def diffplot(self,
f, # type: Callable[..., Any]
delay=1, # type: int
lfilter=None, # type: Optional[Callable[..., bool]]
**kargs # type: Any
):
# type: (...) -> Line2D
"""diffplot(f, delay=1, lfilter=None)
Applies a function to couples (l[i],l[i+delay])
A list of matplotlib.lines.Line2D is returned.
"""
# Get the list of packets
if lfilter is None:
            lst_pkts = [f(self.res[i], self.res[i + delay])
                        for i in range(len(self.res) - delay)]
        else:
            lst_pkts = [f(self.res[i], self.res[i + delay])
                        for i in range(len(self.res) - delay)
                        if lfilter(self.res[i])]
# Mimic the default gnuplot output
if kargs == {}:
kargs = MATPLOTLIB_DEFAULT_PLOT_KARGS
lines = plt.plot(lst_pkts, **kargs)
# Call show() if matplotlib is not inlined
if not MATPLOTLIB_INLINED:
plt.show()
return lines
def multiplot(self,
f, # type: Callable[..., Any]
lfilter=None, # type: Optional[Callable[..., Any]]
plot_xy=False, # type: bool
**kargs # type: Any
):
# type: (...) -> Line2D
"""Uses a function that returns a label and a value for this label, then
plots all the values label by label.
A list of matplotlib.lines.Line2D is returned.
"""
# Python 2 backward compatibility
f = lambda_tuple_converter(f)
if lfilter is not None:
lfilter = lambda_tuple_converter(lfilter)
# Get the list of packets
if lfilter is None:
lst_pkts = (f(*e) for e in self.res)
else:
lst_pkts = (f(*e) for e in self.res if lfilter(*e))
# Apply the function f to the packets
d = {} # type: Dict[str, List[float]]
for k, v in lst_pkts:
d.setdefault(k, []).append(v)
# Mimic the default gnuplot output
if not kargs:
kargs = MATPLOTLIB_DEFAULT_PLOT_KARGS
if plot_xy:
lines = [plt.plot(*zip(*pl), **dict(kargs, label=k))
for k, pl in six.iteritems(d)]
else:
lines = [plt.plot(pl, **dict(kargs, label=k))
for k, pl in six.iteritems(d)]
plt.legend(loc="center right", bbox_to_anchor=(1.5, 0.5))
# Call show() if matplotlib is not inlined
if not MATPLOTLIB_INLINED:
plt.show()
return lines
def rawhexdump(self):
# type: () -> None
"""Prints an hexadecimal dump of each packet in the list"""
for p in self:
hexdump(self._elt2pkt(p))
def hexraw(self, lfilter=None):
# type: (Optional[Callable[..., bool]]) -> None
"""Same as nsummary(), except that if a packet has a Raw layer, it will be hexdumped # noqa: E501
lfilter: a truth function that decides whether a packet must be displayed""" # noqa: E501
for i, res in enumerate(self.res):
p = self._elt2pkt(res)
if lfilter is not None and not lfilter(p):
continue
print("%s %s %s" % (conf.color_theme.id(i, fmt="%04i"),
p.sprintf("%.time%"),
self._elt2sum(res)))
if p.haslayer(conf.raw_layer):
hexdump(p.getlayer(conf.raw_layer).load) # type: ignore
def hexdump(self, lfilter=None):
# type: (Optional[Callable[..., bool]]) -> None
"""Same as nsummary(), except that packets are also hexdumped
lfilter: a truth function that decides whether a packet must be displayed""" # noqa: E501
for i, res in enumerate(self.res):
p = self._elt2pkt(res)
if lfilter is not None and not lfilter(p):
continue
print("%s %s %s" % (conf.color_theme.id(i, fmt="%04i"),
p.sprintf("%.time%"),
self._elt2sum(res)))
hexdump(p)
def padding(self, lfilter=None):
# type: (Optional[Callable[..., bool]]) -> None
"""Same as hexraw(), for Padding layer"""
for i, res in enumerate(self.res):
p = self._elt2pkt(res)
if p.haslayer(conf.padding_layer):
if lfilter is None or lfilter(p):
print("%s %s %s" % (conf.color_theme.id(i, fmt="%04i"),
p.sprintf("%.time%"),
self._elt2sum(res)))
hexdump(
p.getlayer(conf.padding_layer).load # type: ignore
)
def nzpadding(self, lfilter=None):
# type: (Optional[Callable[..., bool]]) -> None
"""Same as padding() but only non null padding"""
for i, res in enumerate(self.res):
p = self._elt2pkt(res)
if p.haslayer(conf.padding_layer):
pad = p.getlayer(conf.padding_layer).load # type: ignore
if pad == pad[0] * len(pad):
continue
if lfilter is None or lfilter(p):
print("%s %s %s" % (conf.color_theme.id(i, fmt="%04i"),
p.sprintf("%.time%"),
self._elt2sum(res)))
hexdump(
p.getlayer(conf.padding_layer).load # type: ignore
)
def conversations(self,
getsrcdst=None, # type: Optional[Callable[[Packet], Tuple[Any, ...]]] # noqa: E501
**kargs # type: Any
):
# type: (...) -> Any
"""Graphes a conversations between sources and destinations and display it
(using graphviz and imagemagick)
:param getsrcdst: a function that takes an element of the list and
returns the source, the destination and optionally
a label. By default, returns the IP source and
destination from IP and ARP layers
:param type: output type (svg, ps, gif, jpg, etc.), passed to dot's
"-T" option
:param target: filename or redirect. Defaults pipe to Imagemagick's
display program
:param prog: which graphviz program to use
"""
if getsrcdst is None:
def _getsrcdst(pkt):
# type: (Packet) -> Tuple[str, str]
"""Extract src and dst addresses"""
if 'IP' in pkt:
return (pkt['IP'].src, pkt['IP'].dst)
if 'IPv6' in pkt:
return (pkt['IPv6'].src, pkt['IPv6'].dst)
if 'ARP' in pkt:
return (pkt['ARP'].psrc, pkt['ARP'].pdst)
raise TypeError()
getsrcdst = _getsrcdst
conv = {} # type: Dict[Tuple[Any, ...], Any]
for p in self.res:
p = self._elt2pkt(p)
try:
c = getsrcdst(p)
except Exception:
# No warning here: it's OK that getsrcdst() raises an
# exception, since it might be, for example, a
# function that expects a specific layer in each
# packet. The try/except approach is faster and
# considered more Pythonic than adding tests.
continue
if len(c) == 3:
conv.setdefault(c[:2], set()).add(c[2])
else:
conv[c] = conv.get(c, 0) + 1
gr = 'digraph "conv" {\n'
for (s, d), l in six.iteritems(conv):
gr += '\t "%s" -> "%s" [label="%s"]\n' % (
s, d, ', '.join(str(x) for x in l) if isinstance(l, set) else l
)
gr += "}\n"
return do_graph(gr, **kargs)
def afterglow(self,
src=None, # type: Optional[Callable[[_Inner], Any]]
event=None, # type: Optional[Callable[[_Inner], Any]]
dst=None, # type: Optional[Callable[[_Inner], Any]]
**kargs # type: Any
):
# type: (...) -> Any
"""Experimental clone attempt of http://sourceforge.net/projects/afterglow
each datum is reduced as src -> event -> dst and the data are graphed.
by default we have IP.src -> IP.dport -> IP.dst"""
if src is None:
src = lambda *x: x[0]['IP'].src
if event is None:
event = lambda *x: x[0]['IP'].dport
if dst is None:
dst = lambda *x: x[0]['IP'].dst
sl = {} # type: Dict[Any, Tuple[Union[float, int], List[Any]]]
el = {} # type: Dict[Any, Tuple[Union[float, int], List[Any]]]
dl = {} # type: Dict[Any, int]
for i in self.res:
try:
s, e, d = src(i), event(i), dst(i)
if s in sl:
n, lst = sl[s]
n += 1
if e not in lst:
lst.append(e)
sl[s] = (n, lst)
else:
sl[s] = (1, [e])
if e in el:
n, lst = el[e]
n += 1
if d not in lst:
lst.append(d)
el[e] = (n, lst)
else:
el[e] = (1, [d])
dl[d] = dl.get(d, 0) + 1
except Exception:
continue
def minmax(x):
# type: (Any) -> Tuple[int, int]
m, M = reduce(lambda a, b: (min(a[0], b[0]), max(a[1], b[1])),
((a, a) for a in x))
if m == M:
m = 0
if M == 0:
M = 1
return m, M
mins, maxs = minmax(x for x, _ in six.itervalues(sl))
mine, maxe = minmax(x for x, _ in six.itervalues(el))
mind, maxd = minmax(six.itervalues(dl))
gr = 'digraph "afterglow" {\n\tedge [len=2.5];\n'
gr += "# src nodes\n"
for s in sl:
n, _ = sl[s]
n = 1 + float(n - mins) / (maxs - mins)
gr += '"src.%s" [label = "%s", shape=box, fillcolor="#FF0000", style=filled, fixedsize=1, height=%.2f,width=%.2f];\n' % (repr(s), repr(s), n, n) # noqa: E501
gr += "# event nodes\n"
for e in el:
n, _ = el[e]
n = 1 + float(n - mine) / (maxe - mine)
gr += '"evt.%s" [label = "%s", shape=circle, fillcolor="#00FFFF", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(e), repr(e), n, n) # noqa: E501
for d in dl:
n = dl[d]
n = 1 + float(n - mind) / (maxd - mind)
gr += '"dst.%s" [label = "%s", shape=triangle, fillcolor="#0000ff", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(d), repr(d), n, n) # noqa: E501
gr += "###\n"
for s in sl:
n, lst1 = sl[s]
for e in lst1:
gr += ' "src.%s" -> "evt.%s";\n' % (repr(s), repr(e))
for e in el:
n, lst2 = el[e]
for d in lst2:
gr += ' "evt.%s" -> "dst.%s";\n' % (repr(e), repr(d))
gr += "}"
return do_graph(gr, **kargs)
def canvas_dump(self, **kargs):
# type: (Any) -> Any # Using Any since pyx is imported later
import pyx
d = pyx.document.document()
len_res = len(self.res)
for i, res in enumerate(self.res):
c = self._elt2pkt(res).canvas_dump(**kargs)
cbb = c.bbox()
c.text(cbb.left(), cbb.top() + 1, r"\font\cmssfont=cmss12\cmssfont{Frame %i/%i}" % (i, len_res), [pyx.text.size.LARGE]) # noqa: E501
if conf.verb >= 2:
os.write(1, b".")
d.append(pyx.document.page(c, paperformat=pyx.document.paperformat.A4, # noqa: E501
margin=1 * pyx.unit.t_cm,
fittosize=1))
return d
def sessions(
self,
session_extractor=None # type: Optional[Callable[[Packet], str]]
):
# type: (...) -> Dict[str, _PacketList[_Inner]]
if session_extractor is None:
def _session_extractor(p):
# type: (Packet) -> str
"""Extract sessions from packets"""
if 'Ether' in p:
if 'IP' in p or 'IPv6' in p:
ip_src_fmt = "{IP:%IP.src%}{IPv6:%IPv6.src%}"
ip_dst_fmt = "{IP:%IP.dst%}{IPv6:%IPv6.dst%}"
addr_fmt = (ip_src_fmt, ip_dst_fmt)
if 'TCP' in p:
fmt = "TCP {}:%r,TCP.sport% > {}:%r,TCP.dport%"
elif 'UDP' in p:
fmt = "UDP {}:%r,UDP.sport% > {}:%r,UDP.dport%"
elif 'ICMP' in p:
fmt = "ICMP {} > {} type=%r,ICMP.type% code=%r," \
"ICMP.code% id=%ICMP.id%"
elif 'ICMPv6' in p:
fmt = "ICMPv6 {} > {} type=%r,ICMPv6.type% " \
"code=%r,ICMPv6.code%"
elif 'IPv6' in p:
fmt = "IPv6 {} > {} nh=%IPv6.nh%"
else:
fmt = "IP {} > {} proto=%IP.proto%"
return p.sprintf(fmt.format(*addr_fmt))
elif 'ARP' in p:
return p.sprintf("ARP %ARP.psrc% > %ARP.pdst%")
else:
return p.sprintf("Ethernet type=%04xr,Ether.type%")
return "Other"
session_extractor = _session_extractor
sessions = defaultdict(self.__class__) # type: DefaultDict[str, _PacketList[_Inner]] # noqa: E501
for p in self.res:
sess = session_extractor(
self._elt2pkt(p)
)
sessions[sess].append(p)
return dict(sessions)
def replace(self, *args, **kargs):
# type: (Any, Any) -> PacketList
"""
lst.replace(<field>,[<oldvalue>,]<newvalue>)
lst.replace( (fld,[ov],nv),(fld,[ov,]nv),...)
if ov is None, all values are replaced
ex:
lst.replace( IP.src, "192.168.1.1", "10.0.0.1" )
lst.replace( IP.ttl, 64 )
lst.replace( (IP.ttl, 64), (TCP.sport, 666, 777), )
"""
delete_checksums = kargs.get("delete_checksums", False)
x = PacketList(name="Replaced %s" % self.listname)
if not isinstance(args[0], tuple):
args = (args,)
for _p in self.res:
p = self._elt2pkt(_p)
copied = False
for scheme in args:
fld = scheme[0]
old = scheme[1] # not used if len(scheme) == 2
new = scheme[-1]
for o in fld.owners:
if o in p:
if len(scheme) == 2 or p[o].getfieldval(fld.name) == old: # noqa: E501
if not copied:
p = p.copy()
if delete_checksums:
p.delete_checksums()
copied = True
setattr(p[o], fld.name, new)
x.append(p)
return x
def getlayer(self, cls, # type: Packet
nb=None, # type: Optional[int]
flt=None, # type: Optional[Dict[str, Any]]
name=None, # type: Optional[str]
stats=None # type: Optional[List[Type[Packet]]]
):
# type: (...) -> PacketList
"""Returns the packet list from a given layer.
See ``Packet.getlayer`` for more info.
:param cls: search for a layer that is an instance of ``cls``
:type cls: Type[scapy.packet.Packet]
:param nb: return the nb^th layer that is an instance of ``cls``
:type nb: Optional[int]
:param flt: filter parameters for ``Packet.getlayer``
:type flt: Optional[Dict[str, Any]]
:param name: optional name for the new PacketList
:type name: Optional[str]
:param stats: optional list of protocols to give stats on; if not
specified, inherits from this PacketList.
:type stats: Optional[List[Type[scapy.packet.Packet]]]
:rtype: scapy.plist.PacketList
"""
if name is None:
name = "{} layer {}".format(self.listname, cls.__name__)
if stats is None:
stats = self.stats
getlayer_arg = {} # type: Dict[str, Any]
if flt is not None:
getlayer_arg.update(flt)
getlayer_arg['cls'] = cls
if nb is not None:
getlayer_arg['nb'] = nb
# Only return non-None getlayer results
return PacketList([
pc for pc in (
self._elt2pkt(p).getlayer(**getlayer_arg) for p in self.res
) if pc is not None],
name, stats
)
def convert_to(self,
other_cls, # type: Type[Packet]
name=None, # type: Optional[str]
stats=None # type: Optional[List[Type[Packet]]]
):
# type: (...) -> PacketList
"""Converts all packets to another type.
See ``Packet.convert_to`` for more info.
:param other_cls: reference to a Packet class to convert to
:type other_cls: Type[scapy.packet.Packet]
:param name: optional name for the new PacketList
:type name: Optional[str]
:param stats: optional list of protocols to give stats on;
if not specified, inherits from this PacketList.
:type stats: Optional[List[Type[scapy.packet.Packet]]]
:rtype: scapy.plist.PacketList
"""
if name is None:
name = "{} converted to {}".format(
self.listname, other_cls.__name__)
if stats is None:
stats = self.stats
return PacketList(
[self._elt2pkt(p).convert_to(other_cls) for p in self.res],
name, stats
)
class PacketList(_PacketList[Packet],
BasePacketList[Packet],
_CanvasDumpExtended):
def sr(self, multi=False, lookahead=None):
# type: (bool, Optional[int]) -> Tuple[SndRcvList, PacketList]
"""
Matches packets in the list
:param multi: True if a packet can have multiple answers
:param lookahead: Maximum number of packets between packet and answer.
If 0 or None, full remaining list is
scanned for answers
:return: ( (matched couples), (unmatched packets) )
"""
remain = self.res[:]
sr = [] # type: List[QueryAnswer]
i = 0
if lookahead is None or lookahead == 0:
lookahead = len(remain)
while i < len(remain):
s = remain[i]
j = i
while j < min(lookahead + i, len(remain) - 1):
j += 1
r = remain[j]
if r.answers(s):
sr.append(QueryAnswer(s, r))
if multi:
remain[i]._answered = 1
remain[j]._answered = 2
continue
                    del remain[j]
                    del remain[i]
i -= 1
break
i += 1
if multi:
remain = [x for x in remain if not hasattr(x, "_answered")]
return SndRcvList(sr), PacketList(remain)
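    # Editor's sketch (hedged): sr() pairs probes with their replies,
    # e.g. given a mixed capture `pkts`:
    #   ans, unans = PacketList(pkts).sr()
    #   ans.summary()   # prints "query ==> answer" lines (a SndRcvList)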
_PacketIterable = Union[
List[Packet],
Packet,
SetGen[Packet],
_PacketList[Packet]
]
class SndRcvList(_PacketList[QueryAnswer],
BasePacketList[QueryAnswer],
_CanvasDumpExtended):
__slots__ = [] # type: List[str]
def __init__(self,
res=None, # type: Optional[Union[_PacketList[QueryAnswer], List[QueryAnswer]]] # noqa: E501
name="Results", # type: str
stats=None # type: Optional[List[Type[Packet]]]
):
# type: (...) -> None
super(SndRcvList, self).__init__(res, name, stats)
def _elt2pkt(self, elt):
# type: (QueryAnswer) -> Packet
return elt[1]
def _elt2sum(self, elt):
# type: (QueryAnswer) -> str
return "%s ==> %s" % (elt[0].summary(), elt[1].summary())
|
gpl-2.0
|
ptoman/icgauge
|
experiments/kannan_ambili.py
|
1
|
2108
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file contains experimental results related to the
Kannan-Ambili semantic coherence metric.
Usage:
From top level (/path/to/repo/icgauge), issue the command
`python -m experiments.kannan_ambili`
"""
import json
from collections import defaultdict
from matplotlib import pyplot as plt
import scipy
import numpy as np
import icgauge
from icgauge import experiment_frameworks
from icgauge import feature_extractors
run_experiment = True
if run_experiment:
corr, alpha, conf_matrix, details = experiment_frameworks.experiment_features_iterated(
train_reader=icgauge.data_readers.train_and_dev,
assess_reader=icgauge.data_readers.test,#_official,
train_size=0.7,
phi_list=[
icgauge.feature_extractors.semcom_ka_features
],
class_func=icgauge.label_transformers.identity_class_func, #ternary_class_func
train_func=icgauge.training_functions.fit_logistic_at,#_with_crossvalidation,
score_func=scipy.stats.stats.pearsonr,
verbose=False,
iterations=5)
# Print out the results
print "\n-- AFTER COMPLETION --"
print "Averaged correlation (95% CI): "
print np.round(np.mean(corr),2), "+/-", np.round(np.std(corr),2)
print "All correlations:"
print corr
print
print "Averaged Cronbach's alpha (95% CI): "
print np.round(np.mean(alpha),2), "+/-", np.round(np.std(alpha),2)
print "All alphas:"
print alpha
print
print "Confusion matrix:"
print conf_matrix
# Store the results to disk -- "truth"/"prediction"/"example"
with open("results.json", "w") as fp:
json.dump(details, fp, indent=4)
"""""""""""""""""""""
Experimental results.
Cross-validation on train:
alpha = 1.0, 0.2, 0.2, 0.2, 0.4, 0.8, 0.2, 2.0, 0.2, 0.4, 0.8
If only using best: 0.28 and 0.17
Try 0.6 -- it's closest to average -- and get 0.22 and 0.15
Try 0.2 -- and get 0.25 and 0.15
... these are basically the same. so let's stick with
the average ==> 0.6.
Test:
0 correlation, 0 alpha
Test official:
0 correlation, 0 alpha
"""""""""""""""""""""
|
mit
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/pandas/stats/ols.py
|
7
|
39955
|
"""
Ordinary least squares regression
"""
# pylint: disable-msg=W0201
from pandas.compat import zip, range, StringIO
from itertools import starmap
from pandas import compat
import numpy as np
from pandas.core.api import DataFrame, Series, isnull
from pandas.core.base import StringMixin
from pandas.core.common import _ensure_float64
from pandas.core.index import MultiIndex
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly
import pandas.stats.common as scom
import pandas.stats.math as math
import pandas.stats.moments as moments
_FP_ERR = 1e-8
class OLS(StringMixin):
"""
Runs a full sample ordinary least squares regression.
Parameters
----------
y : Series
x : Series, DataFrame, dict of Series
intercept : bool
True if you want an intercept.
weights : array-like, optional
1d array of weights. If you supply 1/W then the variables are pre-
multiplied by 1/sqrt(W). If no weights are supplied the default value
        is 1 and WLS results are the same as OLS.
nw_lags : None or int
Number of Newey-West lags.
nw_overlap : boolean, default False
Assume data is overlapping when computing Newey-West estimator
"""
_panel_model = False
def __init__(self, y, x, intercept=True, weights=None, nw_lags=None,
nw_overlap=False):
try:
import statsmodels.api as sm
except ImportError:
import scikits.statsmodels.api as sm
self._x_orig = x
self._y_orig = y
self._weights_orig = weights
self._intercept = intercept
self._nw_lags = nw_lags
self._nw_overlap = nw_overlap
(self._y, self._x, self._weights, self._x_filtered,
self._index, self._time_has_obs) = self._prepare_data()
if self._weights is not None:
self._x_trans = self._x.mul(np.sqrt(self._weights), axis=0)
self._y_trans = self._y * np.sqrt(self._weights)
self.sm_ols = sm.WLS(self._y.get_values(),
self._x.get_values(),
weights=self._weights.values).fit()
else:
self._x_trans = self._x
self._y_trans = self._y
self.sm_ols = sm.OLS(self._y.get_values(),
self._x.get_values()).fit()
def _prepare_data(self):
"""
Cleans the input for single OLS.
Parameters
----------
lhs: Series
Dependent variable in the regression.
rhs: dict, whose values are Series, DataFrame, or dict
Explanatory variables of the regression.
Returns
-------
Series, DataFrame
Cleaned lhs and rhs
"""
(filt_lhs, filt_rhs, filt_weights,
pre_filt_rhs, index, valid) = _filter_data(self._y_orig, self._x_orig,
self._weights_orig)
if self._intercept:
filt_rhs['intercept'] = 1.
pre_filt_rhs['intercept'] = 1.
if hasattr(filt_weights,'to_dense'):
filt_weights = filt_weights.to_dense()
return (filt_lhs, filt_rhs, filt_weights,
pre_filt_rhs, index, valid)
@property
def nobs(self):
return self._nobs
@property
def _nobs(self):
return len(self._y)
@property
def nw_lags(self):
return self._nw_lags
@property
def x(self):
"""Returns the filtered x used in the regression."""
return self._x
@property
def y(self):
"""Returns the filtered y used in the regression."""
return self._y
@cache_readonly
def _beta_raw(self):
"""Runs the regression and returns the beta."""
return self.sm_ols.params
@cache_readonly
def beta(self):
"""Returns the betas in Series form."""
return Series(self._beta_raw, index=self._x.columns)
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
return math.rank(self._x.values)
@cache_readonly
def df(self):
"""Returns the degrees of freedom.
This equals the rank of the X matrix.
"""
return self._df_raw
@cache_readonly
def _df_model_raw(self):
"""Returns the raw model degrees of freedom."""
return self.sm_ols.df_model
@cache_readonly
def df_model(self):
"""Returns the degrees of freedom of the model."""
return self._df_model_raw
@cache_readonly
def _df_resid_raw(self):
"""Returns the raw residual degrees of freedom."""
return self.sm_ols.df_resid
@cache_readonly
def df_resid(self):
"""Returns the degrees of freedom of the residuals."""
return self._df_resid_raw
@cache_readonly
def _f_stat_raw(self):
"""Returns the raw f-stat value."""
from scipy.stats import f
cols = self._x.columns
if self._nw_lags is None:
F = self._r2_raw / (self._r2_raw - self._r2_adj_raw)
q = len(cols)
if 'intercept' in cols:
q -= 1
shape = q, self.df_resid
p_value = 1 - f.cdf(F, shape[0], shape[1])
return F, shape, p_value
k = len(cols)
R = np.eye(k)
r = np.zeros((k, 1))
try:
intercept = cols.get_loc('intercept')
R = np.concatenate((R[0: intercept], R[intercept + 1:]))
r = np.concatenate((r[0: intercept], r[intercept + 1:]))
except KeyError:
# no intercept
pass
return math.calc_F(R, r, self._beta_raw, self._var_beta_raw,
self._nobs, self.df)
@cache_readonly
def f_stat(self):
"""Returns the f-stat value."""
return f_stat_to_dict(self._f_stat_raw)
def f_test(self, hypothesis):
"""Runs the F test, given a joint hypothesis. The hypothesis is
represented by a collection of equations, in the form
A*x_1+B*x_2=C
You must provide the coefficients even if they're 1. No spaces.
The equations can be passed as either a single string or a
list of strings.
Examples
--------
o = ols(...)
o.f_test('1*x1+2*x2=0,1*x3=0')
o.f_test(['1*x1+2*x2=0','1*x3=0'])
"""
x_names = self._x.columns
R = []
r = []
if isinstance(hypothesis, str):
eqs = hypothesis.split(',')
elif isinstance(hypothesis, list):
eqs = hypothesis
else: # pragma: no cover
raise Exception('hypothesis must be either string or list')
for equation in eqs:
row = np.zeros(len(x_names))
lhs, rhs = equation.split('=')
for s in lhs.split('+'):
ss = s.split('*')
coeff = float(ss[0])
x_name = ss[1]
if x_name not in x_names:
raise Exception('no coefficient named %s' % x_name)
idx = x_names.get_loc(x_name)
row[idx] = coeff
rhs = float(rhs)
R.append(row)
r.append(rhs)
R = np.array(R)
q = len(r)
r = np.array(r).reshape(q, 1)
result = math.calc_F(R, r, self._beta_raw, self._var_beta_raw,
self._nobs, self.df)
return f_stat_to_dict(result)
@cache_readonly
def _p_value_raw(self):
"""Returns the raw p values."""
from scipy.stats import t
return 2 * t.sf(np.fabs(self._t_stat_raw),
self._df_resid_raw)
@cache_readonly
def p_value(self):
"""Returns the p values."""
return Series(self._p_value_raw, index=self.beta.index)
@cache_readonly
def _r2_raw(self):
"""Returns the raw r-squared values."""
if self._use_centered_tss:
return 1 - self.sm_ols.ssr / self.sm_ols.centered_tss
else:
return 1 - self.sm_ols.ssr / self.sm_ols.uncentered_tss
@property
def _use_centered_tss(self):
# has_intercept = np.abs(self._resid_raw.sum()) < _FP_ERR
return self._intercept
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
return self._r2_raw
@cache_readonly
def _r2_adj_raw(self):
"""Returns the raw r-squared adjusted values."""
return self.sm_ols.rsquared_adj
@cache_readonly
def r2_adj(self):
"""Returns the r-squared adjusted values."""
return self._r2_adj_raw
@cache_readonly
def _resid_raw(self):
"""Returns the raw residuals."""
return self.sm_ols.resid
@cache_readonly
def resid(self):
"""Returns the residuals."""
return Series(self._resid_raw, index=self._x.index)
@cache_readonly
def _rmse_raw(self):
"""Returns the raw rmse values."""
return np.sqrt(self.sm_ols.mse_resid)
@cache_readonly
def rmse(self):
"""Returns the rmse value."""
return self._rmse_raw
@cache_readonly
def _std_err_raw(self):
"""Returns the raw standard err values."""
return np.sqrt(np.diag(self._var_beta_raw))
@cache_readonly
def std_err(self):
"""Returns the standard err values of the betas."""
return Series(self._std_err_raw, index=self.beta.index)
@cache_readonly
def _t_stat_raw(self):
"""Returns the raw t-stat value."""
return self._beta_raw / self._std_err_raw
@cache_readonly
def t_stat(self):
"""Returns the t-stat values of the betas."""
return Series(self._t_stat_raw, index=self.beta.index)
@cache_readonly
def _var_beta_raw(self):
"""
Returns the raw covariance of beta.
"""
x = self._x.values
y = self._y.values
xx = np.dot(x.T, x)
if self._nw_lags is None:
return math.inv(xx) * (self._rmse_raw ** 2)
else:
resid = y - np.dot(x, self._beta_raw)
m = (x.T * resid).T
xeps = math.newey_west(m, self._nw_lags, self._nobs, self._df_raw,
self._nw_overlap)
xx_inv = math.inv(xx)
return np.dot(xx_inv, np.dot(xeps, xx_inv))
@cache_readonly
def var_beta(self):
"""Returns the variance-covariance matrix of beta."""
return DataFrame(self._var_beta_raw, index=self.beta.index,
columns=self.beta.index)
@cache_readonly
def _y_fitted_raw(self):
"""Returns the raw fitted y values."""
if self._weights is None:
X = self._x_filtered.values
else:
# XXX
return self.sm_ols.fittedvalues
b = self._beta_raw
return np.dot(X, b)
@cache_readonly
def y_fitted(self):
"""Returns the fitted y values. This equals BX."""
if self._weights is None:
index = self._x_filtered.index
orig_index = index
else:
index = self._y.index
orig_index = self._y_orig.index
result = Series(self._y_fitted_raw, index=index)
return result.reindex(orig_index)
@cache_readonly
def _y_predict_raw(self):
"""Returns the raw predicted y values."""
return self._y_fitted_raw
@cache_readonly
def y_predict(self):
"""Returns the predicted y values.
For in-sample, this is same as y_fitted."""
return self.y_fitted
def predict(self, beta=None, x=None, fill_value=None,
fill_method=None, axis=0):
"""
Parameters
----------
beta : Series
x : Series or DataFrame
fill_value : scalar or dict, default None
fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
axis : {0, 1}, default 0
See DataFrame.fillna for more details
Notes
-----
1. If both fill_value and fill_method are None then NaNs are dropped
(this is the default behavior)
2. An intercept will be automatically added to the new_y_values if
the model was fitted using an intercept
Returns
-------
Series of predicted values
"""
if beta is None and x is None:
return self.y_predict
if beta is None:
beta = self.beta
else:
beta = beta.reindex(self.beta.index)
if isnull(beta).any():
raise ValueError('Must supply betas for same variables')
if x is None:
x = self._x
orig_x = x
else:
orig_x = x
if fill_value is None and fill_method is None:
x = x.dropna(how='any')
else:
x = x.fillna(value=fill_value, method=fill_method, axis=axis)
if isinstance(x, Series):
x = DataFrame({'x': x})
if self._intercept:
x['intercept'] = 1.
x = x.reindex(columns=self._x.columns)
rs = np.dot(x.values, beta.values)
return Series(rs, x.index).reindex(orig_x.index)
RESULT_FIELDS = ['r2', 'r2_adj', 'df', 'df_model', 'df_resid', 'rmse',
'f_stat', 'beta', 'std_err', 't_stat', 'p_value', 'nobs']
@cache_readonly
def _results(self):
results = {}
for result in self.RESULT_FIELDS:
results[result] = getattr(self, result)
return results
@cache_readonly
def _coef_table(self):
buf = StringIO()
buf.write('%14s %10s %10s %10s %10s %10s %10s\n' %
('Variable', 'Coef', 'Std Err', 't-stat',
'p-value', 'CI 2.5%', 'CI 97.5%'))
buf.write(scom.banner(''))
coef_template = '\n%14s %10.4f %10.4f %10.2f %10.4f %10.4f %10.4f'
results = self._results
beta = results['beta']
for i, name in enumerate(beta.index):
if i and not (i % 5):
buf.write('\n' + scom.banner(''))
std_err = results['std_err'][name]
CI1 = beta[name] - 1.96 * std_err
CI2 = beta[name] + 1.96 * std_err
t_stat = results['t_stat'][name]
p_value = results['p_value'][name]
line = coef_template % (name,
beta[name], std_err, t_stat, p_value, CI1, CI2)
buf.write(line)
if self.nw_lags is not None:
buf.write('\n')
buf.write('*** The calculations are Newey-West '
'adjusted with lags %5d\n' % self.nw_lags)
return buf.getvalue()
@cache_readonly
def summary_as_matrix(self):
"""Returns the formatted results of the OLS as a DataFrame."""
results = self._results
beta = results['beta']
data = {'beta': results['beta'],
't-stat': results['t_stat'],
'p-value': results['p_value'],
'std err': results['std_err']}
return DataFrame(data, beta.index).T
@cache_readonly
def summary(self):
"""
This returns the formatted result of the OLS computation
"""
template = """
%(bannerTop)s
Formula: Y ~ %(formula)s
Number of Observations: %(nobs)d
Number of Degrees of Freedom: %(df)d
R-squared: %(r2)10.4f
Adj R-squared: %(r2_adj)10.4f
Rmse: %(rmse)10.4f
F-stat %(f_stat_shape)s: %(f_stat)10.4f, p-value: %(f_stat_p_value)10.4f
Degrees of Freedom: model %(df_model)d, resid %(df_resid)d
%(bannerCoef)s
%(coef_table)s
%(bannerEnd)s
"""
coef_table = self._coef_table
results = self._results
f_stat = results['f_stat']
bracketed = ['<%s>' % str(c) for c in results['beta'].index]
formula = StringIO()
formula.write(bracketed[0])
tot = len(bracketed[0])
line = 1
for coef in bracketed[1:]:
tot = tot + len(coef) + 3
if tot // (68 * line):
formula.write('\n' + ' ' * 12)
line += 1
formula.write(' + ' + coef)
params = {
'bannerTop': scom.banner('Summary of Regression Analysis'),
'bannerCoef': scom.banner('Summary of Estimated Coefficients'),
'bannerEnd': scom.banner('End of Summary'),
'formula': formula.getvalue(),
'r2': results['r2'],
'r2_adj': results['r2_adj'],
'nobs': results['nobs'],
'df': results['df'],
'df_model': results['df_model'],
'df_resid': results['df_resid'],
'coef_table': coef_table,
'rmse': results['rmse'],
'f_stat': f_stat['f-stat'],
'f_stat_shape': '(%d, %d)' % (f_stat['DF X'], f_stat['DF Resid']),
'f_stat_p_value': f_stat['p-value'],
}
return template % params
def __unicode__(self):
return self.summary
@cache_readonly
def _time_obs_count(self):
# XXX
return self._time_has_obs.astype(int)
@property
def _total_times(self):
return self._time_has_obs.sum()
class MovingOLS(OLS):
"""
Runs a rolling/expanding simple OLS.
Parameters
----------
y : Series
x : Series, DataFrame, or dict of Series
weights : array-like, optional
1d array of weights. If None, equivalent to an unweighted OLS.
window_type : {'full sample', 'rolling', 'expanding'}
Default expanding
window : int
size of window (for rolling/expanding OLS)
min_periods : int
Threshold of non-null data points to require.
If None, defaults to size of window.
intercept : bool
True if you want an intercept.
nw_lags : None or int
Number of Newey-West lags.
nw_overlap : boolean, default False
Assume data is overlapping when computing Newey-West estimator
"""
def __init__(self, y, x, weights=None, window_type='expanding',
window=None, min_periods=None, intercept=True,
nw_lags=None, nw_overlap=False):
self._args = dict(intercept=intercept, nw_lags=nw_lags,
nw_overlap=nw_overlap)
OLS.__init__(self, y=y, x=x, weights=weights, **self._args)
self._set_window(window_type, window, min_periods)
def _set_window(self, window_type, window, min_periods):
self._window_type = scom._get_window_type(window_type)
if self._is_rolling:
if window is None:
raise AssertionError("Must specify window.")
if min_periods is None:
min_periods = window
else:
window = len(self._x)
if min_periods is None:
min_periods = 1
self._window = int(window)
self._min_periods = min_periods
#------------------------------------------------------------------------------
# "Public" results
@cache_readonly
def beta(self):
"""Returns the betas in Series/DataFrame form."""
return DataFrame(self._beta_raw,
index=self._result_index,
columns=self._x.columns)
@cache_readonly
def rank(self):
return Series(self._rank_raw, index=self._result_index)
@cache_readonly
def df(self):
"""Returns the degrees of freedom."""
return Series(self._df_raw, index=self._result_index)
@cache_readonly
def df_model(self):
"""Returns the model degrees of freedom."""
return Series(self._df_model_raw, index=self._result_index)
@cache_readonly
def df_resid(self):
"""Returns the residual degrees of freedom."""
return Series(self._df_resid_raw, index=self._result_index)
@cache_readonly
def f_stat(self):
"""Returns the f-stat value."""
f_stat_dicts = dict((date, f_stat_to_dict(f_stat))
for date, f_stat in zip(self.beta.index,
self._f_stat_raw))
return DataFrame(f_stat_dicts).T
def f_test(self, hypothesis):
raise NotImplementedError('must use full sample')
@cache_readonly
def forecast_mean(self):
return Series(self._forecast_mean_raw, index=self._result_index)
@cache_readonly
def forecast_vol(self):
return Series(self._forecast_vol_raw, index=self._result_index)
@cache_readonly
def p_value(self):
"""Returns the p values."""
cols = self.beta.columns
return DataFrame(self._p_value_raw, columns=cols,
index=self._result_index)
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
return Series(self._r2_raw, index=self._result_index)
@cache_readonly
def resid(self):
"""Returns the residuals."""
return Series(self._resid_raw[self._valid_obs_labels],
index=self._result_index)
@cache_readonly
def r2_adj(self):
"""Returns the r-squared adjusted values."""
index = self.r2.index
return Series(self._r2_adj_raw, index=index)
@cache_readonly
def rmse(self):
"""Returns the rmse values."""
return Series(self._rmse_raw, index=self._result_index)
@cache_readonly
def std_err(self):
"""Returns the standard err values."""
return DataFrame(self._std_err_raw, columns=self.beta.columns,
index=self._result_index)
@cache_readonly
def t_stat(self):
"""Returns the t-stat value."""
return DataFrame(self._t_stat_raw, columns=self.beta.columns,
index=self._result_index)
@cache_readonly
def var_beta(self):
"""Returns the covariance of beta."""
result = {}
result_index = self._result_index
for i in range(len(self._var_beta_raw)):
dm = DataFrame(self._var_beta_raw[i], columns=self.beta.columns,
index=self.beta.columns)
result[result_index[i]] = dm
return Panel.from_dict(result, intersect=False)
@cache_readonly
def y_fitted(self):
"""Returns the fitted y values."""
return Series(self._y_fitted_raw[self._valid_obs_labels],
index=self._result_index)
@cache_readonly
def y_predict(self):
"""Returns the predicted y values."""
return Series(self._y_predict_raw[self._valid_obs_labels],
index=self._result_index)
#------------------------------------------------------------------------------
# "raw" attributes, calculations
@property
def _is_rolling(self):
return self._window_type == 'rolling'
@cache_readonly
def _beta_raw(self):
"""Runs the regression and returns the beta."""
beta, indices, mask = self._rolling_ols_call
return beta[indices]
@cache_readonly
def _result_index(self):
return self._index[self._valid_indices]
@property
def _valid_indices(self):
return self._rolling_ols_call[1]
@cache_readonly
def _rolling_ols_call(self):
return self._calc_betas(self._x_trans, self._y_trans)
def _calc_betas(self, x, y):
N = len(self._index)
K = len(self._x.columns)
betas = np.empty((N, K), dtype=float)
betas[:] = np.NaN
valid = self._time_has_obs
enough = self._enough_obs
window = self._window
# Use transformed (demeaned) Y, X variables
cum_xx = self._cum_xx(x)
cum_xy = self._cum_xy(x, y)
for i in range(N):
if not valid[i] or not enough[i]:
continue
xx = cum_xx[i]
xy = cum_xy[i]
if self._is_rolling and i >= window:
xx = xx - cum_xx[i - window]
xy = xy - cum_xy[i - window]
betas[i] = math.solve(xx, xy)
        mask = ~np.isnan(betas).any(axis=1)
have_betas = np.arange(N)[mask]
return betas, have_betas, mask
def _rolling_rank(self):
dates = self._index
window = self._window
ranks = np.empty(len(dates), dtype=float)
ranks[:] = np.NaN
for i, date in enumerate(dates):
if self._is_rolling and i >= window:
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
x_slice = self._x.truncate(before=prior_date, after=date).values
if len(x_slice) == 0:
continue
ranks[i] = math.rank(x_slice)
return ranks
def _cum_xx(self, x):
dates = self._index
K = len(x.columns)
valid = self._time_has_obs
cum_xx = []
slicer = lambda df, dt: df.truncate(dt, dt).values
if not self._panel_model:
_get_index = x.index.get_loc
def slicer(df, dt):
i = _get_index(dt)
return df.values[i:i + 1, :]
last = np.zeros((K, K))
for i, date in enumerate(dates):
if not valid[i]:
cum_xx.append(last)
continue
x_slice = slicer(x, date)
xx = last = last + np.dot(x_slice.T, x_slice)
cum_xx.append(xx)
return cum_xx
def _cum_xy(self, x, y):
dates = self._index
valid = self._time_has_obs
cum_xy = []
x_slicer = lambda df, dt: df.truncate(dt, dt).values
if not self._panel_model:
_get_index = x.index.get_loc
def x_slicer(df, dt):
i = _get_index(dt)
return df.values[i:i + 1]
_y_get_index = y.index.get_loc
_values = y.values
if isinstance(y.index, MultiIndex):
def y_slicer(df, dt):
loc = _y_get_index(dt)
return _values[loc]
else:
def y_slicer(df, dt):
i = _y_get_index(dt)
return _values[i:i + 1]
last = np.zeros(len(x.columns))
for i, date in enumerate(dates):
if not valid[i]:
cum_xy.append(last)
continue
x_slice = x_slicer(x, date)
y_slice = y_slicer(y, date)
xy = last = last + np.dot(x_slice.T, y_slice)
cum_xy.append(xy)
return cum_xy
@cache_readonly
def _rank_raw(self):
rank = self._rolling_rank()
return rank[self._valid_indices]
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
return self._rank_raw
@cache_readonly
def _df_model_raw(self):
"""Returns the raw model degrees of freedom."""
return self._df_raw - 1
@cache_readonly
def _df_resid_raw(self):
"""Returns the raw residual degrees of freedom."""
return self._nobs - self._df_raw
@cache_readonly
def _f_stat_raw(self):
"""Returns the raw f-stat value."""
from scipy.stats import f
items = self.beta.columns
nobs = self._nobs
df = self._df_raw
df_resid = nobs - df
# var_beta has not been newey-west adjusted
if self._nw_lags is None:
F = self._r2_raw / (self._r2_raw - self._r2_adj_raw)
q = len(items)
if 'intercept' in items:
q -= 1
def get_result_simple(Fst, d):
return Fst, (q, d), 1 - f.cdf(Fst, q, d)
# Compute the P-value for each pair
result = starmap(get_result_simple, zip(F, df_resid))
return list(result)
K = len(items)
R = np.eye(K)
r = np.zeros((K, 1))
try:
intercept = items.get_loc('intercept')
R = np.concatenate((R[0: intercept], R[intercept + 1:]))
r = np.concatenate((r[0: intercept], r[intercept + 1:]))
except KeyError:
# no intercept
pass
def get_result(beta, vcov, n, d):
return math.calc_F(R, r, beta, vcov, n, d)
results = starmap(get_result,
zip(self._beta_raw, self._var_beta_raw, nobs, df))
return list(results)
@cache_readonly
def _p_value_raw(self):
"""Returns the raw p values."""
from scipy.stats import t
result = [2 * t.sf(a, b)
for a, b in zip(np.fabs(self._t_stat_raw),
self._df_resid_raw)]
return np.array(result)
@cache_readonly
def _resid_stats(self):
uncentered_sst = []
sst = []
sse = []
Yreg = self._y
Y = self._y_trans
X = self._x_trans
weights = self._weights
dates = self._index
window = self._window
for n, index in enumerate(self._valid_indices):
if self._is_rolling and index >= window:
prior_date = dates[index - window + 1]
else:
prior_date = dates[0]
date = dates[index]
beta = self._beta_raw[n]
X_slice = X.truncate(before=prior_date, after=date).values
Y_slice = _y_converter(Y.truncate(before=prior_date, after=date))
resid = Y_slice - np.dot(X_slice, beta)
if weights is not None:
Y_slice = _y_converter(Yreg.truncate(before=prior_date,
after=date))
weights_slice = weights.truncate(prior_date, date)
demeaned = Y_slice - np.average(Y_slice, weights=weights_slice)
SS_total = (weights_slice * demeaned ** 2).sum()
else:
SS_total = ((Y_slice - Y_slice.mean()) ** 2).sum()
SS_err = (resid ** 2).sum()
SST_uncentered = (Y_slice ** 2).sum()
sse.append(SS_err)
sst.append(SS_total)
uncentered_sst.append(SST_uncentered)
return {
'sse': np.array(sse),
'centered_tss': np.array(sst),
'uncentered_tss': np.array(uncentered_sst),
}
@cache_readonly
def _rmse_raw(self):
"""Returns the raw rmse values."""
return np.sqrt(self._resid_stats['sse'] / self._df_resid_raw)
@cache_readonly
def _r2_raw(self):
rs = self._resid_stats
if self._use_centered_tss:
return 1 - rs['sse'] / rs['centered_tss']
else:
return 1 - rs['sse'] / rs['uncentered_tss']
@cache_readonly
def _r2_adj_raw(self):
"""Returns the raw r-squared adjusted values."""
nobs = self._nobs
factors = (nobs - 1) / (nobs - self._df_raw)
return 1 - (1 - self._r2_raw) * factors
@cache_readonly
def _resid_raw(self):
"""Returns the raw residuals."""
return (self._y.values - self._y_fitted_raw)
@cache_readonly
def _std_err_raw(self):
"""Returns the raw standard err values."""
results = []
for i in range(len(self._var_beta_raw)):
results.append(np.sqrt(np.diag(self._var_beta_raw[i])))
return np.array(results)
@cache_readonly
def _t_stat_raw(self):
"""Returns the raw t-stat value."""
return self._beta_raw / self._std_err_raw
@cache_readonly
def _var_beta_raw(self):
"""Returns the raw covariance of beta."""
x = self._x_trans
y = self._y_trans
dates = self._index
nobs = self._nobs
rmse = self._rmse_raw
beta = self._beta_raw
df = self._df_raw
window = self._window
cum_xx = self._cum_xx(self._x)
results = []
for n, i in enumerate(self._valid_indices):
xx = cum_xx[i]
date = dates[i]
if self._is_rolling and i >= window:
xx = xx - cum_xx[i - window]
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
x_slice = x.truncate(before=prior_date, after=date)
y_slice = y.truncate(before=prior_date, after=date)
xv = x_slice.values
yv = np.asarray(y_slice)
if self._nw_lags is None:
result = math.inv(xx) * (rmse[n] ** 2)
else:
resid = yv - np.dot(xv, beta[n])
m = (xv.T * resid).T
xeps = math.newey_west(m, self._nw_lags, nobs[n], df[n],
self._nw_overlap)
xx_inv = math.inv(xx)
result = np.dot(xx_inv, np.dot(xeps, xx_inv))
results.append(result)
return np.array(results)
@cache_readonly
def _forecast_mean_raw(self):
"""Returns the raw covariance of beta."""
nobs = self._nobs
window = self._window
# x should be ones
dummy = DataFrame(index=self._y.index)
dummy['y'] = 1
cum_xy = self._cum_xy(dummy, self._y)
results = []
for n, i in enumerate(self._valid_indices):
sumy = cum_xy[i]
if self._is_rolling and i >= window:
sumy = sumy - cum_xy[i - window]
results.append(sumy[0] / nobs[n])
return np.array(results)
@cache_readonly
def _forecast_vol_raw(self):
"""Returns the raw covariance of beta."""
beta = self._beta_raw
window = self._window
dates = self._index
x = self._x
results = []
for n, i in enumerate(self._valid_indices):
date = dates[i]
if self._is_rolling and i >= window:
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
x_slice = x.truncate(prior_date, date).values
x_demeaned = x_slice - x_slice.mean(0)
x_cov = np.dot(x_demeaned.T, x_demeaned) / (len(x_slice) - 1)
B = beta[n]
result = np.dot(B, np.dot(x_cov, B))
results.append(np.sqrt(result))
return np.array(results)
@cache_readonly
def _y_fitted_raw(self):
"""Returns the raw fitted y values."""
return (self._x.values * self._beta_matrix(lag=0)).sum(1)
@cache_readonly
def _y_predict_raw(self):
"""Returns the raw predicted y values."""
return (self._x.values * self._beta_matrix(lag=1)).sum(1)
@cache_readonly
def _results(self):
results = {}
for result in self.RESULT_FIELDS:
value = getattr(self, result)
if isinstance(value, Series):
value = value[self.beta.index[-1]]
elif isinstance(value, DataFrame):
value = value.xs(self.beta.index[-1])
else: # pragma: no cover
raise Exception('Problem retrieving %s' % result)
results[result] = value
return results
@cache_readonly
def _window_time_obs(self):
window_obs = moments.rolling_sum(self._time_obs_count > 0,
self._window, min_periods=1)
window_obs[np.isnan(window_obs)] = 0
return window_obs.astype(int)
@cache_readonly
def _nobs_raw(self):
if self._is_rolling:
window = self._window
else:
# expanding case
window = len(self._index)
result = moments.rolling_sum(self._time_obs_count, window,
min_periods=1)
return result.astype(int)
def _beta_matrix(self, lag=0):
if lag < 0:
raise AssertionError("'lag' must be greater than or equal to 0, "
"input was {0}".format(lag))
betas = self._beta_raw
labels = np.arange(len(self._y)) - lag
indexer = self._valid_obs_labels.searchsorted(labels, side='left')
indexer[indexer == len(betas)] = len(betas) - 1
beta_matrix = betas[indexer]
beta_matrix[labels < self._valid_obs_labels[0]] = np.NaN
return beta_matrix
@cache_readonly
def _valid_obs_labels(self):
dates = self._index[self._valid_indices]
return self._y.index.searchsorted(dates)
@cache_readonly
def _nobs(self):
return self._nobs_raw[self._valid_indices]
@property
def nobs(self):
return Series(self._nobs, index=self._result_index)
@cache_readonly
def _enough_obs(self):
# XXX: what's the best way to determine where to start?
return self._nobs_raw >= max(self._min_periods,
len(self._x.columns) + 1)
def _safe_update(d, other):
"""
Combine dictionaries with non-overlapping keys
"""
for k, v in compat.iteritems(other):
if k in d:
raise Exception('Duplicate regressor: %s' % k)
d[k] = v
def _filter_data(lhs, rhs, weights=None):
"""
Cleans the input for single OLS.
Parameters
----------
lhs : Series
Dependent variable in the regression.
rhs : dict, whose values are Series, DataFrame, or dict
Explanatory variables of the regression.
weights : array-like, optional
1d array of weights. If None, equivalent to an unweighted OLS.
Returns
-------
Series, DataFrame
Cleaned lhs and rhs
"""
if not isinstance(lhs, Series):
if len(lhs) != len(rhs):
raise AssertionError("length of lhs must equal length of rhs")
lhs = Series(lhs, index=rhs.index)
rhs = _combine_rhs(rhs)
lhs = DataFrame({'__y__': lhs}, dtype=float)
pre_filt_rhs = rhs.dropna(how='any')
combined = rhs.join(lhs, how='outer')
if weights is not None:
combined['__weights__'] = weights
valid = (combined.count(1) == len(combined.columns)).values
index = combined.index
combined = combined[valid]
if weights is not None:
filt_weights = combined.pop('__weights__')
else:
filt_weights = None
filt_lhs = combined.pop('__y__')
filt_rhs = combined
if hasattr(filt_weights,'to_dense'):
filt_weights = filt_weights.to_dense()
return (filt_lhs.to_dense(), filt_rhs.to_dense(), filt_weights,
pre_filt_rhs.to_dense(), index, valid)
def _combine_rhs(rhs):
"""
Glue input X variables together while checking for potential
duplicates
"""
series = {}
if isinstance(rhs, Series):
series['x'] = rhs
elif isinstance(rhs, DataFrame):
series = rhs.copy()
elif isinstance(rhs, dict):
for name, value in compat.iteritems(rhs):
if isinstance(value, Series):
_safe_update(series, {name: value})
elif isinstance(value, (dict, DataFrame)):
_safe_update(series, value)
else: # pragma: no cover
raise Exception('Invalid RHS data type: %s' % type(value))
else: # pragma: no cover
raise Exception('Invalid RHS type: %s' % type(rhs))
if not isinstance(series, DataFrame):
series = DataFrame(series, dtype=float)
return series
# A little kludge so we can use this method for both
# MovingOLS and MovingPanelOLS
def _y_converter(y):
y = y.values.squeeze()
if y.ndim == 0: # pragma: no cover
return np.array([y])
else:
return y
def f_stat_to_dict(result):
f_stat, shape, p_value = result
result = {}
result['f-stat'] = f_stat
result['DF X'] = shape[0]
result['DF Resid'] = shape[1]
result['p-value'] = p_value
return result
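# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal illustration of how f_stat_to_dict reshapes the (f-stat, shape,
# p-value) tuple produced by the raw F-test code above; the numbers are made
# up purely for demonstration.
if __name__ == '__main__':
    _example_result = (12.5, (2, 97), 1.6e-05)
    # expected: {'f-stat': 12.5, 'DF X': 2, 'DF Resid': 97, 'p-value': 1.6e-05}
    print(f_stat_to_dict(_example_result))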
|
gpl-3.0
|
ankurankan/scikit-learn
|
examples/cluster/plot_kmeans_digits.py
|
53
|
4524
|
"""
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point of the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
nhejazi/scikit-learn
|
examples/neighbors/plot_regression.py
|
15
|
1402
|
"""
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA
# #############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
# #############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
|
bsd-3-clause
|
drewejohnson/drewtils
|
docs/conf.py
|
1
|
5093
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# drewtils documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 5 01:45:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage']
autodoc_default_options = {"members": True}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'drewtils'
copyright = '2017-2020, Andrew Johnson'
author = 'Andrew Johnson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'drewtilsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'drewtils.tex', 'drewtils Documentation',
'Andrew Johnson', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'drewtils', 'drewtils Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'drewtils', 'drewtils Documentation',
author, 'drewtils', 'One line description of project.',
'Miscellaneous'),
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3.5', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
}
|
mit
|
celiafish/VisTrails
|
vistrails/packages/tabledata/operations.py
|
2
|
28158
|
try:
import numpy
except ImportError: # pragma: no cover
numpy = None
import re
from vistrails.core.modules.vistrails_module import ModuleError
from .common import TableObject, Table, choose_column, choose_columns
# FIXME use pandas?
def utf8(obj):
if isinstance(obj, bytes):
return obj
elif isinstance(obj, unicode):
return obj.encode('utf-8')
else:
return bytes(obj)
class JoinedTables(TableObject):
def __init__(self, left_t, right_t, left_key_col, right_key_col,
case_sensitive=False, always_prefix=False):
self.left_t = left_t
self.right_t = right_t
self.left_key_col = left_key_col
self.right_key_col = right_key_col
self.case_sensitive = case_sensitive
self.always_prefix = always_prefix
self.build_column_names()
self.compute_row_map()
self.column_cache = {}
self.rows = len(self.row_map)
def build_column_names(self):
left_name = self.left_t.name
if left_name is None:
left_name = "left"
right_name = self.right_t.name
if right_name is None:
right_name = "right"
def get_col_names(table, other, prefix):
names = []
for i in xrange(table.columns):
should_prefix = self.always_prefix
if table.names is not None:
n = table.names[i]
else:
n = "col %d" % i
should_prefix = True
if not should_prefix and other.names is not None:
if n in other.names:
should_prefix = True
names.append("%s%s" % (prefix + "." if should_prefix else "",
n))
return names
self.names = (get_col_names(self.left_t, self.right_t, left_name) +
get_col_names(self.right_t, self.left_t, right_name))
self.columns = len(self.names)
def get_column(self, index, numeric=False):
if (index, numeric) in self.column_cache:
return self.column_cache[(index, numeric)]
result = []
if index < self.left_t.columns:
column = self.left_t.get_column(index, numeric)
for i in xrange(self.left_t.rows):
if i in self.row_map:
result.append(column[i])
else:
column = self.right_t.get_column(index - self.left_t.columns,
numeric)
for i in xrange(self.left_t.rows):
if i in self.row_map:
j = self.row_map[i]
result.append(column[j])
if numeric and numpy is not None:
result = numpy.array(result, dtype=numpy.float32)
self.column_cache[(index, numeric)] = result
return result
def compute_row_map(self):
def build_key_dict(table, key_col):
column = table.get_column(key_col)
if self.case_sensitive:
key_dict = dict((utf8(val).strip(), i)
for i, val in enumerate(column))
else:
key_dict = dict((utf8(val).strip().upper(), i)
for i, val in enumerate(column))
return key_dict
right_keys = build_key_dict(self.right_t, self.right_key_col)
self.row_map = {}
for left_row_idx, key in enumerate(
self.left_t.get_column(self.left_key_col)):
key = utf8(key).strip()
if not self.case_sensitive:
key = key.upper()
if key in right_keys:
self.row_map[left_row_idx] = right_keys[key]
class JoinTables(Table):
"""Joins data from two tables using equality of a pair of columns.
This creates a table by combining the fields from the two tables. It will
match the values in the two selected columns (one from each table). If a
row from one of the table has a value for the selected field that doesn't
exist in the other table, that row will not appear in the result
(INNER JOIN semantics).
"""
_input_ports = [('left_table', 'Table'),
('right_table', 'Table'),
('left_column_idx', 'basic:Integer'),
('left_column_name', 'basic:String'),
('right_column_idx', 'basic:Integer'),
('right_column_name', 'basic:String'),
('case_sensitive', 'basic:Boolean',
{"optional": True, "defaults": str(["False"])}),
('always_prefix', 'basic:Boolean',
{"optional": True, "defaults": str(["False"])})]
_output_ports = [('value', Table)]
def compute(self):
left_t = self.get_input('left_table')
right_t = self.get_input('right_table')
case_sensitive = self.get_input('case_sensitive')
always_prefix = self.get_input('always_prefix')
def get_column_idx(table, prefix):
col_name_port = "%s_column_name" % prefix
col_idx_port = '%s_column_idx' % prefix
try:
col_idx = choose_column(
table.columns,
column_names=table.names,
name=self.force_get_input(col_name_port, None),
index=self.force_get_input(col_idx_port, None))
except ValueError, e:
raise ModuleError(self, e.message)
return col_idx
left_key_col = get_column_idx(left_t, "left")
right_key_col = get_column_idx(right_t, "right")
table = JoinedTables(left_t, right_t, left_key_col, right_key_col,
case_sensitive, always_prefix)
self.set_output('value', table)
class ProjectedTable(TableObject):
def __init__(self, table, col_idxs, col_names):
self.table = table
self.col_map = dict(enumerate(col_idxs))
self.columns = len(self.col_map)
self.names = col_names
def get_column(self, index, numeric=False):
mapped_idx = self.col_map[index]
return self.table.get_column(mapped_idx, numeric)
@property
def rows(self):
return self.table.rows
class ProjectTable(Table):
"""Build a table from the columns of another table.
This allows you to restrict, reorder or rename the columns of a table. You
can also duplicate columns by mentioning them several times.
"""
_input_ports = [("table", "Table"),
("column_names", "basic:List"),
("column_indexes", "basic:List"),
("new_column_names", "basic:List",
{"optional": True})]
_output_ports = [("value", Table)]
def compute(self):
table = self.get_input("table")
try:
indexes = choose_columns(
table.columns,
column_names=table.names,
names=self.force_get_input('column_names', None),
indexes=self.force_get_input('column_indexes', None))
except ValueError, e:
raise ModuleError(self, e.message)
if self.has_input('new_column_names'):
column_names = self.get_input('new_column_names')
if len(column_names) != len(indexes):
raise ModuleError(self,
"new_column_names was specified but doesn't "
"have the right number of names")
else:
column_names = []
names = {}
for i in indexes:
name = table.names[i]
if name in names:
nb = names[name]
names[name] += 1
name = '%s_%d' % (name, nb)
else:
names[name] = 1
column_names.append(name)
projected_table = ProjectedTable(table, indexes, column_names)
self.set_output("value", projected_table)
class SelectFromTable(Table):
"""Builds a table from the rows of another table.
This allows you to filter the records in a table according to a condition
on a specific field.
"""
_input_ports = [('table', 'Table'),
('str_expr', 'basic:String,basic:String,basic:String',
{'entry_types': "['default','enum','default']",
'values': "[[], ['==', '!=', '=~'], []]"}),
('float_expr', 'basic:String,basic:String,basic:Float',
{'entry_types': "['default','enum','default']",
'values': "[[], ['==', '!=', '<', '>', '<=', '>='], []]"})]
_output_ports = [('value', Table)]
@staticmethod
def make_condition(comparand, comparer):
if isinstance(comparand, float):
with_cast = lambda f: lambda v: f(float(v))
else:
with_cast = lambda f: f
if comparer == '==':
return with_cast(lambda v: v == comparand)
elif comparer == '!=':
return with_cast(lambda v: v != comparand)
elif comparer == '<':
return with_cast(lambda v: v < comparand)
elif comparer == '>':
return with_cast(lambda v: v > comparand)
elif comparer == '<=':
return with_cast(lambda v: v <= comparand)
elif comparer == '>=':
return with_cast(lambda v: v >= comparand)
elif comparer == '=~':
regex = re.compile(comparand)
return regex.search
else:
raise ValueError("Invalid comparison operator %r" % comparer)
def compute(self):
table = self.get_input('table')
if self.has_input('str_expr'):
(col, comparer, comparand) = self.get_input('str_expr')
elif self.has_input('float_expr'):
(col, comparer, comparand) = self.get_input('float_expr')
else:
raise ModuleError(self, "Must have some expression")
try:
idx = int(col)
except ValueError:
try:
idx = table.names.index(col)
except ValueError:
raise ModuleError(self, "No column %r" % col)
else:
if idx < 0 or idx >= table.columns:
raise ModuleError(self,
"No column %d, table only has %d columns" % (
idx, table.columns))
condition = self.make_condition(comparand, comparer)
numeric = isinstance(comparand, float)
column = table.get_column(idx, numeric)
matched_rows = [i
for i, col_val in enumerate(column)
if condition(col_val)]
columns = []
for col in xrange(table.columns):
column = table.get_column(col)
columns.append([column[row] for row in matched_rows])
selected_table = TableObject(columns, len(matched_rows), table.names)
self.set_output('value', selected_table)
class AggregatedTable(TableObject):
def __init__(self, table, op, col, group_col):
self.table = table
self.op = op
self.col = col
self.group_col = group_col
self.build_map()
def build_map(self):
agg_map = {}
for i, val in enumerate(self.table.get_column(self.group_col)):
if val in agg_map:
agg_map[val].append(i)
else:
agg_map[val] = [i]
self.agg_rows = [(min(rows), rows) for rows in agg_map.itervalues()]
self.agg_rows.sort()
self.rows = len(self.agg_rows)
self.columns = 2
if self.table.names is not None:
self.names = [self.table.names[self.group_col],
self.table.names[self.col]]
def get_column(self, index, numeric=False):
def average(value_iter):
# value_iter can only be used once
sum = 0
count = 0
for count, v in enumerate(value_iter):
sum += v
return sum / (count+1)
op_map = {'sum': sum,
'average': average,
'min': min,
'max': max}
if index == 0:
col = self.table.get_column(self.group_col, numeric)
return [col[x[0]] for x in self.agg_rows]
else:
if self.op == 'count':
return [len(x[1]) for x in self.agg_rows]
elif self.op in op_map:
col = self.table.get_column(self.col, True)
return [op_map[self.op](col[idx] for idx in x[1])
for x in self.agg_rows]
else:
raise ValueError('Unknown operation: "%s"' % self.op)
class AggregateColumn(Table):
_input_ports = [('table', 'Table'),
('op', 'basic:String',
{'entry_types': "['enum']",
'values': "[['sum', 'count', 'average', 'min', 'max']]"}),
('column_name', 'basic:String'),
('column_index', 'basic:Integer'),
('group_by_name', 'basic:String'),
('group_by_index', 'basic:Integer')]
_output_ports = [('value', 'Table')]
def compute(self):
table = self.get_input('table')
op = self.get_input('op')
column_name = self.force_get_input('column_name', None)
column_index = self.force_get_input('column_index', None)
col_idx = choose_column(table.columns,
column_names=table.names,
name=column_name,
index=column_index)
group_by_name = self.force_get_input('group_by_name', None)
group_by_index = self.force_get_input('group_by_index', None)
gb_idx = choose_column(table.columns,
column_names=table.names,
name=group_by_name,
index=group_by_index)
res_table = AggregatedTable(table, op, col_idx, gb_idx)
self.set_output('value', res_table)
_modules = [JoinTables, ProjectTable, SelectFromTable, AggregateColumn]
###############################################################################
import unittest
from vistrails.tests.utils import execute, intercept_result
from .identifiers import identifier
class TestJoin(unittest.TestCase):
def test_join(self):
"""Test joining tables that have column names.
"""
with intercept_result(JoinTables, 'value') as results:
self.assertFalse(execute([
('BuildTable', identifier, [
('id', [('List', repr([1, '2', 4, 5]))]),
('A_name', [('List',
repr(['one', 2, 'four', 'five'])),
]),
]),
('BuildTable', identifier, [
('B_age', [('List',
repr([14, 50, '12', 22])),
]),
('id', [('List', repr(['1', 2, 3, 5]))]),
]),
('JoinTables', identifier, [
('left_column_idx', [('Integer', '0')]),
('right_column_name', [('String', 'id')]),
('right_column_idx', [('Integer', '1')]),
]),
],
[
(0, 'value', 2, 'left_table'),
(1, 'value', 2, 'right_table'),
],
add_port_specs=[
(0, 'input', 'id',
'org.vistrails.vistrails.basic:List'),
(0, 'input', 'A_name',
'org.vistrails.vistrails.basic:List'),
(1, 'input', 'B_age',
'org.vistrails.vistrails.basic:List'),
(1, 'input', 'id',
'org.vistrails.vistrails.basic:List'),
]))
self.assertEqual(len(results), 1)
table, = results
self.assertEqual(table.names, ['left.id', 'A_name',
'B_age', 'right.id'])
self.assertEqual(table.get_column(0, False), [1, '2', 5])
l = table.get_column(0, True)
self.assertIsInstance(l, numpy.ndarray)
self.assertEqual(list(l), [1, 2, 5])
self.assertEqual(table.get_column(3, False), ['1', 2, 5])
l = table.get_column(3, True)
self.assertIsInstance(l, numpy.ndarray)
self.assertEqual(list(l), [1, 2, 5])
self.assertEqual(table.get_column(1, False), ['one', 2, 'five'])
self.assertEqual(list(table.get_column(2, True)), [14, 50, 22])
def test_noname(self):
"""Tests joining tables that have no column names.
"""
with intercept_result(JoinTables, 'value') as results:
self.assertFalse(execute([
('WriteFile', 'org.vistrails.vistrails.basic', [
('in_value', [('String', '1;one\n2;2\n4;four\n'
'5;five')]),
]),
('read|CSVFile', identifier, [
('delimiter', [('String', ';')]),
('header_present', [('Boolean', 'False')]),
('sniff_header', [('Boolean', 'False')]),
]),
('WriteFile', 'org.vistrails.vistrails.basic', [
('in_value', [('String', '14;1\n50;2\n12;3\n22;5\n')]),
]),
('read|CSVFile', identifier, [
('delimiter', [('String', ';')]),
('header_present', [('Boolean', 'False')]),
('sniff_header', [('Boolean', 'False')]),
]),
('JoinTables', identifier, [
('left_column_idx', [('Integer', '0')]),
('right_column_idx', [('Integer', '1')]),
]),
],
[
(0, 'out_value', 1, 'file'),
(2, 'out_value', 3, 'file'),
(1, 'value', 4, 'left_table'),
(3, 'value', 4, 'right_table'),
]))
self.assertEqual(len(results), 1)
table, = results
self.assertEqual(table.names, ['left.col 0', 'left.col 1',
'right.col 0', 'right.col 1'])
self.assertEqual(table.get_column(0, False), ['1', '2', '5'])
self.assertEqual(table.get_column(1, False), ['one', '2', 'five'])
class TestProjection(unittest.TestCase):
def do_project(self, project_functions, error=None):
with intercept_result(ProjectTable, 'value') as results:
errors = execute([
('BuildTable', identifier, [
('letters', [('List', repr(['a', 'b', 'c', 'd']))]),
('numbers', [('List', repr([1, 2, 3, '4']))]),
('cardinals', [('List', repr(['one', 'two',
'three', 'four']))]),
('ordinals', [('List', repr(['first', 'second',
'third', 'fourth']))])
]),
('ProjectTable', identifier, project_functions),
],
[
(0, 'value', 1, 'table'),
],
add_port_specs=[
(0, 'input', 'letters',
'org.vistrails.vistrails.basic:List'),
(0, 'input', 'numbers',
'org.vistrails.vistrails.basic:List'),
(0, 'input', 'cardinals',
'org.vistrails.vistrails.basic:List'),
(0, 'input', 'ordinals',
'org.vistrails.vistrails.basic:List'),
])
if error is not None:
self.assertEqual([1], errors.keys())
self.assertIn(error, errors[1].message)
return None
else:
self.assertFalse(errors)
self.assertEqual(len(results), 1)
return results[0]
def test_column_numbers(self):
"""Projects using column numbers.
"""
self.do_project([
('column_indexes', [('List', '[0, 4, 1, 0]')]),
],
'table only has 4 columns')
result = self.do_project([
('column_indexes', [('List', '[0, 3, 1, 0]')]),
])
self.assertEqual(result.names, ['letters', 'ordinals',
'numbers', 'letters_1'])
self.assertEqual(result.get_column(0, False),
['a', 'b', 'c', 'd'])
self.assertEqual(list(result.get_column(2, True)),
[1, 2, 3, 4])
self.assertEqual(result.get_column(3, False),
['a', 'b', 'c', 'd'])
def test_column_numbers_rename(self):
"""Projects and rename columns, using column numbers.
"""
self.do_project([
('column_indexes', [('List', '[0, 3, 1, 0]')]),
('new_column_names', [('List', '["a", "b", "c"]')])
],
"doesn't have the right number of names")
result = self.do_project([
('column_indexes', [('List', '[0, 3, 1, 0]')]),
('new_column_names', [('List', '["a", "b", "c", "d"]')]),
])
self.assertEqual(result.names, ['a', 'b', 'c', 'd'])
def test_column_names(self):
"""Projects using column names.
"""
self.do_project([
('column_names', [('List', repr(['letters', 'crackers']))]),
],
"not found: 'crackers'")
self.do_project([
('column_names', [('List', repr(['letters', 'ordinals']))]),
('column_indexes', [('List', '[0, 2]')]),
],
"they don't agree")
self.do_project([
('column_names', [('List', repr(['letters', 'ordinals']))]),
('column_indexes', [('List', '[0, 3]')]),
])
result = self.do_project([
('column_names', [('List', repr(['letters', 'ordinals',
'letters']))]),
])
self.assertEqual(result.names, ['letters', 'ordinals', 'letters_1'])
class TestSelect(unittest.TestCase):
def do_select(self, select_functions, error=None):
with intercept_result(SelectFromTable, 'value') as results:
errors = execute([
('WriteFile', 'org.vistrails.vistrails.basic', [
('in_value', [('String', '22;a;T;abaab\n'
'43;b;F;aabab\n'
'-7;d;T;abbababb\n'
'500;e;F;aba abacc')]),
]),
('read|CSVFile', identifier, [
('delimiter', [('String', ';')]),
('header_present', [('Boolean', 'False')]),
('sniff_header', [('Boolean', 'False')]),
]),
('SelectFromTable', identifier, select_functions),
],
[
(0, 'out_value', 1, 'file'),
(1, 'value', 2, 'table'),
])
if error is not None:
self.assertEqual([2], errors.keys())
self.assertIn(error, errors[2].message)
return None
else:
self.assertFalse(errors)
self.assertEqual(len(results), 1)
return results[0]
def test_numeric(self):
"""Selects using the 'less-than' condition.
"""
self.do_select([
('float_expr', [('String', '6'),
('String', '<='),
('Float', '42.0')]),
],
"table only has 4 columns")
table = self.do_select([
('float_expr', [('String', '0'),
('String', '<='),
('Float', '42.0')]),
])
l = table.get_column(0, True)
self.assertIsInstance(l, numpy.ndarray)
self.assertEqual(list(l), [22, -7])
self.assertEqual(table.get_column(1, False), ['a', 'd'])
def test_text(self):
"""Selects using the 'equal' condition.
"""
table = self.do_select([
('str_expr', [('String', '2'),
('String', '=='),
('String', 'T')])
])
self.assertEqual(table.get_column(0, False), ['22', '-7'])
def test_regex(self):
"""Selects using the 'regex-match' condition.
"""
table = self.do_select([
('str_expr', [('String', '3'),
('String', '=~'),
('String', r'([ab])\1')]),
])
self.assertEqual(table.get_column(0, False), ['22', '43', '-7'])
class TestAggregate(unittest.TestCase):
def do_aggregate(self, agg_functions):
with intercept_result(AggregateColumn, 'value') as results:
errors = execute([
('WriteFile', 'org.vistrails.vistrails.basic', [
('in_value', [('String', '22;a;T;100\n'
'43;b;F;3\n'
'-7;d;T;41\n'
'500;e;F;21\n'
'20;a;T;1\n'
'43;b;F;23\n'
'21;a;F;41\n')]),
]),
('read|CSVFile', identifier, [
('delimiter', [('String', ';')]),
('header_present', [('Boolean', 'False')]),
('sniff_header', [('Boolean', 'False')]),
]),
('AggregateColumn', identifier, agg_functions),
],
[
(0, 'out_value', 1, 'file'),
(1, 'value', 2, 'table'),
])
self.assertFalse(errors)
self.assertEqual(len(results), 1)
return results[0]
def test_aggregate_sum(self):
table = self.do_aggregate([('op', [('String', 'sum')]),
('column_index', [('Integer', '0')]),
('group_by_index', [('Integer', '1')])])
self.assertEqual(table.get_column(0, False), ['a', 'b', 'd', 'e'])
self.assertEqual(table.get_column(1, True), [63, 86, -7, 500])
def test_aggregate_avg(self):
table = self.do_aggregate([('op', [('String', 'average')]),
('column_index', [('Integer', '3')]),
('group_by_index', [('Integer', '2')])])
self.assertEqual(table.get_column(0, False), ['T', 'F'])
second_col = list(table.get_column(1, True))
self.assertAlmostEqual(second_col[0], 47.3333333333333)
self.assertAlmostEqual(second_col[1], 22.0)
def test_aggregate_min(self):
table = self.do_aggregate([('op', [('String', 'min')]),
('column_index', [('Integer', '0')]),
('group_by_index', [('Integer', '2')])])
self.assertEqual(table.get_column(0, False), ['T', 'F'])
self.assertEqual(table.get_column(1, True), [-7, 21])
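# --- Hedged usage sketch (not part of the original module) -------------------
# SelectFromTable.make_condition builds a row predicate from a comparand and
# an operator string: float comparands are cast before comparing, and '=~'
# compiles the comparand as a regular expression.
if __name__ == '__main__':
    is_small = SelectFromTable.make_condition(10.0, '<')
    assert is_small('3') and not is_small('42')
    has_doubled_letter = SelectFromTable.make_condition(r'([ab])\1', '=~')
    assert has_doubled_letter('abba') and not has_doubled_letter('abc')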
|
bsd-3-clause
|
google-research/google-research
|
ledge/utils.py
|
1
|
2167
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow.compat.v1 as tf
from matplotlib import pyplot as plt
plt.rcParams.update({'font.size': 13})
import numpy as np
def graphBatch(lines, filename, labels=None, ylim=None, title=None):
"""Plots several lines or Tensors on several figures.
Each figure is written to its own png file. Every figure must have
the same number of lines. It's common to have just one figure.
Args:
lines: (list of points) A list of lines to be drawn on the figure.
Each "points" is itself a list of y-values for that line.
filename: (str) Output file name.
labels: (list of str) labels[i] is the legend label for lines[i].
If omitted, there is no legend.
Returns: None
"""
color_list = ['blue', 'orange', 'purple', 'pink', 'cyan', 'red',
'yellow', 'magenta', 'teal']
x_values = range(len(lines[0]))
fig, ax = plt.subplots()
axes = plt.gca()
axes.set_ylim(ylim)
for l in range(len(lines)):
assert len(lines[l]) == len(lines[0])
ax.plot(x_values, lines[l], label = None if not labels else labels[l],
color=color_list[l % len(color_list)], marker='', linestyle='-')
ax.grid()
plt.title(title)
if labels:
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1), shadow=False,
ncol=3, numpoints=1, fontsize=10)
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
plt.grid(True)
with tf.io.gfile.GFile(filename, 'wb') as f:
plt.savefig(f, bbox_inches='tight', pad_inches=0)
print('wrote figure', filename)
plt.close()
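# --- Hedged usage sketch (not part of the original module) -------------------
# graphBatch draws every entry of `lines` on one figure and writes a single
# png; the output path below is only a placeholder.
if __name__ == '__main__':
  _xs = np.linspace(0, 6, 50)
  graphBatch([np.sin(_xs), np.cos(_xs)], '/tmp/example_lines.png',
             labels=['sin', 'cos'], ylim=(-1.5, 1.5), title='toy curves')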
|
apache-2.0
|
google/intelligent_annotation_dialogs
|
detector.py
|
1
|
2744
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The module that keeps and operates with detector's output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class Detector(object):
"""Keeps the detections and gets proposals for a given image-label pairs."""
def __init__(self, detections, predictive_fields):
"""Initialisation of the detector.
Args:
detections: pandas dataframe with detections
predictive_fields: list of features that will be used by IAD
"""
self.predictive_fields = predictive_fields
    # We need to search the detections many times, so sort them to make it faster
self.detections = detections.sort_values('image_id')
self.detections_nparray = self.detections.values[:, 0]
def get_box_proposals(self, image_id, class_id):
"""Gets a list of proposals for a given image and class.
Args:
image_id: image id for verification, str
class_id: class id for verification, int64
Returns:
coordinates: of all box proposals,
pandas dataframe with columns 'xmin', 'xmax', 'ymin', 'ymax'
features: corresponding to coordinates,
pandas dataframe with columns stated in predictive fields
"""
    # as the image_ids are sorted,
# let's find the first and last occurrence of image_id
# that will define the possible search range for proposals
in1 = np.searchsorted(self.detections_nparray, image_id, side='left')
in2 = np.searchsorted(self.detections_nparray, image_id, side='right')
subset2search = self.detections.iloc[in1:in2]
# now in this range find all class_ids
all_proposals = subset2search[subset2search['class_id'] == class_id]
all_proposals_sorted = all_proposals.sort_values(['prediction_score'],
ascending=0)
    # features of the boxes corresponding to image_id, class_id
features = all_proposals_sorted[self.predictive_fields]
coordinates = all_proposals_sorted[['xmin', 'xmax', 'ymin', 'ymax']]
return coordinates, features
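# --- Hedged usage sketch (not part of the original module) -------------------
# A toy detections table with the columns this class relies on ('image_id'
# first, plus 'class_id', 'prediction_score' and the box coordinates); the
# values are invented purely for illustration.
if __name__ == '__main__':
  import pandas as pd
  toy = pd.DataFrame(
      [['img1', 3, 0.9, 0.1, 0.5, 0.1, 0.4],
       ['img1', 3, 0.4, 0.2, 0.6, 0.1, 0.5],
       ['img2', 3, 0.7, 0.0, 0.3, 0.2, 0.6]],
      columns=['image_id', 'class_id', 'prediction_score',
               'xmin', 'xmax', 'ymin', 'ymax'])
  detector = Detector(toy, predictive_fields=['prediction_score'])
  coordinates, features = detector.get_box_proposals('img1', 3)
  print(coordinates.shape, features.shape)  # (2, 4) and (2, 1)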
|
apache-2.0
|
Jacobsolawetz/trading-with-python
|
sandbox/spreadCalculations.py
|
78
|
1496
|
'''
Created on 28 okt 2011
@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os
symbols = ['SPY','IWM']
y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)
df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)
res = readBiggerScreener('CointPairs.csv')
#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp
#The dollar-neutral ratio is about 1 * SPY - 1.7 * IWM. You will get the spread = zero (or probably very near zero)
#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()
#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')
#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()
quote = yahooFinance.getQuote(symbols)
print quote
s = Spread(symbols,histClose=df, estimateBeta = False)
s.setLast(quote['last'])
s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()
fig = plt.figure(2)
s.plot()
|
bsd-3-clause
|
cybernet14/scikit-learn
|
examples/covariance/plot_mahalanobis_distances.py
|
348
|
6232
|
r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual maximum likelihood estimate of the covariance is very
sensitive to the presence of outliers in the data set, and therefore so are
the corresponding Mahalanobis distances. It is better to use a robust
estimator of covariance to guarantee that the estimation is resistant to
"erroneous" observations in the data set and that the associated Mahalanobis
distances accurately reflect the true organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances is
represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
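# Hedged check (not part of the original example): spell out the distance from
# the docstring, d^2 = (x - mu)' Sigma^-1 (x - mu), for the first sample using
# the fitted empirical location and precision.
diff = X[0] - emp_cov.location_
d2 = diff.dot(emp_cov.get_precision()).dot(diff)
print("squared Mahalanobis distance of X[0]: %.3f" % d2)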
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
|
bsd-3-clause
|
omnisis/trading-with-python
|
cookbook/reconstructVXX/reconstructVXX.py
|
77
|
3574
|
# -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
return(sum(self.series.index>date))
def price(self,date):
""" price on a date """
return self.series.get_value(date)
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
def recounstructVXX():
"""
calculate VXX returns
needs a previously preprocessed file vix_futures.csv
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
for date in idx:
        i = np.nonzero(endDates >= date)[0][0]  # find the first non-expired future
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
dr = first.dr(date) # number of remaining dates in the first futures contract
dt = first.dt #number of business days in roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
##-------------------Main script---------------------------
if __name__=="__main__":
Y = recounstructVXX()
print Y.head(30)#
Y.to_csv('reconstructedVXX.csv')
|
bsd-3-clause
|
jseabold/statsmodels
|
statsmodels/datasets/stackloss/data.py
|
4
|
1852
|
"""Stack loss data"""
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain. """
TITLE = __doc__
SOURCE = """
Brownlee, K. A. (1965), "Statistical Theory and Methodology in
Science and Engineering", 2nd edition, New York:Wiley.
"""
DESCRSHORT = """Stack loss plant data of Brownlee (1965)"""
DESCRLONG = """The stack loss plant data of Brownlee (1965) contains
21 days of measurements from a plant's oxidation of ammonia to nitric acid.
The nitric oxide pollutants are captured in an absorption tower."""
NOTE = """::
Number of Observations - 21
Number of Variables - 4
Variable name definitions::
STACKLOSS - 10 times the percentage of ammonia going into the plant
        that escapes from the absorption column
AIRFLOW - Rate of operation of the plant
WATERTEMP - Cooling water temperature in the absorption tower
ACIDCONC - Acid concentration of circulating acid minus 50 times 10.
"""
def load(as_pandas=None):
"""
Load the stack loss data and returns a Dataset class instance.
Parameters
----------
as_pandas : bool
Flag indicating whether to return pandas DataFrames and Series
or numpy recarrays and arrays. If True, returns pandas.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas)
def load_pandas():
"""
Load the stack loss data and returns a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_pandas(data, endog_idx=0)
def _get_data():
return du.load_csv(__file__, 'stackloss.csv').astype(float)
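# --- Hedged usage sketch (not part of the original module) -------------------
# Loading the data and inspecting the endogenous/exogenous split produced by
# process_pandas(endog_idx=0); the variable order follows the NOTE above.
if __name__ == '__main__':
    dataset = load_pandas()
    print(dataset.endog.name)           # STACKLOSS
    print(list(dataset.exog.columns))   # ['AIRFLOW', 'WATERTEMP', 'ACIDCONC']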
|
bsd-3-clause
|
aditipawde/TimeTable1
|
TimeTable1/try (copy).py
|
1
|
3163
|
import dataAccessSQLAlchemy as da
import pandas as pd
import random
import numpy as np
print("welcome");
f_join_subject_subjectClassTeacher = da.execquery('select s.subjectId, subjectShortName, totalHrs, eachSlot, c.classId, teacherId from subject s, subjectClassTeacher c where s.subjectId = c.subjectId;')
f_join_subject_subjectClassTeacher.insert(5,'batchId','-')
f_join_subject_subjectClassTeacher.insert(6,'category','T') #T for theory
#f_join_subject_subjectClassTeacher.rename(columns={'classId':'classOrBatchId'}, inplace=True)
f_join_subject_subjectBatchTeacher = da.execquery('select s.subjectId, subjectShortName, totalHrs, eachSlot, sbt.batchId, bc.classId, teacherId from subject s, subjectBatchTeacher sbt, batchClass bc where s.subjectId = sbt.subjectId AND sbt.batchId = bc.batchId;')
f_join_subject_subjectBatchTeacher.insert(6,'category','L') #L for Lab
#f_join_subject_subjectBatchTeacher.rename(columns={'batchId':'classOrBatchId'}, inplace=True)
f_subjectBatchClassTeacher = pd.concat([f_join_subject_subjectClassTeacher, f_join_subject_subjectBatchTeacher])
print(f_subjectBatchClassTeacher)
#x = f_subject.join(f_subjectBatchTeacher.set_index('subjectId'), on='subjectId')
x=f_subjectBatchClassTeacher
x=x.reset_index()
x.to_csv("x.csv")
totallectures_list = (x['totalHrs'] / x['eachSlot'])
# Create empty dataframe to save all the requirements
req_all = pd.DataFrame(index=range(int(totallectures_list.sum())), columns=list(x))
j = 0
for i in range(len(req_all)):
if((x.iloc[j]['totalHrs']/x.iloc[j]['eachSlot'])>0):
req_all.loc[[i]] = x.iloc[[j]].values
x.set_value(j,'totalHrs', x.loc[j]['totalHrs'] - x.loc[j]['eachSlot'])
if (x.iloc[j]['totalHrs'] == 0):
j = j + 1
#print(req_all)
req_all.to_csv("req_all.csv")
#These values need to be calculated from the database
n_classes=14
n_days=5
n_slots=10
n_maxlecsperslot=4
timetable_np = np.empty((n_classes, n_days, n_slots, n_maxlecsperslot))*np.nan
#print(timetable_np)
for c in (set(req_all.classId)): #First take one class
#print(c)
#http://stackoverflow.com/questions/17071871/select-rows-from-a-dataframe-based-on-values-in-a-column-in-pandas
req_forgivenclass=req_all.loc[req_all['classId'] == c] #List all the requirements for that class in req_forgivenclass
#print(req_forgivenclass)
#print(set(req_forgivenclass.index)) #These are the indices of the requirements for this class
for req in set(req_forgivenclass.index): #Schedule each of these requirements
notassigned = 1
while(notassigned==1): #Keep on scheduling till not found
r_day=random.randint(0,n_days-1)
r_slot = random.randint(0, n_slots-1)
r_lecnumber=random.randint(0,n_maxlecsperslot-1)
if(np.isnan(np.sum(timetable_np[c,r_day,r_slot,r_lecnumber]))): #Check if that slot is empty, this way of using np.isnan is the fastest way of doing so
timetable_np[c,r_day,r_slot,r_lecnumber]=req
notassigned=0
arr_2d = timetable_np.reshape(70,40)
print(arr_2d)
pd.DataFrame(arr_2d).to_csv('tt_2d')
read_arr_2d = pd.read_csv('tt_2d')
#print(timetable_np[c,:,:,:])
|
lgpl-3.0
|
rabernat/xray
|
xarray/core/coordinates.py
|
1
|
9856
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Mapping
from contextlib import contextmanager
import pandas as pd
from . import formatting
from .utils import Frozen
from .merge import (
merge_coords, expand_and_merge_variables, merge_coords_for_inplace_math)
from .pycompat import OrderedDict
class AbstractCoordinates(Mapping, formatting.ReprMixin):
def __getitem__(self, key):
raise NotImplementedError
def __setitem__(self, key, value):
self.update({key: value})
@property
def indexes(self):
return self._data.indexes
@property
def variables(self):
raise NotImplementedError
def _update_coords(self, coords):
raise NotImplementedError
def __iter__(self):
# needs to be in the same order as the dataset variables
for k in self.variables:
if k in self._names:
yield k
def __len__(self):
return len(self._names)
def __contains__(self, key):
return key in self._names
def __unicode__(self):
return formatting.coords_repr(self)
@property
def dims(self):
return self._data.dims
def to_index(self, ordered_dims=None):
"""Convert all index coordinates into a :py:class:`pandas.Index`.
Parameters
----------
ordered_dims : sequence, optional
Possibly reordered version of this object's dimensions indicating
the order in which dimensions should appear on the result.
Returns
-------
pandas.Index
Index subclass corresponding to the outer-product of all dimension
coordinates. This will be a MultiIndex if this object has more
than one dimension.
"""
if ordered_dims is None:
ordered_dims = self.dims
elif set(ordered_dims) != set(self.dims):
raise ValueError('ordered_dims must match dims, but does not: '
'{} vs {}'.format(ordered_dims, self.dims))
if len(ordered_dims) == 0:
raise ValueError('no valid index for a 0-dimensional object')
elif len(ordered_dims) == 1:
(dim,) = ordered_dims
return self._data.get_index(dim)
else:
indexes = [self._data.get_index(k) for k in ordered_dims]
names = list(ordered_dims)
return pd.MultiIndex.from_product(indexes, names=names)
def update(self, other):
other_vars = getattr(other, 'variables', other)
coords = merge_coords([self.variables, other_vars],
priority_arg=1, indexes=self.indexes)
self._update_coords(coords)
def _merge_raw(self, other):
"""For use with binary arithmetic."""
if other is None:
variables = OrderedDict(self.variables)
else:
# don't align because we already called xarray.align
variables = expand_and_merge_variables(
[self.variables, other.variables])
return variables
@contextmanager
def _merge_inplace(self, other):
"""For use with in-place binary arithmetic."""
if other is None:
yield
else:
# don't include indexes in priority_vars, because we didn't align
# first
priority_vars = OrderedDict(
(k, v) for k, v in self.variables.items() if k not in self.dims)
variables = merge_coords_for_inplace_math(
[self.variables, other.variables], priority_vars=priority_vars)
yield
self._update_coords(variables)
def merge(self, other):
"""Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
from .dataset import Dataset
if other is None:
return self.to_dataset()
else:
other_vars = getattr(other, 'variables', other)
coords = expand_and_merge_variables([self.variables, other_vars])
return Dataset._from_vars_and_coord_names(coords, set(coords))
class DatasetCoordinates(AbstractCoordinates):
"""Dictionary like container for Dataset coordinates.
Essentially an immutable OrderedDict with keys given by the array's
dimensions and the values given by the corresponding xarray.Coordinate
objects.
"""
def __init__(self, dataset):
self._data = dataset
@property
def _names(self):
return self._data._coord_names
@property
def variables(self):
return Frozen(OrderedDict((k, v)
for k, v in self._data.variables.items()
if k in self._names))
def __getitem__(self, key):
if key in self._data.data_vars:
raise KeyError(key)
return self._data[key]
def to_dataset(self):
"""Convert these coordinates into a new Dataset
"""
return self._data._copy_listed(self._names)
def _update_coords(self, coords):
from .dataset import calculate_dimensions
variables = self._data._variables.copy()
variables.update(coords)
# check for inconsistent state *before* modifying anything in-place
dims = calculate_dimensions(variables)
new_coord_names = set(coords)
for dim, size in dims.items():
if dim in variables:
new_coord_names.add(dim)
self._data._variables = variables
self._data._coord_names.update(new_coord_names)
self._data._dims = dict(dims)
def __delitem__(self, key):
if key in self:
del self._data[key]
else:
raise KeyError(key)
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return [key for key in self._data._ipython_key_completions_()
if key not in self._data.data_vars]
class DataArrayCoordinates(AbstractCoordinates):
"""Dictionary like container for DataArray coordinates.
Essentially an OrderedDict with keys given by the array's
dimensions and the values given by corresponding DataArray objects.
"""
def __init__(self, dataarray):
self._data = dataarray
@property
def _names(self):
return set(self._data._coords)
def __getitem__(self, key):
return self._data._getitem_coord(key)
def _update_coords(self, coords):
from .dataset import calculate_dimensions
dims = calculate_dimensions(coords)
if not set(dims) <= set(self.dims):
raise ValueError('cannot add coordinates with new dimensions to '
'a DataArray')
self._data._coords = coords
@property
def variables(self):
return Frozen(self._data._coords)
def _to_dataset(self, shallow_copy=True):
from .dataset import Dataset
coords = OrderedDict((k, v.copy(deep=False) if shallow_copy else v)
for k, v in self._data._coords.items())
return Dataset._from_vars_and_coord_names(coords, set(coords))
def to_dataset(self):
return self._to_dataset()
def __delitem__(self, key):
del self._data._coords[key]
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return self._data._ipython_key_completions_()
class LevelCoordinatesSource(object):
"""Iterator for MultiIndex level coordinates.
Used for attribute style lookup with AttrAccessMixin. Not returned directly
by any public methods.
"""
def __init__(self, data_object):
self._data = data_object
def __getitem__(self, key):
# not necessary -- everything here can already be found in coords.
raise KeyError
def __iter__(self):
return iter(self._data._level_coords)
class Indexes(Mapping, formatting.ReprMixin):
"""Ordered Mapping[str, pandas.Index] for xarray objects.
"""
def __init__(self, variables, sizes):
"""Not for public consumption.
Arguments
---------
variables : OrderedDict[Any, Variable]
Reference to OrderedDict holding variable objects. Should be the
same dictionary used by the source object.
sizes : OrderedDict[Any, int]
Map from dimension names to sizes.
"""
self._variables = variables
self._sizes = sizes
def __iter__(self):
for key in self._sizes:
if key in self._variables:
yield key
def __len__(self):
return sum(key in self._variables for key in self._sizes)
def __contains__(self, key):
return key in self._sizes and key in self._variables
def __getitem__(self, key):
if key not in self._sizes:
raise KeyError(key)
return self._variables[key].to_index()
def __unicode__(self):
return formatting.indexes_repr(self)
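# --- Illustrative usage sketch (not part of this module) ------------------
# A hedged example of the coordinate Mapping behaviour implemented above.
# The dataset and names below are made up; the sketch is wrapped in a
# function so nothing runs on import.
def _coords_usage_sketch():
    import numpy as np
    import xarray as xr
    ds = xr.Dataset(
        {'t': (('x', 'y'), np.zeros((2, 3)))},
        coords={'x': [10, 20], 'y': ['a', 'b', 'c']})
    print(list(ds.coords))                 # coordinate names, via __iter__ above
    print(ds.indexes['x'])                 # pandas.Index built from the 'x' coord
    print(ds.coords.to_index(['y', 'x']))  # MultiIndex outer product, see to_index above
    return ds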
|
apache-2.0
|
dmitry-r/incubator-airflow
|
airflow/hooks/hive_hooks.py
|
7
|
28649
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import collections
import unicodecsv as csv
import itertools
import logging
import re
import subprocess
import time
from tempfile import NamedTemporaryFile
import hive_metastore
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
from airflow import configuration
import airflow.security.utils as utils
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports the ``beeline``,
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(  # builtins.zip (imported above); itertools.izip does not exist on Python 3
["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()]
)
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
hive_conf_params = self._prepare_hiveconf(hive_conf)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue)])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
logging.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
logging.info("Testing HQL [{0} (...)]".format(query_preview))
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
logging.info(message)
error_loc = re.search('(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
logging.info("Context :\n {0}".format(context))
else:
logging.info("SUCCESS")
def load_df(
self,
df,
table,
create=True,
recreate=False,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param field_dict: mapping from column name to hive data type
:type field_dict: dict
:param encoding: string encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
return dict((col, DTYPE_KIND_HIVE_TYPE[dtype.kind]) for col, dtype in df.dtypes.iteritems())
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
if field_dict is None and (create or recreate):
field_dict = _infer_field_types_from_df(df)
df.to_csv(f, sep=delimiter, **pandas_kwargs)
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values
:type field_dict: dict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n"
hql += ";"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
# This is for pickling to work despite the thrift hive client not
# being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
:type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition: Name of the partitions to check for (eg `a=b/c=d`)
:type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
self.metastore._oprot.trans.open()
try:
self.metastore.get_partition_by_name(
schema, table, partition_name)
return True
except hive_metastore.ttypes.NoSuchObjectException:
return False
finally:
self.metastore._oprot.trans.close()
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
"""
Get a metastore table object
"""
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
"""
Get a metastore table object
"""
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with fewer than 32767 partitions (java short max val).
For subpartitioned tables, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
"""
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
tables, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
"""
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
t = self.get_table(table_name, db)
return True
except Exception as e:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the impyla library
Note that the default authMechanism is PLAIN, to override it you
can specify it in the ``extra`` of your connection in the UI as in
``{"authMechanism": "GSSAPI"}``
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'PLAIN')
kerberos_service_name = None
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# impyla uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'KERBEROS':
logging.warning("Detected deprecated 'KERBEROS' for authMechanism for %s. Please use 'GSSAPI' instead",
self.hiveserver2_conn_id)
auth_mechanism = 'GSSAPI'
from impala.dbapi import connect
return connect(
host=db.host,
port=db.port,
auth_mechanism=auth_mechanism,
kerberos_service_name=kerberos_service_name,
user=db.login,
database=schema or db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
from impala.error import ProgrammingError
with self.get_conn(schema) as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
cur = conn.cursor()
for statement in hql:
cur.execute(statement)
records = []
try:
# impala Lib raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
records = cur.fetchall()
except ProgrammingError:
logging.debug("get_results returned no records")
if records:
results = {
'data': records,
'header': cur.description,
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000):
schema = schema or 'default'
with self.get_conn(schema) as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.description
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
if output_header:
writer.writerow([c[0] for c in cur.description])
i = 0
while True:
rows = [row for row in cur.fetchmany(fetch_size) if row]
if not rows:
break
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
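# --- Illustrative usage sketch (not part of this module) ------------------
# A hedged example of how the hooks above are typically combined.  It assumes
# Airflow connections named 'hive_cli_default', 'metastore_default' and
# 'hiveserver2_default' exist and point at a reachable Hive cluster; the
# table and data below are made up.
def _hive_hooks_usage_sketch():
    import pandas as pd
    # Load a small DataFrame into a (re)created Hive table via the CLI hook.
    cli = HiveCliHook(hive_cli_conn_id='hive_cli_default')
    df = pd.DataFrame({'state': ['CA'], 'year': [2015], 'num': [1]})
    cli.load_df(df, table='airflow.demo_table', recreate=True)
    # Confirm the table through the metastore hook.
    ms = HiveMetastoreHook(metastore_conn_id='metastore_default')
    print(ms.table_exists('demo_table', db='airflow'))
    # Query the rows back through HiveServer2 (impyla).
    hs2 = HiveServer2Hook(hiveserver2_conn_id='hiveserver2_default')
    return hs2.get_pandas_df('SELECT * FROM airflow.demo_table LIMIT 10')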
|
apache-2.0
|
shibaniahegde/OpenStak_swift
|
swift/common/middleware/xprofile.py
|
36
|
9905
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Profiling middleware for Swift Servers.
The current implementation is based on the eventlet-aware profiler. (For the
future, more profilers could be added to collect more data for analysis.)
It profiles all incoming requests and accumulates cpu timing statistics
information for performance tuning and optimization. A mini web UI is also
provided for profiling data analysis. It can be accessed from the URL as
below.
Index page for browse profile data::
http://SERVER_IP:PORT/__profile__
List all profiles to return profile ids in json format::
http://SERVER_IP:PORT/__profile__/
http://SERVER_IP:PORT/__profile__/all
Retrieve specific profile data in different formats::
http://SERVER_IP:PORT/__profile__/PROFILE_ID?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/current?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/all?format=[default|json|csv|ods]
Retrieve metrics from specific function in json format::
http://SERVER_IP:PORT/__profile__/PROFILE_ID/NFL?format=json
http://SERVER_IP:PORT/__profile__/current/NFL?format=json
http://SERVER_IP:PORT/__profile__/all/NFL?format=json
NFL is defined by concatenation of file name, function name and the first
line number.
e.g.::
account.py:50(GETorHEAD)
or with full path:
opt/stack/swift/swift/proxy/controllers/account.py:50(GETorHEAD)
A list of URL examples:
http://localhost:8080/__profile__ (proxy server)
http://localhost:6000/__profile__/all (object server)
http://localhost:6001/__profile__/current (container server)
http://localhost:6002/__profile__/12345?format=json (account server)
The profiling middleware can be configured in paste file for WSGI servers such
as proxy, account, container and object servers. Please refer to the sample
configuration files in etc directory.
The profiling data is provided with four formats such as binary(by default),
json, csv and odf spreadsheet which requires installing odfpy library.
sudo pip install odfpy
There's also a simple visualization capability which is enabled by using the
matplotlib toolkit. It must also be installed if you want to use it to
visualize the statistics data.
sudo apt-get install python-matplotlib
"""
import os
import sys
import time
from eventlet import greenthread, GreenPool, patcher
import eventlet.green.profile as eprofile
from swift import gettext_ as _
from swift.common.utils import get_logger, config_true_value
from swift.common.swob import Request
from x_profile.exceptions import NotFoundException, MethodNotAllowed,\
ProfileException
from x_profile.html_viewer import HTMLViewer
from x_profile.profile_model import ProfileLog
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
text_type = str
else:
text_type = unicode
def bytes_(s, encoding='utf-8', errors='strict'):
if isinstance(s, text_type): # pragma: no cover
return s.encode(encoding, errors)
return s
try:
from urllib.parse import parse_qs
except ImportError:
try:
from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
DEFAULT_PROFILE_PREFIX = '/tmp/log/swift/profile/default.profile'
# unwind the iterator; it may call start_response, do lots of work, etc
PROFILE_EXEC_EAGER = """
app_iter = self.app(environ, start_response)
app_iter_ = list(app_iter)
if hasattr(app_iter, 'close'):
app_iter.close()
"""
# don't unwind the iterator (don't consume resources)
PROFILE_EXEC_LAZY = """
app_iter_ = self.app(environ, start_response)
"""
thread = patcher.original('thread') # non-monkeypatched module needed
# This monkey patch fixes the problem of the eventlet profile tool,
# which cannot accumulate profiling results across multiple calls
# of runcall and runctx.
def new_setup(self):
self._has_setup = True
self.cur = None
self.timings = {}
self.current_tasklet = greenthread.getcurrent()
self.thread_id = thread.get_ident()
self.simulate_call("profiler")
def new_runctx(self, cmd, globals, locals):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runctx(self, cmd, globals, locals)
finally:
self.TallyTimings()
def new_runcall(self, func, *args, **kw):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runcall(self, func, *args, **kw)
finally:
self.TallyTimings()
class ProfileMiddleware(object):
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route='profile')
self.log_filename_prefix = conf.get('log_filename_prefix',
DEFAULT_PROFILE_PREFIX)
dirname = os.path.dirname(self.log_filename_prefix)
# Note: creating this directory may fail with permission denied;
# it is better to create it and grant it to the current
# user in advance.
if not os.path.exists(dirname):
os.makedirs(dirname)
self.dump_interval = float(conf.get('dump_interval', 5.0))
self.dump_timestamp = config_true_value(conf.get(
'dump_timestamp', 'no'))
self.flush_at_shutdown = config_true_value(conf.get(
'flush_at_shutdown', 'no'))
self.path = conf.get('path', '__profile__').replace('/', '')
self.unwind = config_true_value(conf.get('unwind', 'no'))
self.profile_module = conf.get('profile_module',
'eventlet.green.profile')
self.profiler = get_profiler(self.profile_module)
self.profile_log = ProfileLog(self.log_filename_prefix,
self.dump_timestamp)
self.viewer = HTMLViewer(self.path, self.profile_module,
self.profile_log)
self.dump_pool = GreenPool(1000)
self.last_dump_at = None
def __del__(self):
if self.flush_at_shutdown:
self.profile_log.clear(str(os.getpid()))
def _combine_body_qs(self, request):
wsgi_input = request.environ['wsgi.input']
query_dict = request.params
qs_in_body = wsgi_input.read()
query_dict.update(parse_qs(qs_in_body, keep_blank_values=True,
strict_parsing=False))
return query_dict
def dump_checkpoint(self):
current_time = time.time()
if self.last_dump_at is None or self.last_dump_at +\
self.dump_interval < current_time:
self.dump_pool.spawn_n(self.profile_log.dump_profile,
self.profiler, os.getpid())
self.last_dump_at = current_time
def __call__(self, environ, start_response):
request = Request(environ)
path_entry = request.path_info.split('/')
# hijack favicon request sent by browser so that it doesn't
# invoke profiling hook and contaminate the data.
if path_entry[1] == 'favicon.ico':
start_response('200 OK', [])
return ''
elif path_entry[1] == self.path:
try:
self.dump_checkpoint()
query_dict = self._combine_body_qs(request)
content, headers = self.viewer.render(request.url,
request.method,
path_entry,
query_dict,
self.renew_profile)
start_response('200 OK', headers)
return [bytes_(content)]
except MethodNotAllowed as mx:
start_response('405 Method Not Allowed', [])
return '%s' % mx
except NotFoundException as nx:
start_response('404 Not Found', [])
return '%s' % nx
except ProfileException as pf:
start_response('500 Internal Server Error', [])
return '%s' % pf
except Exception as ex:
start_response('500 Internal Server Error', [])
return _('Error on render profiling results: %s') % ex
else:
_locals = locals()
code = self.unwind and PROFILE_EXEC_EAGER or\
PROFILE_EXEC_LAZY
self.profiler.runctx(code, globals(), _locals)
app_iter = _locals['app_iter_']
self.dump_checkpoint()
return app_iter
def renew_profile(self):
self.profiler = get_profiler(self.profile_module)
def get_profiler(profile_module):
if profile_module == 'eventlet.green.profile':
eprofile.Profile._setup = new_setup
eprofile.Profile.runctx = new_runctx
eprofile.Profile.runcall = new_runcall
# hacked method to import profile module supported in python 2.6
__import__(profile_module)
return sys.modules[profile_module].Profile()
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def profile_filter(app):
return ProfileMiddleware(app, conf)
return profile_filter
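# --- Illustrative wiring sketch (not part of this module) -----------------
# A hedged example of wrapping a WSGI app with the middleware above directly
# from Python; normally this is done through a paste filter section instead.
# The conf values shown are assumptions, not required settings.
def _profile_middleware_sketch(app):
    conf = {
        'log_filename_prefix': DEFAULT_PROFILE_PREFIX,
        'dump_interval': '5.0',
        'path': '__profile__',
        'profile_module': 'eventlet.green.profile',
    }
    return filter_factory({}, **conf)(app)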
|
apache-2.0
|
bikong2/scikit-learn
|
sklearn/manifold/t_sne.py
|
52
|
34602
|
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..utils.fixes import astype
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
scales quadratically in the number of samples, whereas limiting the joint
probabilities to the nearest neighbors reduces the cost to roughly O(uN),
with u the number of neighbors.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
neighbors : array, shape (n_samples, K)
Indices of the K nearest neighbors of each sample; only these pairs
contribute to the joint probabilities.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
neighbors = astype(neighbors, np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
m = "All probabilities should be finite"
assert np.all(np.isfinite(conditional_P)), m
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
assert np.all(np.abs(P) <= 1.0)
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(skip_num_points, n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
n_components):
"""t-SNE objective function: the absolute error of the
KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors : array (n_samples, K)
The neighbors array is not actually required to calculate the
divergence, but it is kept here to match the signature of the
gradient function.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if len(P.shape) == 2:
P = squareform(P)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
return kl_divergence
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
n_components, angle=0.5, skip_num_points=0,
verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors: int64 array, shape (n_samples, K)
Array with element [i, j] giving the index for the jth
closest neighbor to point i.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = astype(params, np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
neighbors = astype(neighbors, np.int64, copy=False)
if len(P.shape) == 1:
sP = squareform(P).astype(np.float32)
else:
sP = P.astype(np.float32)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
objective_error : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
n_iter_without_progress : int, optional (default: 50)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf, out=gains)  # clip in place; the result was previously discarded
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
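# --- Illustrative sketch (not part of this module) -------------------------
# A hedged example of the trustworthiness() helper above, run on synthetic
# data: a random linear projection of Gaussian samples is scored against the
# original features.  The sizes below are made up and only show the call.
def _trustworthiness_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    X_embedded = X.dot(rng.randn(10, 2))   # any (n_samples, n_components) array
    return trustworthiness(X, X_embedded, n_neighbors=5)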
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017599, 0.00003993],
[ 0.00009891, 0.00021913],
[ 0.00018554, -0.00009357],
[ 0.00009528, -0.00001407]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
if init not in ["pca", "random"] or isinstance(init, np.ndarray):
msg = "'init' must be 'pca', 'random' or a NumPy array"
raise ValueError(msg)
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.embedding_ = None
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise use a
        dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that
when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
X = check_array(X, dtype=np.float32)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if not np.all(distances >= 0):
raise ValueError("All distances should be positive, either "
"the metric or precomputed distances given "
"as X are not correct")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
# the number of nearest neighbors to find
k = min(n_samples - 1, int(3. * self.perplexity + 1))
neighbors_nn = None
if self.method == 'barnes_hut':
if self.verbose:
print("[t-SNE] Computing %i nearest neighbors..." % k)
if self.metric == 'precomputed':
# Use the precomputed distances to find
# the k nearest neighbors and their distances
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
else:
# Find the nearest neighbors for every point
bt = BallTree(X)
# LvdM uses 3 * perplexity as the number of neighbors
# And we add one to not count the data point itself
# In the event that we have very small # of points
# set the neighbors to n - 1
distances_nn, neighbors_nn = bt.query(X, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
P = _joint_probabilities_nn(distances, neighbors_nn,
self.perplexity, self.verbose)
else:
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be zero or positive"
assert np.all(P <= 1), ("All probabilities should be less "
"or then equal to one")
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
X_embedded=None, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
opt_args = {}
opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
"learning_rate": self.learning_rate,
"verbose": self.verbose, "n_iter_check": 25,
"kwargs": dict(skip_num_points=skip_num_points)}
if self.method == 'barnes_hut':
m = "Must provide an array of neighbors to use Barnes-Hut"
assert neighbors is not None, m
obj_func = _kl_divergence_bh
objective_error = _kl_divergence_error
sP = squareform(P).astype(np.float32)
neighbors = neighbors.astype(np.int64)
args = [sP, neighbors, degrees_of_freedom, n_samples,
self.n_components]
opt_args['args'] = args
opt_args['min_grad_norm'] = 1e-3
opt_args['n_iter_without_progress'] = 30
# Don't always calculate the cost since that calculation
# can be nearly as expensive as the gradient
opt_args['objective_error'] = objective_error
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
opt_args['args'] = [P, degrees_of_freedom, n_samples,
self.n_components]
opt_args['min_error_diff'] = 0.0
opt_args['min_grad_norm'] = 0.0
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(obj_func, params, **opt_args)
opt_args['n_iter'] = 100
opt_args['momentum'] = 0.8
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Save the final number of iterations
self.n_iter_final = it
# Final optimization
P /= self.early_exaggeration
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
def _check_fitted(self):
if self.embedding_ is None:
raise ValueError("Cannot call `transform` unless `fit` has"
"already been called")
|
bsd-3-clause
|
luo66/scikit-learn
|
sklearn/manifold/t_sne.py
|
48
|
20644
|
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
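# Illustrative sketch (a hypothetical helper, not part of this module): it
# mirrors what `TSNE.fit` below does, feeding a square matrix of squared
# Euclidean distances into `_joint_probabilities` and returning the condensed
# joint-probability vector of length n_samples * (n_samples - 1) / 2.
def _example_joint_probabilities():
    import numpy as np
    from sklearn.metrics.pairwise import pairwise_distances

    rng = np.random.RandomState(0)
    X = rng.randn(30, 5)
    distances = pairwise_distances(X, squared=True)  # shape (30, 30)
    P = _joint_probabilities(distances, desired_perplexity=10.0, verbose=0)
    return P.shape  # (30 * 29 // 2,)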
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
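# Illustrative sketch (a hypothetical helper, not part of this module): one
# evaluation of the objective, showing the calling convention expected by
# `_gradient_descent` below -- a flat parameter vector in, a (cost, flat
# gradient) pair out. The uniform P is an assumption made for brevity; its
# condensed entries sum to 0.5, matching the normalization produced by
# `_joint_probabilities` above.
def _example_kl_divergence_call():
    import numpy as np

    rng = np.random.RandomState(0)
    n_samples, n_components = 20, 2
    n_pairs = n_samples * (n_samples - 1) // 2
    P = np.full(n_pairs, 0.5 / n_pairs)
    params = 1e-4 * rng.randn(n_samples * n_components)
    kl, grad = _kl_divergence(params, P, alpha=1.0,
                              n_samples=n_samples, n_components=n_components)
    return kl, grad.shape  # scalar cost, gradient of shape (40,)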
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
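# Illustrative sketch (a hypothetical helper, not part of this module): the
# optimizer applied to a toy quadratic, just to show its interface. The small
# learning rate is an assumption for this toy problem; the large default is
# tuned for the t-SNE objective.
def _example_gradient_descent_quadratic():
    import numpy as np

    def quadratic(p):
        # cost = ||p||^2 / 2, gradient = p
        return 0.5 * np.dot(p, p), p

    p0 = np.array([5.0, -3.0])
    p, error, it = _gradient_descent(quadratic, p0, it=0, n_iter=100,
                                     momentum=0.5, learning_rate=0.1)
    return p, error, it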
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
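# Illustrative sketch (a hypothetical helper, not part of this module): a 2-D
# PCA projection of random data scored with `trustworthiness`. Scores close
# to 1.0 mean the k-nearest-neighbor structure of the input is preserved in
# the embedding; the exact value here depends on the random data.
def _example_trustworthiness():
    import numpy as np
    from sklearn.decomposition import PCA

    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    X_embedded = PCA(n_components=2).fit_transform(X)
    return trustworthiness(X, X_embedded, n_neighbors=5)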
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
           [-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
min_grad_norm=self.min_grad_norm,
n_iter_without_progress=self.n_iter_without_progress,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
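# Illustrative sketch (a hypothetical helper, not part of this module): the
# metric="precomputed" path documented above. X must then be a square
# distance matrix, and init="pca" is not allowed in that case.
def _example_precomputed_tsne():
    import numpy as np
    from sklearn.metrics.pairwise import pairwise_distances

    rng = np.random.RandomState(0)
    X = rng.randn(40, 8)
    D = pairwise_distances(X)  # square (40, 40) distance matrix
    model = TSNE(n_components=2, metric="precomputed", random_state=0)
    return model.fit_transform(D).shape  # (40, 2)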
|
bsd-3-clause
|
zorojean/scikit-learn
|
examples/manifold/plot_manifold_sphere.py
|
258
|
5101
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space. Unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_samples, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
466152112/scikit-learn
|
sklearn/utils/metaestimators.py
|
283
|
2353
|
"""Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
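# Illustrative sketch (a hypothetical helper, not part of this module):
# attribute access on the decorated method goes through
# `_IffHasAttrDescriptor`, so when the sub-estimator provides `predict` the
# call is delegated, and when it does not, the attribute lookup itself raises
# AttributeError (which is why hasattr() returns False in the doctest above).
def _example_delegation():
    class SubEstimator(object):
        def predict(self, X):
            return X

    class Meta(object):
        def __init__(self, sub_est):
            self.sub_est = sub_est

        @if_delegate_has_method(delegate='sub_est')
        def predict(self, X):
            return self.sub_est.predict(X)

    return Meta(SubEstimator()).predict([1, 2, 3])  # delegated -> [1, 2, 3]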
|
bsd-3-clause
|
lbishal/scikit-learn
|
sklearn/manifold/t_sne.py
|
13
|
34618
|
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..utils.fixes import astype
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
    is O(N^2), but limiting the joint probability to nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
        in a one-dimensional array.
    neighbors : array, shape (n_samples, k)
        Indices of the k nearest neighbors of each sample point.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
neighbors = astype(neighbors, np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
m = "All probabilities should be finite"
assert np.all(np.isfinite(conditional_P)), m
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
assert np.all(np.abs(P) <= 1.0)
return P
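# Illustrative sketch (a hypothetical helper, not part of this module): it
# mirrors the Barnes-Hut branch of `TSNE._fit` below, where a square matrix
# of squared distances plus a (n_samples, k) array of neighbor indices yield
# the condensed joint-probability vector.
def _example_joint_probabilities_nn():
    import numpy as np
    from sklearn.metrics.pairwise import pairwise_distances

    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    distances = pairwise_distances(X, squared=True)
    k = 10
    # k nearest neighbors per point, excluding the point itself.
    neighbors = np.argsort(distances, axis=1)[:, 1:k + 1]
    P = _joint_probabilities_nn(distances, neighbors,
                                desired_perplexity=3.0, verbose=0)
    return P.shape  # (50 * 49 // 2,)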
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(skip_num_points, n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
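# Illustrative sketch (a hypothetical helper, not part of this module): a
# central finite difference on one coordinate should agree closely with the
# analytic gradient returned by `_kl_divergence`. The uniform P is an
# assumption made for brevity; its condensed entries sum to 0.5, matching the
# normalization produced by `_joint_probabilities` above.
def _example_gradient_check():
    import numpy as np

    rng = np.random.RandomState(0)
    n_samples, n_components = 10, 2
    n_pairs = n_samples * (n_samples - 1) // 2
    P = np.full(n_pairs, 0.5 / n_pairs)
    params = 1e-2 * rng.randn(n_samples * n_components)

    _, grad = _kl_divergence(params, P, 1.0, n_samples, n_components)
    eps = 1e-5
    shifted = params.copy()
    shifted[0] += eps
    plus, _ = _kl_divergence(shifted, P, 1.0, n_samples, n_components)
    shifted[0] -= 2 * eps
    minus, _ = _kl_divergence(shifted, P, 1.0, n_samples, n_components)
    numeric = (plus - minus) / (2 * eps)
    return grad[0], numeric  # the two values should agree closely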
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
n_components):
"""t-SNE objective function: the absolute error of the
KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors : array (n_samples, K)
The neighbors is not actually required to calculate the
divergence, but is here to match the signature of the
gradient function
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if len(P.shape) == 2:
P = squareform(P)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
return kl_divergence
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
n_components, angle=0.5, skip_num_points=0,
verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors: int64 array, shape (n_samples, K)
Array with element [i, j] giving the index for the jth
closest neighbor to point i.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angles below 0.2 quickly increase the
        computation time and angles above 0.8 quickly increase the error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = astype(params, np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
neighbors = astype(neighbors, np.int64, copy=False)
if len(P.shape) == 1:
sP = squareform(P).astype(np.float32)
else:
sP = P.astype(np.float32)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
    objective_error : function or callable
        Should return the cost for a given parameter vector.
    n_iter_without_progress : int, optional (default: 50)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
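# Illustrative sketch (a hypothetical helper, not part of this module): the
# staged calling pattern used by `TSNE._tsne` below -- `it` carries the
# iteration counter between calls so a later stage resumes where the previous
# one stopped. The toy quadratic objective and small learning rate are
# assumptions made for brevity.
def _example_staged_gradient_descent():
    import numpy as np

    def quadratic(p):
        # cost and gradient of ||p||^2 / 2
        return 0.5 * np.dot(p, p), p

    p0 = np.array([4.0, -2.0])
    p, error, it = _gradient_descent(quadratic, p0, it=0, n_iter=20,
                                     momentum=0.5, learning_rate=0.1)
    # Second stage: heavier momentum, continuing from iteration it + 1.
    p, error, it = _gradient_descent(quadratic, p, it=it + 1, n_iter=50,
                                     momentum=0.8, learning_rate=0.1)
    return p, error, it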
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angles below 0.2 quickly increase the
        computation time and angles above 0.8 quickly increase the error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017599, 0.00003993],
[ 0.00009891, 0.00021913],
[ 0.00018554, -0.00009357],
[ 0.00009528, -0.00001407]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
if init not in ["pca", "random"] or isinstance(init, np.ndarray):
msg = "'init' must be 'pca', 'random' or a NumPy array"
raise ValueError(msg)
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.embedding_ = None
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise use a
        dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that
when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
X = check_array(X, dtype=np.float32)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if not np.all(distances >= 0):
raise ValueError("All distances should be positive, either "
"the metric or precomputed distances given "
"as X are not correct")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
# the number of nearest neighbors to find
k = min(n_samples - 1, int(3. * self.perplexity + 1))
neighbors_nn = None
if self.method == 'barnes_hut':
if self.verbose:
print("[t-SNE] Computing %i nearest neighbors..." % k)
if self.metric == 'precomputed':
# Use the precomputed distances to find
# the k nearest neighbors and their distances
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
else:
# Find the nearest neighbors for every point
bt = BallTree(X)
# LvdM uses 3 * perplexity as the number of neighbors
# And we add one to not count the data point itself
# In the event that we have very small # of points
# set the neighbors to n - 1
distances_nn, neighbors_nn = bt.query(X, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
P = _joint_probabilities_nn(distances, neighbors_nn,
self.perplexity, self.verbose)
else:
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be zero or positive"
        assert np.all(P <= 1), ("All probabilities should be less "
                                "than or equal to one")
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
X_embedded=None, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
        opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
"learning_rate": self.learning_rate,
"verbose": self.verbose, "n_iter_check": 25,
"kwargs": dict(skip_num_points=skip_num_points)}
if self.method == 'barnes_hut':
m = "Must provide an array of neighbors to use Barnes-Hut"
assert neighbors is not None, m
obj_func = _kl_divergence_bh
objective_error = _kl_divergence_error
sP = squareform(P).astype(np.float32)
neighbors = neighbors.astype(np.int64)
args = [sP, neighbors, degrees_of_freedom, n_samples,
self.n_components]
opt_args['args'] = args
opt_args['min_grad_norm'] = 1e-3
opt_args['n_iter_without_progress'] = 30
# Don't always calculate the cost since that calculation
# can be nearly as expensive as the gradient
opt_args['objective_error'] = objective_error
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
opt_args['args'] = [P, degrees_of_freedom, n_samples,
self.n_components]
opt_args['min_error_diff'] = 0.0
opt_args['min_grad_norm'] = 0.0
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(obj_func, params, **opt_args)
opt_args['n_iter'] = 100
opt_args['momentum'] = 0.8
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Save the final number of iterations
self.n_iter_final = it
# Final optimization
P /= self.early_exaggeration
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
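
# --- Editor's usage sketch (illustrative, not part of the original module) ---
# A minimal example of driving this estimator through the public scikit-learn
# API (sklearn.manifold.TSNE). The data and parameter values below are
# assumptions chosen only for demonstration.
if __name__ == "__main__":
    import numpy as np
    from sklearn.manifold import TSNE

    rng = np.random.RandomState(0)
    X = rng.randn(100, 50)                        # 100 samples, 50 features
    tsne = TSNE(n_components=2, perplexity=30.0, init="random",
                random_state=0, method="barnes_hut")
    X_2d = tsne.fit_transform(X)                  # embedded coordinates
    print(X_2d.shape)                             # -> (100, 2)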
|
bsd-3-clause
|
maniteja123/numpy
|
numpy/fft/fftpack.py
|
12
|
45846
|
"""
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
from .helper import _FFTCache
_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
_real_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
# We have to ensure that only a single thread can access a wsave array
# at any given time. Thus we remove it from the cache and insert it
# again after it has been used. Multiple threads might create multiple
# copies of the wsave array. This is intentional and a limitation of
# the current C code.
wsave = fft_cache.pop_twiddle_factors(n)
if wsave is None:
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache.put_twiddle_factors(n, wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
a = asarray(a).astype(complex, copy=False)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
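
# Editor's sketch (hypothetical helper, not part of the NumPy API): with
# norm="ortho" the 1/sqrt(n) scaling above makes the transform unitary, so the
# signal's l2 norm (energy) is preserved and the round trip is exact up to
# floating-point error. Wrapped in a function so it does not run at import time.
def _demo_ortho_unitary():
    import numpy as np
    x = np.random.randn(8)
    X = np.fft.fft(x, norm="ortho")
    # Parseval: sum |x|^2 == sum |X|^2 under the orthonormal convention.
    assert np.allclose(np.sum(np.abs(x) ** 2), np.sum(np.abs(X) ** 2))
    # Round trip recovers the input.
    assert np.allclose(np.fft.ifft(X, norm="ortho"), x)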
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
For an even number of input points, ``A[n//2]`` represents the sum of
the values at the positive and negative Nyquist frequencies, as the two
are aliased together. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
...
>>> plt.legend(('real', 'imaginary'))
...
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
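
# Editor's sketch (hypothetical helper): Fourier interpolation as mentioned in
# the Notes above. Note that irfft divides by the *new* length, so a plain
# ``irfft(rfft(a), m)`` rescales amplitudes by len(a)/m; multiplying by
# m/len(a) restores the original amplitude when resampling.
def _demo_fourier_resample():
    import numpy as np
    n, m = 16, 32
    a = np.sin(2 * np.pi * np.arange(n) / n)           # band-limited real signal
    a_resamp = np.fft.irfft(np.fft.rfft(a), m) * (m / float(n))
    # The original samples reappear at every other point of the upsampled grid.
    assert np.allclose(a_resamp[::2], a)
    return a_resamp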
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
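
# Editor's sketch (hypothetical helper): how the defaults above resolve. When
# neither ``s`` nor ``axes`` is given, every axis is transformed at its full
# length; when only ``s`` is given, the last len(s) axes are used; when only
# ``axes`` is given, the lengths are read off those axes.
def _demo_cook_nd_args():
    import numpy as np
    a = np.zeros((4, 6, 8))
    assert _cook_nd_args(a) == ([4, 6, 8], [-3, -2, -1])
    assert _cook_nd_args(a, s=[6, 8]) == ([6, 8], [-2, -1])
    assert list(_cook_nd_args(a, axes=(0, 2))[0]) == [4, 8]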
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
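
# Editor's sketch (hypothetical helper): the shape rule described above -- the
# real transform over the last axis keeps only s[-1]//2 + 1 frequencies, while
# the remaining axes keep their full (complex) length. The round trip through
# irfftn needs the original shape because that last axis was halved.
def _demo_rfftn_shape():
    import numpy as np
    a = np.random.rand(3, 4, 6)
    out = np.fft.rfftn(a)
    assert out.shape == (3, 4, 6 // 2 + 1)              # (3, 4, 4)
    assert np.allclose(np.fft.irfftn(out, s=a.shape), a)
    return out.shape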
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
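
# Editor's sketch (hypothetical helper): fft2/ifft2 and rfft2/irfft2 defined
# above are thin wrappers around the n-dimensional routines with the default
# axes=(-2, -1), so they transform each 2-D "image" in a stack independently.
def _demo_2d_wrappers():
    import numpy as np
    imgs = np.random.rand(5, 8, 8)                      # 5 images of 8x8
    F = np.fft.fft2(imgs)                               # last two axes only
    assert np.allclose(F, np.fft.fftn(imgs, axes=(-2, -1)))
    assert np.allclose(np.fft.ifft2(F), imgs)
    # Real-input variant: the last axis is reduced to 8//2 + 1 = 5 bins.
    R = np.fft.rfft2(imgs)
    assert R.shape == (5, 8, 5)
    assert np.allclose(np.fft.irfft2(R, s=imgs.shape[-2:]), imgs)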
|
bsd-3-clause
|
MartinPerez/nistats
|
examples/plot_spm_multimodal_faces.py
|
1
|
3482
|
"""
Minimal script for preprocessing single-subject data (two sessions)
Author: DOHMATOB Elvis, Bertrand Thirion, 2015
For details on the data, please see:
Henson, R.N., Goshen-Gottstein, Y., Ganel, T., Otten, L.J., Quayle, A.,
Rugg, M.D. Electrophysiological and haemodynamic correlates of face
perception, recognition and priming. Cereb Cortex. 2003 Jul;13(7):793-805.
http://www.dx.doi.org/10.1093/cercor/13.7.793
Note: this example takes a lot of time because the inputs are lists of 3D images
sampled at different positions (encoded by different affine functions).
"""
print(__doc__)
# standard imports
import numpy as np
from scipy.io import loadmat
import pandas as pd
# imports for GLM business
from nilearn.image import concat_imgs, resample_img, mean_img
from nistats.design_matrix import make_design_matrix
from nistats.glm import FirstLevelGLM
from nistats.datasets import fetch_spm_multimodal_fmri
# fetch spm multimodal_faces data
subject_data = fetch_spm_multimodal_fmri()
# experimental paradigm meta-params
tr = 2.
drift_model = 'Cosine'
hrf_model = 'Canonical With Derivative'
period_cut = 128.
# resample the images
fmri_img = [concat_imgs(subject_data.func1, auto_resample=True),
concat_imgs(subject_data.func2, auto_resample=True)]
affine, shape = fmri_img[0].get_affine(), fmri_img[0].shape
print('Resampling the second image (this takes time)...')
fmri_img[1] = resample_img(fmri_img[1], affine, shape[:3])
# Create mean image for display
mean_image = mean_img(fmri_img)
# make design matrices
design_matrices = []
for idx in range(2):
# build paradigm
n_scans = fmri_img[idx].shape[-1]
timing = loadmat(getattr(subject_data, "trials_ses%i" % (idx + 1)),
squeeze_me=True, struct_as_record=False)
faces_onsets = timing['onsets'][0].ravel()
scrambled_onsets = timing['onsets'][1].ravel()
onsets = np.hstack((faces_onsets, scrambled_onsets))
    onsets *= tr  # because onsets were reported in 'scans' units
conditions = (['faces'] * len(faces_onsets) +
['scrambled'] * len(scrambled_onsets))
paradigm = pd.DataFrame({'name': conditions, 'onset': onsets})
# build design matrix
frame_times = np.arange(n_scans) * tr
design_matrix = make_design_matrix(
frame_times, paradigm, hrf_model=hrf_model, drift_model=drift_model,
period_cut=period_cut)
design_matrices.append(design_matrix)
# specify contrasts
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = dict([(column, contrast_matrix[i])
for i, column in enumerate(design_matrix.columns)])
# more interesting contrasts
contrasts = {
'faces-scrambled': contrasts['faces'] - contrasts['scrambled'],
'scrambled-faces': -contrasts['faces'] + contrasts['scrambled'],
'effects_of_interest': np.vstack((contrasts['faces'],
contrasts['scrambled']))
}
# fit GLM
print('Fitting a GLM')
fmri_glm = FirstLevelGLM(standardize=False).fit(fmri_img, design_matrices)
# compute contrast maps
print('Computing contrasts')
from nilearn import plotting
for contrast_id, contrast_val in contrasts.items():
print("\tcontrast id: %s" % contrast_id)
z_map, = fmri_glm.transform(
[contrast_val] * 2, contrast_name=contrast_id, output_z=True)
plotting.plot_stat_map(
z_map, bg_img=mean_image, threshold=3.0, display_mode='z',
cut_coords=3, black_bg=True, title=contrast_id)
plotting.show()
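
# Editor's note (illustrative check, not part of the original example): the
# contrast vectors above are simply weights over the design-matrix columns.
# 'faces-scrambled' puts +1 on the 'faces' column, -1 on 'scrambled' and 0
# elsewhere, which the lines below restate using only the column names.
c = np.zeros(design_matrix.shape[1])
c[list(design_matrix.columns).index('faces')] = 1.
c[list(design_matrix.columns).index('scrambled')] = -1.
assert np.allclose(c, contrasts['faces-scrambled'])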
|
bsd-3-clause
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/pandas/tests/plotting/test_series.py
|
6
|
30296
|
# coding: utf-8
""" Test cases for Series.plot """
import itertools
import pytest
from datetime import datetime
import pandas as pd
from pandas import Series, DataFrame, date_range
from pandas.compat import range, lrange
import pandas.util.testing as tm
from pandas.util.testing import slow
import numpy as np
from numpy.random import randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
tm._skip_module_if_no_mpl()
class TestSeriesPlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
axes = _check_plot_works(self.ts.plot, rot=0)
self._check_ticks_props(axes, xrot=0)
ax = _check_plot_works(self.ts.plot, style='.', logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
_check_plot_works(self.ts[:10].plot.bar)
_check_plot_works(self.ts.plot.area, stacked=False)
_check_plot_works(self.iseries.plot)
for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot.barh)
ax = _check_plot_works(Series(randn(10)).plot.bar, color='black')
self._check_colors([ax.patches[0]], facecolors=['black'])
# GH 6951
ax = _check_plot_works(self.ts.plot, subplots=True)
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
@slow
def test_plot_figsize_and_title(self):
# figsize and title
ax = self.series.plot(title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
# GH 8242
if self.mpl_ge_1_5_0:
key = 'axes.prop_cycle'
else:
key = 'axes.color_cycle'
colors = self.plt.rcParams[key]
Series([1, 2, 3]).plot()
assert colors == self.plt.rcParams[key]
def test_ts_line_lim(self):
ax = self.ts.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin == lines[0].get_data(orig=False)[0][0]
assert xmax == lines[0].get_data(orig=False)[0][-1]
tm.close()
ax = self.ts.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin == lines[0].get_data(orig=False)[0][0]
assert xmax == lines[0].get_data(orig=False)[0][-1]
def test_ts_area_lim(self):
ax = self.ts.plot.area(stacked=False)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
tm.close()
# GH 7471
ax = self.ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
tm.close()
tz_ts = self.ts.copy()
tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
ax = tz_ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
tm.close()
ax = tz_ts.plot.area(stacked=False, secondary_y=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
def test_label(self):
s = Series([1, 2])
ax = s.plot(label='LABEL', legend=True)
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
ax = s.plot(legend=True)
self._check_legend_labels(ax, labels=['None'])
self.plt.close()
# get name from index
s.name = 'NAME'
ax = s.plot(legend=True)
self._check_legend_labels(ax, labels=['NAME'])
self.plt.close()
# override the default
ax = s.plot(legend=True, label='LABEL')
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
        # Add label info, but don't draw
ax = s.plot(legend=False, label='LABEL')
assert ax.get_legend() is None # Hasn't been drawn
ax.legend() # draw it
self._check_legend_labels(ax, labels=['LABEL'])
def test_line_area_nan_series(self):
values = [1, 2, np.nan, 3]
s = Series(values)
ts = Series(values, index=tm.makeDateIndex(k=4))
for d in [s, ts]:
ax = _check_plot_works(d.plot)
masked = ax.lines[0].get_ydata()
# remove nan for comparison purpose
exp = np.array([1, 2, 3], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
tm.assert_numpy_array_equal(
masked.mask, np.array([False, False, True, False]))
expected = np.array([1, 2, 0, 3], dtype=np.float64)
ax = _check_plot_works(d.plot, stacked=True)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area, stacked=False)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
s.index.name = 'The Index'
ax = s.plot(use_index=False)
label = ax.get_xlabel()
assert label == ''
ax2 = s.plot.bar(use_index=False)
label2 = ax2.get_xlabel()
assert label2 == ''
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot.bar(log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([200, 500]).plot.barh(log=True)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
tm.close()
# GH 9905
expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00])
if not self.mpl_le_1_2_1:
expected = np.hstack((1.0e-04, expected, 1.0e+01))
if self.mpl_ge_2_0_0:
expected = np.hstack((1.0e-05, expected))
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar')
ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001
ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001
res = ax.get_ylim()
tm.assert_almost_equal(res[0], ymin)
tm.assert_almost_equal(res[1], ymax)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh')
res = ax.get_xlim()
tm.assert_almost_equal(res[0], ymin)
tm.assert_almost_equal(res[1], ymax)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@slow
def test_bar_ignore_index(self):
df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
ax = df.plot.bar(use_index=False)
self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
def test_rotation(self):
df = DataFrame(randn(5, 5))
# Default rot 0
axes = df.plot()
self._check_ticks_props(axes, xrot=0)
axes = df.plot(rot=30)
self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
assert xp == ax.get_xlim()[0]
@slow
def test_pie_series(self):
        # if sum of values is less than 1.0, pie handles them as rates and
        # draws a semicircle.
series = Series(np.random.randint(1, 5),
index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, series.index)
assert ax.get_ylabel() == 'YLABEL'
# without wedge labels
ax = _check_plot_works(series.plot.pie, labels=None)
self._check_text_labels(ax.texts, [''] * 5)
# with less colors than elements
color_args = ['r', 'g', 'b']
ax = _check_plot_works(series.plot.pie, colors=color_args)
color_expected = ['r', 'g', 'b', 'r', 'g']
self._check_colors(ax.patches, facecolors=color_expected)
# with labels and colors
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
ax = _check_plot_works(series.plot.pie, labels=labels,
colors=color_args)
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
# with autopct and fontsize
ax = _check_plot_works(series.plot.pie, colors=color_args,
autopct='%.2f', fontsize=7)
pcts = ['{0:.2f}'.format(s * 100)
for s in series.values / float(series.sum())]
iters = [iter(series.index), iter(pcts)]
expected_texts = list(next(it) for it in itertools.cycle(iters))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
assert t.get_fontsize() == 7
# includes negative value
with pytest.raises(ValueError):
series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
series.plot.pie()
# includes nan
series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'],
name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
def test_pie_nan(self):
s = Series([1, np.nan, 1, 1])
ax = s.plot.pie(legend=True)
expected = ['0', '', '2', '3']
result = [x.get_text() for x in ax.texts]
assert result == expected
@slow
def test_hist_df_kwargs(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.plot.hist(bins=5)
assert len(ax.patches) == 10
@slow
def test_hist_df_with_nonnumerics(self):
# GH 9853
with tm.RNGContext(1):
df = DataFrame(
np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
df['E'] = ['x', 'y'] * 5
ax = df.plot.hist(bins=5)
assert len(ax.patches) == 20
ax = df.plot.hist() # bins=10
assert len(ax.patches) == 40
@slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist,
by=self.ts.index.month)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist,
by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with pytest.raises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
assert len(ax.patches) == 2
@slow
def test_hist_layout(self):
df = self.hist_df
with pytest.raises(ValueError):
df.height.hist(layout=(1, 1))
with pytest.raises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
df = self.hist_df
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.gender, layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2),
figsize=(12, 7))
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes()
assert len(axes) == 2
@slow
def test_hist_secondary_legend(self):
# GH 9610
df = DataFrame(np.random.randn(30, 4), columns=list('abcd'))
# primary -> secondary
ax = df['a'].plot.hist(legend=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b (right)'])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible, right axis must be visible
self._check_legend_labels(ax.left_ax,
labels=['a (right)', 'b (right)'])
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> primary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
# right axes is returned
df['b'].plot.hist(ax=ax, legend=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
assert ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
@slow
def test_df_series_secondary_legend(self):
# GH 9779
df = DataFrame(np.random.randn(30, 3), columns=list('abc'))
s = Series(np.random.randn(30), name='x')
# primary -> secondary (without passing ax)
ax = df.plot()
s.plot(legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# primary -> secondary (with passing ax)
ax = df.plot()
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
        # secondary -> secondary (without passing ax)
ax = df.plot(secondary_y=True)
s.plot(legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, labels=expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (with passing ax)
ax = df.plot(secondary_y=True)
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
        # secondary -> secondary (with passing ax, mark_right=False)
ax = df.plot(secondary_y=True, mark_right=False)
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a', 'b', 'c', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with pytest.raises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_kde(self):
ax = self.ts.plot.hist(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
# ticks are values, thus ticklabels are blank
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
_check_plot_works(self.ts.plot.kde)
_check_plot_works(self.ts.plot.density)
ax = self.ts.plot.kde(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kde_kwargs(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from numpy import linspace
_check_plot_works(self.ts.plot.kde, bw_method=.5,
ind=linspace(-100, 100, 20))
_check_plot_works(self.ts.plot.density, bw_method=.5,
ind=linspace(-100, 100, 20))
ax = self.ts.plot.kde(logy=True, bw_method=.5,
ind=linspace(-100, 100, 20))
self._check_ax_scales(ax, yaxis='log')
self._check_text_labels(ax.yaxis.get_label(), 'Density')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
s = Series(np.random.uniform(size=50))
s[0] = np.nan
axes = _check_plot_works(s.plot.kde)
        # gh-14821: check that the plotted values are not all missing
assert any(~np.isnan(axes.lines[0].get_xdata()))
@slow
def test_hist_kwargs(self):
ax = self.ts.plot.hist(bins=5)
assert len(ax.patches) == 5
self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
tm.close()
if self.mpl_ge_1_3_1:
ax = self.ts.plot.hist(orientation='horizontal')
self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
tm.close()
ax = self.ts.plot.hist(align='left', stacked=True)
tm.close()
@slow
def test_hist_kde_color(self):
ax = self.ts.plot.hist(logy=True, bins=10, color='b')
self._check_ax_scales(ax, yaxis='log')
assert len(ax.patches) == 10
self._check_colors(ax.patches, facecolors=['b'] * 10)
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
ax = self.ts.plot.kde(logy=True, color='r')
self._check_ax_scales(ax, yaxis='log')
lines = ax.get_lines()
assert len(lines) == 1
self._check_colors(lines, ['r'])
@slow
def test_boxplot_series(self):
ax = self.ts.plot.box(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [self.ts.name])
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kind_both_ways(self):
s = Series(range(3))
kinds = (plotting._core._common_kinds +
plotting._core._series_kinds)
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
s.plot(kind=kind)
getattr(s.plot, kind)()
@slow
def test_invalid_plot_data(self):
s = Series(list('abcd'))
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with pytest.raises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@slow
def test_errorbar_plot(self):
s = Series(np.arange(10), name='x')
s_err = np.random.randn(10)
d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
# test line and bar plots
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=1, yerr=1)
ax = _check_plot_works(s.plot, xerr=s_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
# test time series plotting
ix = date_range('1/1/2000', '1/1/2001', freq='M')
ts = Series(np.arange(12), index=ix, name='x')
ts_err = Series(np.random.randn(12), index=ix)
td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
ax = _check_plot_works(ts.plot, yerr=ts_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(ts.plot, yerr=td_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
# check incorrect lengths and types
with pytest.raises(ValueError):
s.plot(yerr=np.arange(11))
s_err = ['zzz'] * 10
# in mpl 1.5+ this is a TypeError
with pytest.raises((ValueError, TypeError)):
s.plot(yerr=s_err)
def test_table(self):
_check_plot_works(self.series.plot, table=True)
_check_plot_works(self.series.plot, table=self.series)
@slow
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(Series([1, 2, 3]),
plotting._core._series_kinds +
plotting._core._common_kinds)
@slow
def test_standard_colors(self):
from pandas.plotting._style import _get_standard_colors
for c in ['r', 'red', 'green', '#FF0000']:
result = _get_standard_colors(1, color=c)
assert result == [c]
result = _get_standard_colors(1, color=[c])
assert result == [c]
result = _get_standard_colors(3, color=c)
assert result == [c] * 3
result = _get_standard_colors(3, color=[c])
assert result == [c] * 3
@slow
def test_standard_colors_all(self):
import matplotlib.colors as colors
from pandas.plotting._style import _get_standard_colors
# multiple colors like mediumaquamarine
for c in colors.cnames:
result = _get_standard_colors(num_colors=1, color=c)
assert result == [c]
result = _get_standard_colors(num_colors=1, color=[c])
assert result == [c]
result = _get_standard_colors(num_colors=3, color=c)
assert result == [c] * 3
result = _get_standard_colors(num_colors=3, color=[c])
assert result == [c] * 3
# single letter colors like k
for c in colors.ColorConverter.colors:
result = _get_standard_colors(num_colors=1, color=c)
assert result == [c]
result = _get_standard_colors(num_colors=1, color=[c])
assert result == [c]
result = _get_standard_colors(num_colors=3, color=c)
assert result == [c] * 3
result = _get_standard_colors(num_colors=3, color=[c])
assert result == [c] * 3
def test_series_plot_color_kwargs(self):
# GH1890
ax = Series(np.arange(12) + 1).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_kwargs(self):
# #1890
ax = Series(np.arange(12) + 1, index=date_range(
'1/1/2000', periods=12)).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_with_empty_kwargs(self):
import matplotlib as mpl
if self.mpl_ge_1_5_0:
def_colors = self._maybe_unpack_cycler(mpl.rcParams)
else:
def_colors = mpl.rcParams['axes.color_cycle']
index = date_range('1/1/2000', periods=12)
s = Series(np.arange(1, 13), index=index)
ncolors = 3
for i in range(ncolors):
ax = s.plot()
self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
def test_xticklabels(self):
# GH11529
s = Series(np.arange(10), index=['P%02d' % i for i in range(10)])
ax = s.plot(xticks=[0, 3, 5, 9])
exp = ['P%02d' % i for i in [0, 3, 5, 9]]
self._check_text_labels(ax.get_xticklabels(), exp)
def test_custom_business_day_freq(self):
# GH7222
from pandas.tseries.offsets import CustomBusinessDay
s = Series(range(100, 121), index=pd.bdate_range(
start='2014-05-01', end='2014-06-01',
freq=CustomBusinessDay(holidays=['2014-05-26'])))
_check_plot_works(s.plot)
|
mit
|
xpspectre/aws-tools
|
spot_pricing.py
|
1
|
6176
|
# Usage: specify the params below (time interval, instance types, availability zones)
# Does: looks up the cache first, then fetches only the results that aren't cached yet
# Checks whether the instance type is already present
# Gets the cached time range (latest early time thru earliest late time); assumes cached time ranges are contiguous
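# A minimal usage sketch (instance type and dates are illustrative; see __main__ below):
#   update_spot_history('t1.micro', datetime(2016, 12, 3), datetime(2016, 12, 10))
#   times, prices, zones = get_spot_history('t1.micro', datetime(2016, 12, 3), datetime(2016, 12, 10))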
import os
import boto3
from datetime import datetime, timedelta
import sqlite3
import matplotlib.pyplot as plt
import numpy as np
# Work with a sample response for development
CACHE_DIR = 'cache'
TIME_STR = '%Y-%m-%d %H:%M:%S'
def fetch_spot_history(conn, instance_type, availability_zone, start_time, end_time):
# https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_spot_price_history
# https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-spot-price-history.html
# Get for all Availability Zones
# Do this in a loop to keep fetching more using NextToken
# TODO: error handling
client = boto3.client('ec2')
next_token = ''
while True:
response = client.describe_spot_price_history(
DryRun=False,
StartTime=start_time,
EndTime=end_time,
InstanceTypes=[instance_type],
ProductDescriptions=['Linux/UNIX'],
AvailabilityZone=availability_zone,
NextToken=next_token
)
print('Fetched {} spot price entries'.format(len(response['SpotPriceHistory'])))
# Turn response into something that's easily inserted into DB - list of tuples
        # By default, returned datetime has timezone info. It's UTC from the AWS API.
# Strip it for compatibility with sqlite
to_add = []
for row in response['SpotPriceHistory']:
to_add.append((row['Timestamp'].replace(tzinfo=None), row['AvailabilityZone'], row['SpotPrice']))
with conn:
conn.executemany('INSERT OR IGNORE INTO history VALUES (?,?,?)', to_add)
next_token = response['NextToken']
if next_token == '':
break
def get_db_avail_zones(conn, instance_type):
c = conn.cursor()
zones = []
c.execute('SELECT DISTINCT availabilityzone FROM history')
for row in c:
zones.append(row[0])
return zones
def update_spot_history(instance_type, start_time, end_time):
# Update cached spot price history
# Store in sqlite db, 1 db for each instance type
# Ignore Product, everything will be Linux/UNIX
# Single table called 'history'
# Cols for AvailabilityZone, Timestamp, SpotPrice
# AvailabilityZone+Timestamp primary key - identifies the entry
    # Fetches all availability zones together, which may cause extra fetches when the zones' cached time ranges don't perfectly overlap
db_file = os.path.join(CACHE_DIR, '{}.db'.format(instance_type))
conn = sqlite3.connect(db_file, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
# Note: table names can't be parametrized
with conn:
conn.execute('CREATE TABLE IF NOT EXISTS history (timestamp TIMESTAMP , availabilityzone TEXT, spotprice REAL, PRIMARY KEY (timestamp, availabilityzone))')
# Get availability zones remotely
client = boto3.client('ec2')
response = client.describe_availability_zones()
zones = []
for row in response['AvailabilityZones']:
zones.append(row['ZoneName'])
# Get availability zones in cache
c = conn.cursor()
cached_zones = get_db_avail_zones(conn, instance_type)
for zone in zones:
if zone in cached_zones:
c.execute('SELECT MIN(timestamp) FROM history WHERE availabilityzone=?', (zone,))
first_time = datetime.strptime(c.fetchone()[0], TIME_STR)
c.execute('SELECT MAX(timestamp) FROM history WHERE availabilityzone=?', (zone,))
last_time = datetime.strptime(c.fetchone()[0], TIME_STR)
# Fetch earlier windows if needed
if start_time < first_time:
fetch_spot_history(conn, instance_type, zone, start_time, first_time)
# Fetch later windows if needed
if last_time < end_time:
fetch_spot_history(conn, instance_type, zone, last_time, end_time)
else:
fetch_spot_history(conn, instance_type, zone, start_time, end_time)
conn.close()
def get_spot_history(instance_type, start_time, end_time, exclude_zones=[]):
# Get spot price history from cache. Returns times and prices as numpy datatypes.
db_file = os.path.join(CACHE_DIR, '{}.db'.format(instance_type))
conn = sqlite3.connect(db_file, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
times = []
prices = []
c = conn.cursor()
zones = get_db_avail_zones(conn, instance_type)
for exclude_zone in exclude_zones:
try:
zones.remove(exclude_zone)
except ValueError:
pass
for zone in zones:
c.execute('SELECT timestamp, spotprice FROM history WHERE availabilityzone=? AND timestamp BETWEEN datetime(?) AND datetime(?) ORDER BY timestamp', (zone, start_time, end_time))
times_i = []
prices_i = []
for row in c:
times_i.append(row[0])
prices_i.append(row[1])
times.append(np.array(times_i, dtype='datetime64[s]')) # units of seconds
prices.append(np.array(prices_i))
return times, prices, zones
if __name__ == '__main__':
GET_DATA = True # DEBUG: whether to collect/update data at all
# Get the current date/time and 1 week ago
# Turn into UTC
# end_time = datetime.utcnow()
end_time = datetime(2016, 12, 10)
start_time = end_time - timedelta(days=7)
instance_type = 't1.micro'
if GET_DATA:
update_spot_history(instance_type, start_time, end_time)
# Analyze data
# Make plots of spot price over time for each avail zone
# This is probably inefficient...
times, prices, zones = get_spot_history(instance_type, start_time, end_time, exclude_zones=['us-east-1e'])
plt.figure()
for i, zone in enumerate(zones):
plt.plot(times[i], prices[i])
plt.legend(zones)
plt.title(instance_type)
plt.show()
|
mit
|
chrisjsewell/PyGauss
|
pygauss/file_io.py
|
1
|
10124
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 21:01:25 2015
@author: chris sewell
"""
import os, glob
import socket
import errno
import re
import getpass
from io import BytesIO
import paramiko
class Folder(object):
""" an object intended to act as an entry point to a folder path
    it will act identically whether the folder is local or on a server
"""
def __init__(self, path,
server=None, username=None, passwrd=None):
"""an object intended to act as an entry point to a folder path
Parameters
----------
path : str
the path to the folder (absolute or relative)
server : str
the server name
username : str
the username to connect to the server
passwrd : str
server password, if not present it will be asked for during initialisation
"""
assert type(path) is str
self._path = path
if not server:
self._local = True
else:
self._local = False
self._server = server
self._username = username
#TODO encrypt?
self._passwrd = passwrd
# set folder and test it exists
if self._local:
if not os.path.exists(self._path):
raise IOError(
'the folder path does not exist: {}'.format(self._path))
else:
ssh_failed = False
try:
ssh = self._connect_ssh(server, username, passwrd)
except:
ssh_failed = True
if ssh_failed:
if not type(self._username) is str:
                    self._username = getpass.getuser()
                if not type(self._passwrd) is str:
                    self._passwrd = getpass.getpass('Please enter server password: ')
                ssh = self._connect_ssh(server, self._username, self._passwrd)
sftp = ssh.open_sftp()
try:
sftp.stat(path)
except IOError, e:
ssh.close()
if e.errno == errno.ENOENT:
raise IOError("{0} does not exist on server: {1}".format(path,
server))
else:
                    raise IOError('error trying to validate folder \n {0}'.format(e))
ssh.close()
self._ssh = None
self._sftp = None
def get_path(self):
"""get folder path """
return self._path
def islocal(self):
""" if folder is local """
return self._local
def active(self):
""" if folder is active """
if self._local:
return True
elif self._ssh:
return True
else:
return False
def _connect_ssh(self, ssh_server, ssh_username, ssh_passwrd):
""" connect and verify ssh connection """
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(ssh_server, username=ssh_username, password=ssh_passwrd)
except socket.error, e:
raise IOError(
'could not connect to the ssh server: \n {0} \n {1}'.format(ssh_server, e))
except paramiko.ssh_exception.AuthenticationException, e:
raise IOError(
'username or password authentication error \n {0}'.format(e))
except Exception, e:
raise IOError('error connecting to server: \n {0}'.format(e))
return ssh
def __enter__(self):
""" use with statement to open ssh connection once """
if self._local:
return self
ssh = self._connect_ssh(self._server,
self._username, self._passwrd)
self._ssh = ssh
self._sftp = ssh.open_sftp()
return self
def __exit__(self, type, value, traceback):
""" use with statement to open ssh connection once """
if self._local:
return
try:
self._ssh.close()
except:
pass
self._ssh = None
self._sftp = None
def list_files(self, pattern=None, one_file=False):
""" list files in folder
Parameters
----------
pattern : str
a pattern the file must match that can include * wildcards
"""
if self._local:
if not pattern:
pattern = '*'
filepaths = glob.glob(os.path.join(self._path, pattern))
files = [os.path.basename(f) for f in filepaths]
else:
if not self._ssh:
ssh = self._connect_ssh(self._server,
self._username, self._passwrd)
sftp = ssh.open_sftp()
files = sftp.listdir(self._path)
ssh.close()
else:
files = self._sftp.listdir(self._path)
if pattern:
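                # convert the glob-style pattern to a regex: non-alphanumeric characters
                # are bracket-escaped and '*' becomes '.*', e.g. '*.log' -> '.*[.]log'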
pattern = "".join(
[ c if c.isalnum() or c=='*' else "["+c+"]" for c in pattern]
).replace('*', '.*')
files = filter(lambda x: re.match(pattern,x), files)
if not one_file:
return files
if not files:
raise IOError(
'no files of format {0} in path: {1}'.format(pattern, self._path))
if len(files)>1:
raise IOError(
'multiple files found conforming to format {0} in path: {1}'.format(
pattern, self._path))
if self._local:
return os.path.basename(files[0])
else:
return files[0]
def read_file(self, file_name):
"""return an open file ready for reading """
mode='rb'
file_name = self.list_files(file_name, one_file=True)
if self._local:
return open(os.path.join(self._path, file_name), mode)
        # assume it is a unix server (so '/' is the path separator);
        # os.path.join on a windows client would build the wrong remote path
if not self._path[-1] == '/':
file_path = self._path + '/' + file_name
else:
file_path = self._path + file_name
if not self._sftp:
raise IOError('must have an open ssh connection (use `with` statement)')
return self._sftp.file(file_path, mode)
def write_file(self, file_name, overwrite=False):
"""return an open file ready for writing to """
mode = 'w'
if not overwrite:
f = None
try:
f = self.list_files(file_name, one_file=True)
except:
pass
if f:
raise IOError('the file {0} already exists'.format(file_name))
if self._local:
return open(os.path.join(self._path, file_name), mode)
        # assume it is a unix server (so '/' is the path separator);
        # os.path.join on a windows client would build the wrong remote path
if not self._path[-1] == '/':
file_path = self._path + '/' + file_name
else:
file_path = self._path + file_name
if not self._sftp:
raise IOError('must have an open ssh connection (use `with` statement)')
return self._sftp.file(file_path, mode)
#TODO write save_mplfig for non-local
def save_mplfig(self, fig, fig_name, dpi=256, format='png'):
"""a function for outputing a matplotlib figure to a file
Parameters
----------
fig : matplotlib.figure.Figure
a Matplotlib figure
fig_name : str
the desired name of the file
"""
try:
fig.get_figwidth()
except AttributeError:
raise ValueError('the fig is not a Matplotlib figure')
if not os.path.splitext(fig_name)[1]:
fig_name += os.path.extsep + 'png'
if self._local:
full_path = os.path.join(self._path, fig_name)
fig.savefig(full_path, dpi=dpi,
bbox_inches='tight')
return os.path.abspath(full_path)
else:
raise NotImplementedError
#TODO write save_ipyimg for non-local
def save_ipyimg(self, img, img_name):
"""a function for outputing an IPython Image to a file
Parameters
----------
img : IPython.display.Image
            an IPython image
img_name : str
the desired name of the file
"""
try:
data = img.data
except AttributeError:
raise ValueError('the img is not an IPython Image')
#_PNG = b'\x89PNG\r\n\x1a\n'
_JPEG = b'\xff\xd8'
ext = 'png'
if data[:2] == _JPEG:
ext = 'jpg'
if self._local:
full_path = os.path.join(self._path, img_name)+ os.path.extsep + ext
with open(full_path, "wb") as f:
f.write(data)
return os.path.abspath(full_path)
else:
raise NotImplementedError
#TODO write save_pilimg
def save_pilimg(self, img, img_name):
raise NotImplementedError
class NoOutputFolder(Folder):
""" a folder object which will not output any data """
def __init__(self, *args, **kwargs):
super(NoOutputFolder, self).__init__(*args, **kwargs)
def write_file(self, *arg, **kwargs):
return BytesIO()
def save_ipyimg(self, *arg, **kwargs):
return ''
def save_mplfig(self, *arg, **kwargs):
return ''
def save_pilimg(self, *arg, **kwargs):
return ''
|
gpl-3.0
|
TheBB/badger
|
tests/test_run.py
|
1
|
5308
|
from pathlib import Path
import numpy as np
import pandas as pd
from grevling import Case
DATADIR = Path(__file__).parent / 'data'
def read_file(path: Path) -> str:
with open(path, 'r') as f:
return f.read()
def check_df(left, right):
to_remove = [c for c in left.columns if c.startswith('walltime/') or c.startswith('_')]
pd.testing.assert_frame_equal(
left.drop(columns=to_remove).sort_index(axis=1),
right.sort_index(axis=1)
)
def test_echo():
case = Case(DATADIR / 'run' / 'echo')
case.clear_cache()
case.run()
case.collect()
data = case.load_dataframe()
check_df(data, pd.DataFrame(
index=pd.Int64Index(range(9)),
data={
'alpha': pd.array([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype=pd.Int64Dtype()),
'bravo': ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'],
'charlie': pd.array([1, 1, 1, 3, 3, 3, 5, 5, 5], dtype=pd.Int64Dtype()),
'a': pd.array([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype=pd.Int64Dtype()),
'b': ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'],
'c': [1., 1., 1., 3., 3., 3., 5., 5., 5.],
}
))
def test_cat():
case = Case(DATADIR / 'run' / 'cat')
case.clear_cache()
case.run()
case.collect()
data = case.load_dataframe()
check_df(data, pd.DataFrame(
index=pd.Int64Index(range(9)),
data={
'alpha': pd.array([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype=pd.Int64Dtype()),
'bravo': ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'],
'charlie': pd.array([1, 1, 1, 3, 3, 3, 5, 5, 5], dtype=pd.Int64Dtype()),
'a': pd.array([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype=pd.Int64Dtype()),
'b': ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'],
'c': [1., 1., 1., 3., 3., 3., 5., 5., 5.],
'a_auto': pd.array([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype=pd.Int64Dtype()),
}
))
def test_files():
case = Case(DATADIR / 'run' / 'files')
case.clear_cache()
case.run()
for a in range(1,4):
for b in 'abc':
path = case.storagepath / f'{a}-{b}'
assert read_file(path / 'template.txt') == f'a={a} b={b} c={2*a-1}\n'
assert read_file(path / 'other-template.txt') == f'a={a} b={b} c={2*a-1}\n'
assert read_file(path / 'non-template.txt') == 'a=${alpha} b=${bravo} c=${charlie}\n'
assert read_file(path / 'some' / 'deep' / 'directory' / 'empty1.dat') == ''
assert read_file(path / 'some' / 'deep' / 'directory' / 'empty2.dat') == ''
assert read_file(path / 'some' / 'deep' / 'directory' / 'empty3.dat') == ''
def test_capture():
case = Case(DATADIR / 'run' / 'capture')
case.clear_cache()
case.run()
case.collect()
data = case.load_dataframe()
check_df(data, pd.DataFrame(
index=pd.Int64Index(range(9)),
data={
'alpha': [1.234, 1.234, 1.234, 2.345, 2.345, 2.345, 3.456, 3.456, 3.456],
'bravo': pd.array([1, 2, 3, 1, 2, 3, 1, 2, 3], dtype=pd.Int64Dtype()),
'firstalpha': [1.234, 1.234, 1.234, 2.345, 2.345, 2.345, 3.456, 3.456, 3.456],
'lastalpha': [4.936, 4.936, 4.936, 9.38, 9.38, 9.38, 13.824, 13.824, 13.824],
'allalpha': [
[1.234, 2.468, 3.702, 4.936], [1.234, 2.468, 3.702, 4.936], [1.234, 2.468, 3.702, 4.936],
[2.345, 4.690, 7.035, 9.380], [2.345, 4.690, 7.035, 9.380], [2.345, 4.690, 7.035, 9.380],
[3.456, 6.912, 10.368, 13.824], [3.456, 6.912, 10.368, 13.824], [3.456, 6.912, 10.368, 13.824]
],
'firstbravo': pd.array([1, 2, 3, 1, 2, 3, 1, 2, 3], dtype=pd.Int64Dtype()),
'lastbravo': pd.array([4, 8, 12, 4, 8, 12, 4, 8, 12], dtype=pd.Int64Dtype()),
'allbravo': [
[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12],
[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12],
[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12],
],
}
))
def test_failing():
case = Case(DATADIR / 'run' / 'failing')
case.clear_cache()
case.run()
case.collect()
data = case.load_dataframe()
check_df(data, pd.DataFrame(
index=pd.Int64Index([0, 1]),
data={
'retcode': pd.array([0, 1], dtype=pd.Int64Dtype()),
'before': pd.array([12, 12], dtype=pd.Int64Dtype()),
'return': pd.array([0, 1], dtype=pd.Int64Dtype()),
'next': pd.array([0, pd.NA], dtype=pd.Int64Dtype()),
'after': pd.array([13, pd.NA], dtype=pd.Int64Dtype()),
}
))
def test_stdout():
case = Case(DATADIR / 'run' / 'stdout')
case.clear_cache()
case.run()
path = case.storagepath
assert read_file(path / 'out-0' / 'good.stdout') == 'stdout 0\n'
assert read_file(path / 'out-0' / 'good.stderr') == 'stderr 0\n'
assert read_file(path / 'out-0' / 'bad.stdout') == 'stdout 0\n'
assert read_file(path / 'out-0' / 'bad.stderr') == 'stderr 0\n'
assert read_file(path / 'out-1' / 'good.stdout') == 'stdout 1\n'
assert read_file(path / 'out-1' / 'good.stderr') == 'stderr 1\n'
assert read_file(path / 'out-1' / 'bad.stdout') == 'stdout 1\n'
assert read_file(path / 'out-1' / 'bad.stderr') == 'stderr 1\n'
|
agpl-3.0
|
CassioAmador/profile_tcabr
|
visualization_tools/test_custom_spectrogram_sim2.py
|
1
|
2623
|
"""Compare custom spectrogram with scipy's implementation"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import specgram
from scipy import signal
import time
import sys
sys.path.insert(0, './../src/')
import custom_spectrogram as cs
fs = 100 # 100 MHz
tem = np.arange(int(8 * fs)) / fs
f = 12 - 4 * np.sqrt(np.arange(tem.size) / tem.size)
sinal = np.sin(2 * np.pi * f * tem) + 0.8 * np.sin(2 * np.pi * (f + 6) * tem) + 0.8 * np.sin(2 * np.pi * (f + 12) * tem)
nfft = 1 * fs
window = 80
step_scale = 16
zer_pad = 8
print('\n time cost:')
# measure custom spectrogram time
# for some unknown reason, the step must be scaled down to compare to scipy.
freq_min, freq_max = 0, 15
time0 = time.time()
for i in range(10):
time_spec, beat_freq = cs.eval_beat_freq(tem, window_size=window, step_scale=step_scale, zer_pad=zer_pad)
fmin, fmax, mask = cs.eval_mask(beat_freq, window, freq_min, freq_max, zer_pad=zer_pad)
Sxx_custom = cs.spectrogram(sinal, window_size=window, zer_pad=zer_pad, step_scale=step_scale, freq_mask=mask)
time1 = time.time()
print('\nCUSTOM: {0} ms'.format(100 * (time1 - time0)))
plt.subplots(4, 1)
ax1 = plt.subplot(411)
plt.plot(tem, sinal)
plt.xlim(tem[0], tem[-1])
plt.ylabel('signal')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(412, sharex=ax1)
plt.pcolormesh(time_spec, beat_freq[mask], Sxx_custom)
plt.plot(tem, f, 'k', lw=2)
plt.ylim(freq_min, freq_max)
plt.ylabel('custom')
plt.setp(ax2.get_xticklabels(), visible=False)
# measure scipy spectrogram time
time0 = time.time()
for i in range(10):
freqs, tempo, Sxx_scipy = signal.spectrogram(sinal, fs, nperseg=nfft, noverlap=99, window=signal.get_window('hann', nfft), nfft=512)
time1 = time.time()
print('\nSCIPY: {0} ms'.format(100 * (time1 - time0)))
ax3 = plt.subplot(413)
plt.pcolormesh(tempo, freqs, Sxx_scipy)
plt.ylim(freq_min, freq_max)
plt.plot(tem, f, 'k', lw=2)
plt.ylim(freq_min, freq_max)
plt.ylabel('scipy')
plt.setp(ax3.get_xticklabels(), visible=False)
# measure matplotlib spectrogram time
plt.subplot(414, sharex=ax1)
time0 = time.time()
for i in range(10):
Sxx_malab, freqs, bins = specgram(sinal, Fs=fs, NFFT=nfft, noverlap=99, pad_to=512)
time1 = time.time()
print('\nMATPLOTLIB: {0} ms'.format(100 * (time1 - time0)))
print('\ntime resolution:\n CUSTOM: {} \t SCIPY: {} \t MATPLOTLIB: {}'.format(Sxx_custom.shape, Sxx_scipy.shape, Sxx_malab.shape))
print('\ndark line is simulated frequency\n')
plt.plot(tem, f, 'k', lw=2)
plt.pcolormesh(bins, freqs, Sxx_malab)
plt.ylim(freq_min, freq_max)
plt.ylabel('mlab')
plt.tight_layout(h_pad=0)
plt.show()
|
mit
|
mwv/scikit-learn
|
examples/linear_model/lasso_dense_vs_sparse_data.py
|
348
|
1862
|
"""
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
|
bsd-3-clause
|
jjx02230808/project0223
|
sklearn/neural_network/tests/test_stochastic_optimizers.py
|
146
|
4310
|
import numpy as np
from sklearn.neural_network._stochastic_optimizers import (BaseOptimizer,
SGDOptimizer,
AdamOptimizer)
from sklearn.utils.testing import (assert_array_equal, assert_true,
assert_false, assert_equal)
shapes = [(4, 6), (6, 8), (7, 8, 9)]
def test_base_optimizer():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = BaseOptimizer(params, lr)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_no_momentum():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False)
grads = [np.random.random(shape) for shape in shapes]
expected = [param - lr * grad for param, grad in zip(params, grads)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=False)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_trigger_stopping():
params = [np.zeros(shape) for shape in shapes]
lr = 2e-6
optimizer = SGDOptimizer(params, lr, lr_schedule='adaptive')
assert_false(optimizer.trigger_stopping('', False))
assert_equal(lr / 5, optimizer.learning_rate)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_nesterovs_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=True)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
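        # Nesterov momentum applies the momentum term a second time (look-ahead step)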
updates = [momentum * update - lr * grad
for update, grad in zip(updates, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_adam_optimizer():
params = [np.zeros(shape) for shape in shapes]
lr = 0.001
epsilon = 1e-8
for beta_1 in np.arange(0.9, 1.0, 0.05):
for beta_2 in np.arange(0.995, 1.0, 0.001):
optimizer = AdamOptimizer(params, lr, beta_1, beta_2, epsilon)
ms = [np.random.random(shape) for shape in shapes]
vs = [np.random.random(shape) for shape in shapes]
t = 10
optimizer.ms = ms
optimizer.vs = vs
optimizer.t = t - 1
grads = [np.random.random(shape) for shape in shapes]
ms = [beta_1 * m + (1 - beta_1) * grad
for m, grad in zip(ms, grads)]
vs = [beta_2 * v + (1 - beta_2) * (grad ** 2)
for v, grad in zip(vs, grads)]
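            # bias-corrected Adam step size, as in Kingma & Ba (2015)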
learning_rate = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1**t)
updates = [-learning_rate * m / (np.sqrt(v) + epsilon)
for m, v in zip(ms, vs)]
expected = [param + update
for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
|
bsd-3-clause
|
arjoly/scikit-learn
|
examples/linear_model/plot_ols_ridge_variance.py
|
387
|
2060
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
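Concretely, ridge minimizes ||y - Xw||^2 + alpha * ||w||^2 (alpha = 0.1 in this
example), while ordinary least squares minimizes only the first term.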
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
0.19/_downloads/c3c432f4bd08791c7b83f3ddb29bcc5b/plot_channel_epochs_image.py
|
1
|
2889
|
"""
=========================================
Visualize channel over epochs as an image
=========================================
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
Two images are produced, one with a good channel and one with a channel
that does not show any evoked field.
It is also demonstrated how to reorder the epochs using a 1D spectral
embedding as described in [1]_.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.4
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# Create epochs, here for gradiometers + EOG only for simplicity
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('grad', 'eog'), baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
###############################################################################
# Show event-related fields images
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
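# (e.g. simply set order_func = None instead of defining it below)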
from sklearn.cluster.spectral import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
this_data = data[:, (times > 0.0) & (times < 0.350)]
this_data /= np.sqrt(np.sum(this_data ** 2, axis=1))[:, np.newaxis]
return np.argsort(spectral_embedding(rbf_kernel(this_data, gamma=1.),
n_components=1, random_state=0).ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
# We'll also plot a sample time onset for each trial
plt_times = np.linspace(0, .2, len(epochs))
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5,
order=order_func, vmin=-250, vmax=250,
overlay_times=plt_times, show=True)
###############################################################################
# References
# ----------
# .. [1] Graph-based variability estimation in single-trial event-related
# neural responses. A. Gramfort, R. Keriven, M. Clerc, 2010,
# Biomedical Engineering, IEEE Trans. on, vol. 57 (5), 1051-1061
# https://ieeexplore.ieee.org/document/5406156
|
bsd-3-clause
|
Fireblend/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
83
|
34544
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
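    For instance, with X = [[0.], [1.], [3.]] this gives
    D = [[1.], [3.], [2.]] and ij = [[0, 1], [0, 2], [1, 2]].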
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
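        Loosely speaking, fitting then uses (correlation matrix + nugget * identity)
        in place of the raw correlation matrix.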
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
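# Minimal prediction sketch, continuing the hypothetical fitted ``gp`` from the
# fitting example above (``x`` is made-up evaluation data, illustrative only):
#   >>> x = np.atleast_2d(np.linspace(0, 10, 100)).T
#   >>> y_pred, mse = gp.predict(x, eval_MSE=True)   # BLUP and its Mean Squared Error
#   >>> sigma = np.sqrt(mse)                         # pointwise standard deviation
#   >>> y_only = gp.predict(x, batch_size=25)        # memory-managed evaluation, BLUP only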
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
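# Sketch of probing the reduced likelihood at a user-chosen theta (``gp`` is the
# hypothetical fitted model from the examples above, illustrative only):
#   >>> rlf_value, par = gp.reduced_likelihood_function(theta=np.array([1e-2]))
#   >>> par['sigma2'], par['beta']   # GP variance and regression weights at that theta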
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
|
bsd-3-clause
|
Pabsm94/HyperPlume
|
src/AEM/AEM_plume.py
|
1
|
44467
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 19:12:18 2017
@author: pablo
"""
import os
dir_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import sys
sys.path.append(dir_path) #change to src
from src import np,math,interp1d,Hyperplume,griddata,interp2d,plt
import matplotlib.lines as mlines
class AEM(Hyperplume):
"""Asymptotic Expansion Model of a plasma plume expansion.Class AEM inherits methods from
parent class Hyperplume, and particularizes them.All initial inputs must be given in dimensional form.
"""
def __init__(self,plasma={'Electrons': {'Gamma': 1,'T_0_electron': 2.1801714e-19,'q_electron': -1.6e-19},'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}},z_span=np.linspace(0,100,500),r_span=np.linspace(0,40,500),n_init=np.linspace(1,100,500),uz_init=np.linspace(1,100,500),ur_init=np.linspace(0,100,500),sol_order=0):
""" Class method __init__ is used as class constructor. Calls parent class Hyperplume constructor method __init__ to
store main plasma properties as attributes in the class.
Args:
plasma (dict): simple_plasma object dictionary containing basic plasma parameters.
z_span (numpy.ndarray): axial region where the problem will be integrated.
r_span (numpy.ndarray): initial far-field plasma radial profile.
n_init (numpy.ndarray): initial far-field plum density front.
uz_init (numpy.ndarray): initial far-region plume axial velocity profile
ur_init (numpy.ndarray): initial fr-region plume radial velocity profile
sol_order (int): Integer defining the AEM correction order for the plume integration.
-0: AEM "cold beam" zeroth order solution
-1: AEM first order correction
-2: Second Order Correction
Usage:
>>> e_charge,ion_mass,Plasma_temp,gamma_value=1.6e-19,2.1801714e-25,2.1801714e-19,1 #Main Plasma Parameters
>>> Plasma = Hyperplume().simple_plasma(e_charge,ion_mass,Plasma_temp,gamma_value) #Loading Plasma dict
>>> z_span = np.linspace(0,110,5000) # Axial plume grid for integration
>>> r_0 = np.linspace(0,10,5000) #Initial plume radial profile
>>> n0 = np.exp(-6.15/2 * r_0**2) #Initial far-field plume density
>>> uz0,ur0 = np.linspace(20000,20000,5000),np.linspace(0,40000,5000) #Initial far-field plume axial and radial velocity fronts
>>> AEM_Order = 2 # AEM model solution
>>> PlumeAEM = AEM(Plasma,z_span,r_0,n0,uz0,ur0,AEM_Order) #Creation of AEM plume
Other important class attributes loaded in the AEM constructor are:
d0 (numpy.ndarray): far field initial divergence ur0/uz0.
d0p (numpy.ndarray): derivative of plume initial divergence
eps (float): AEM expansion parameter 1/M_{0}^{2}
uz0p (numpy.ndarray): derivative of initial far region axial velocity
duz0p (numpy.ndarray): derivative of initial far region radial velocity
z_grid,r_grid (numpy.ndarray): Plume grids where AEM problem is integrated
To access these attributes, for instance:
>>> print(PlumeAEM.d0)
>>> print(PlumeAEM.eps)
"""
#Call parent class Hyperplume constructor method to store main plasma properties as attributes in the AEM class."""
super(AEM,self).__init__(plasma,z_span,r_span,n_init)
self.uz0,self.ur0,self.d0 = uz_init,ur_init,ur_init/uz_init #Load additional AEM plasma plume properties
self.alpha0=math.degrees(math.atan(interp1d(self.eta,self.d0)(1))) #Initial Plume divergence at the 95% streamline
self.order = sol_order #AEM Solution Order
self.eps = self.Gamma*self.T_0/(self.m_ion*(uz_init[0]**2 + ur_init[0]**2)) #residual expansion parameter. Inverse of squared Mach Number
self.M0 = np.sqrt(1/self.eps) #Plume Mach Number
"Derivatives of initial front"
self.d0p = self.eta_deriver(self.eta,self.d0) #derivative of plume divergence
self.d0p[0],self.d0p[-1] = self.d0[1]/self.eta[1],self.d0p[-2] + (self.d0p[-2] - self.d0p[-3]) #Edge vector derivative conditions
self.uz0p = self.eta_deriver(self.eta,self.uz0) #derivative of initial axial velocity
self.uz0p[0],self.uz0p[-1] = 0,self.uz0p[-2] + (self.uz0p[-2] - self.uz0p[-3])
self.duz0p = self.eta_deriver(self.eta,self.d0*self.uz0) #derivative of initial radial velocity
self.duz0p[0],self.duz0p[-1] = self.duz0p[1]/self.eta[1],self.duz0p[-2] + (self.duz0p[-2] - self.duz0p[-3])
"Grid Set Up"
self.z_grid,self.r_grid = self.grid_setup(self.z_span.size,self.eta.size) #2D grids of z, and r points in the plume
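# Worked example of the expansion parameter using the docstring values above
# (Gamma = 1, T_0 = 2.1801714e-19 J, m_ion = 2.1801714e-25 kg, uz0[0] = 2e4 m/s, ur0[0] = 0):
# eps = Gamma*T_0/(m_ion*(uz0[0]**2 + ur0[0]**2)) = 2.18e-19 / (2.18e-25 * 4e8) = 2.5e-3,
# so M0 = sqrt(1/eps) = 20, i.e. a strongly supersonic plume for which the AEM series is expected to apply.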
def solver(self):
""" Class method Solver integrates the AEM model equations in the specified plume grid. The method stores the different order
plasma properties in matrices of size (m x n), where m,n are the number of z,r points, respectively. Properties such as
density, temperature, electric field, etc. are calculated and saved as attributes of the class in this matrix form.
Usage:
>>> PlumeAEM = AEM(Plasma,z_span,eta_0,n0,uz0,ur0,AEM_Order) #Creation of AEM plume
>>> PlumeAEM.solver() # be sure to create a valid AEM plasma plume before applying the plume solver method
Main Plume properties solved and saved by the method as class attributes:
lnn (numpy.ndarray): 3-D matrix containing density values (logarithmic) for the three possible AEM solution orders.
uz (numpy.ndarray): 3-D matrix containing axial velocity values for the three possible AEM solution orders.
ur (numpy.ndarray): 3-D matrix containing radial velocity values for the three possible AEM solution orders.
T (numpy.ndarray): 3-D matrix containing plasma Temperature values for the three possible AEM solution orders.
phi (numpy.ndarray):3-D matrix containing plasma ambipolar electric field for the three possible AEM solution orders.
div (numpy.ndarray): 3-D matrix containing plume divergence values for the three possible AEM solution orders.
eta_ (int,numpy.ndarray): 3-D matrix containing ion current streamlines for the three possible AEM solution orders.
To access these variables, for instance:
>>> PlumeAEM.lnn[0,:,:] #density values for Cold Beam Zeroth Order AEM solution of plume expansion in the grid
>>> PlumeAEM.uz[1,:,:] # axial velocity First Order AEM solution
>>> PlumeAEM.T[2,:,:] ## Temperature values for Second Order AEM solution
"""
self.__zpts = np.shape(self.z_grid)[1] #Number of axial steps
self.__epts = np.shape(self.r_grid)[0] #Number of radial steps
self.uz = np.zeros((3,self.__epts,self.__zpts)) #3D matrix containing the axial velocity solutions for zeroth,first and second order
self.ur = np.zeros((3,self.__epts,self.__zpts)) #3D matrix containing the radial velocity solutions for zeroth,first and second order
self.div = np.zeros((3,self.__epts,self.__zpts)) #3D matrix containing the plume divergence solutions for zeroth,first and second order
self.lnn = np.zeros((3,self.__epts,self.__zpts)) #3D matrix containing the natural logarithm of density solutions for zeroth,first and second order
self.T = np.zeros((3,self.__epts,self.__zpts)) #3D matrix containing the plasma temperature solutions for zeroth,first and second order
self.phi = np.zeros((3,self.__epts,self.__zpts)) #3D matrix containing the electric potential solutions for zeroth,first and second order
self.eta_ = np.zeros((3,self.__epts,self.__zpts)) #3D matrix containing the eta lines for zeroth,first and second order
"""COMPUTE COLD PLASMA BEAM (ZEROTH ORDER) SOLUTION"""
self.uz_0 = np.zeros((self.__epts, self.__zpts)) #matrix containing cold plasma axial velocity
self.ur_0 = np.zeros((self.__epts, self.__zpts)) #matrix containing zeroth order radial velocity
n_0 = np.zeros((self.__epts, self.__zpts)) #matrix containing zeroth order plasma density
self.lnn_0 = np.zeros((self.__epts, self.__zpts)) #matrix containing natural logarithm of density
"""ADVANCES IN AXIAL DIRECTION"""
for i in range(0,self.__zpts):
"Calculation of properties at plume axis r = 0"
z = self.z_grid[0,i] # Step in z-grid
n_0[0,i] = self.n0[0] / (1 + self.d0p[0] * z)**2 #COMPUTATION OF DENSITY WHEN r0 --> 0, ON THE AXIS. Apply L'Hopital to equation 3.2 in paper
self.uz_0[0,i] = self.uz0[0] #Axial velocity at axis is kept constant and equal to origin velocity
self.ur_0[0,i] = 0 #No radial velocity at axis
for j in range(1,self.__epts):
"ADVANCE IN THE RADIAL DIRECTION"
r0 = self.eta[j] #Updating radial coordinate
self.uz_0[j,i] = self.uz0[j] #Updating axial velocity
self.ur_0[j,i] = self.uz0[j]*self.d0[j] #updating radial velocity
"COMPUTATION OF DENSITY"
n_0[j,i] = self.n0[j] / ((1 + self.d0p[j] * z) * (1 + self.d0[j] * z / r0 )) #updating density
"Updating plume variable arrays with Zeroth-Order plasma plume reults"
self.lnn_0[:,:] = np.log(n_0[:,:])
self.uz[0,:,:] = self.uz_0
self.ur[0,:,:] = self.ur_0
self.lnn[0,:,:] = self.lnn_0
self.div[0,:,:] = self.ur[0,:,:]/ self.uz[0,:,:]
self.T[0,:,:] = super(AEM,self).temp(n_0[:,:],self.n0[0],self.T_0,self.Gamma) #Calling parent class method Hyperplume.temp() to calculate plume cold beam temperature based on density
self.phi[0,:,:] = super(AEM,self).phi(n_0[:,:],self.n0[0],self.T_0,self.Gamma,self.q_ion) #Calling parent class method Hyperplume.phi() to calculate plume cold beam potential based on density
self.eta_[0,:,:] = self.r_grid-self.div[0,:,:]*self.z_grid #Zeroth order eta-lines based on theory (paper equation no.)
"""COMPUTE FIRST ORDER CORRECTION OF PLUME VARIABLES"""
zed = np.zeros((self.__zpts,1)) #array of axial steps
zed[:,0] = self.z_grid[0,:] #redimensioning
zstep = zed[1]-zed[0] #scalar measuring the axial step of the grid. Assumes linearly spaced z points
dlnn_dz_0, dlnn_dr_0 = self.partial_derivs(self.lnn_0, 0) #GET REQUIRED DERIVATIVES OF ZEROTH ORDER DENSITY (LOGARITHMIC) ALONG GRID POINTS
"COMPUTE FIRST ORDER CONTRIBUTION OF VELOCITIES. EULER METHOD APPLIED TO ALL STREAMLINES"
self.uz_1 = np.zeros((self.__epts,self.__zpts)) #matrix containing first order axial velocity
self.ur_1 = np.zeros((self.__epts,self.__zpts)) #matrix containing first order radial velocity
n1 = np.zeros((self.__epts,self.__zpts)) #matrix containing first order density
for i in range(1,self.__zpts):
"Computation of the first order velocity perturbation along one streamline. Euler method applied to all the streamlines at the same time"
self.uz_1[:,i] = self.uz_1[:,i-1] + zstep * self.uz0[0]**2 / self.uz0 * ( 1/self.uz0[0]**2 * self.uz0p / (1+self.d0p*zed[i-1]) * (self.d0 * self.uz_1[:,i-1] - self.ur_1[:,i-1]) - (n_0[:,i-1]/self.n0[0]) ** (self.Gamma-1) * dlnn_dz_0[:,i-1] )
self.ur_1[:,i] = self.ur_1[:,i-1] + zstep * self.uz0[0]**2 / self.uz0 * ( 1/self.uz0[0]**2 * self.duz0p / (1+self.d0p*zed[i-1]) * (self.d0 * self.uz_1[:,i-1] - self.ur_1[:,i-1]) - (n_0[:,i-1]/self.n0[0]) ** (self.Gamma-1) * dlnn_dr_0[:,i-1] )
"GET REQUIRED VECTORS AND DERIVATIVES TO BE USED IN FISRT ORDER DENSITY CORRECTION"
rur_1 = self.ur_1 * self.r_grid
_,drur1_dr = self.partial_derivs(rur_1,0)
duz1_dz,_ = self.partial_derivs(self.uz_1,0)
"""GET FIRST ORDER CONTRIBUTION OF THE DENSITY LOGARITHIM. EULER METHOD APPLIED TO ALL STREAMLINES.tHE FUNCTION INTEGRATES THE
FIRST ORDER DENSTY PERTURBATION ALONG STREAMLINE"""
self.lnn_1 = np.zeros((self.__epts,self.__zpts)) #matrix containing first order density correction(logarithmic)
"Compute limit of 1/r*drur1/dr for r-->0 as second order derivative of r*ur1.Apply L'Hopital"
limit = np.zeros((1,self.__zpts)) #array containing 1/r(0) * drur1(0)/dr (at the axis of plume)
_,limit = self.partial_derivs(drur1_dr,0)
for i in range(1,self.__zpts):
"COMPUTE DENSITY FIRST ORDER SOLUTION FOR THE AXIS STREAMLINE"
self.lnn_1[0,i] = self.lnn_1[0,i-1] + zstep * 1 / self.uz0[0] *( - self.uz_1[0,i-1] * dlnn_dz_0[0,i-1] - self.ur_1[0,i-1] * dlnn_dr_0[0,i-1] - duz1_dz[0,i-1] - limit[1,i-1] )
for i in range(1,self.__zpts):
"COMPUTE DENSITY FIRST ORDER SOLUTION FOR ALL REMAINING STREAMLINES"
self.lnn_1[1:,i] = self.lnn_1[1:,i-1] + zstep * 1 / self.uz0[1:] *( - self.uz_1[1:,i-1] * dlnn_dz_0[1:,i-1] - self.ur_1[1:,i-1] * dlnn_dr_0[1:,i-1] - duz1_dz[1:,i-1] - 1 / self.r_grid[1:,i-1] * drur1_dr[1:,i-1])
"""Updating plume variable arrays with First Order Correction plasma plume reults"""
self.uz[1,:,:] = self.uz[0,:,:] + self.uz_1*self.eps
self.ur[1,:,:] = self.ur[0,:,:] + self.ur_1*self.eps
self.lnn[1,:,:] = self.lnn[0,:,:] + self.lnn_1*self.eps
n1[:,:] = np.exp(self.lnn[1,:,:])
self.div[1,:,:] = self.ur[1,:,:]/ self.uz[1,:,:]
self.T [1,:,:] = super(AEM,self).temp(n1[:,:],self.n0[0],self.T_0,self.Gamma) #Calling parent class method Hyperplume.temp() to calculate plume second order solution temperature based on density
self.phi[1,:,:] = super(AEM,self).phi(n1[:,:],self.n0[0],self.T_0,self.Gamma,self.q_ion) #Calling parent class method Hyperplume.phi() to calculate plume second order solution temperature based on density
self.eta_[1,:,:] = self.r_grid-self.div[1,:,:]*self.z_grid #PABLO20170426: this is clearly wrong, but why? Corrections are integrable along zeroth order streamlines as said in the article. The Matlab code calls function orbit2d.m, which I cannot understand
"""COMPUTE SECOND ORDER SOLUTION"""
dlnn_dz_0, dlnn_dr_0 = self.partial_derivs(self.lnn_0, 0) #COMPUTE REQUIRED VECTORS AND DERIVATIVES TO BE USED IN SECOND ORDER CORRECTION CALCULATIONS
dlnn_dz_1, dlnn_dr_1 = self.partial_derivs(self.lnn_1, 0)
duz_dz_1, duz_dr_1 = self.partial_derivs(self.uz_1, 0)
dur_dz_1, dur_dr_1 = self.partial_derivs(self.ur_1, -1)
self.uz_2 = np.zeros((self.__epts,self.__zpts)) #matrix containing second order axial velocity
self.ur_2 = np.zeros((self.__epts,self.__zpts)) #matrix containing second order radial velocity
n2 = np.zeros((self.__epts,self.__zpts)) #matrix containing second order density
for i in range(1,self.__zpts):
"Computation of the second order velocity perturbation along one streamline. Euler method applied to all the streamlines at the same time"
self.uz_2[:,i] = self.uz_2[:,i-1] + zstep * 1 / self.uz0 *( self.uz0p / ( 1 + self.d0p*zed[i-1] )*( self.d0* self.uz_2[:,i-1] - self.ur_2[:,i-1] )-self.uz_1[:,i-1] * duz_dz_1[:,i-1] - self.ur_1[:,i-1] * duz_dr_1[:,i-1]- self.uz0[0]**2*((n_0[:,i-1]/self.n0[0])**(self.Gamma-1) * ( (self.Gamma-1)*self.lnn_1[:,i-1] * dlnn_dz_0[:,i-1] + dlnn_dz_1[:,i-1] ) ))
self.ur_2[:,i] = self.ur_2[:,i-1] + zstep * 1 / self.uz0 * ( self.duz0p / ( 1 + self.d0p*zed[i-1] )*( self.d0 * self.uz_2[:,i-1] - self.ur_2[:,i-1] )-self.uz_1[:,i-1] * dur_dz_1[:,i-1] - self.ur_1[:,i-1] * dur_dr_1[:,i-1]- self.uz0[0]**2*((n_0[:,i-1]/self.n0[0])**(self.Gamma-1) * ( (self.Gamma-1)*self.lnn_1[:,i-1] * dlnn_dr_0[:,i-1] + dlnn_dr_1[:,i-1] ) ))
"GET REQUIRED VECTORS AND DERIVATIVES TO BE USED IN FISRT ORDER DENSITY CORRECTION"
rur_2 = self.ur_2 * self.r_grid
_,drur2_dr = self.partial_derivs(rur_2,0)
duz2_dz,_ = self.partial_derivs(self.uz_2,0)
"""GET SECOND ORDER CONTRIBUTION OF THE DENSITY LOGARITHIM. EULER METHOD APPLIED TO ALL STREAMLINES.tHE FUNCTION INTEGRATES THE
FIRST ORDER DENSTY PERTURBATION ALONG STREAMLINE"""
self.lnn_2 = np.zeros((self.__epts,self.__zpts)) #matrix containing first order density (logarithmic)
"Compute limit of 1/r*drur2/dr for r-->0 as second order derivative of r*ur2.Apply L'Hopital"
limit = np.zeros((1,self.__zpts)) #array containing 1/r(0) * drur1(0)/dr (at the axis of plume)
_,limit = self.partial_derivs(drur2_dr,0)
for i in range(1,self.__zpts):
"COMPUTE SOLUTION FOR THE AXIS STREAMLINE"
self.lnn_2[0,i] = self.lnn_2[0,i-1] + zstep * 1 / self.uz0[0] *( - self.uz_2[0,i-1] * dlnn_dz_0[0,i-1] - self.ur_2[0,i-1] * dlnn_dr_0[0,i-1] - duz2_dz[0,i-1] - limit[0,i-1] - self.uz_1[0,i-1]*dlnn_dz_1[0,i-1] - self.ur_1[0,i-1]*dlnn_dr_1[0,i-1] )
for i in range(1,self.__zpts):
"COMPUTE SOLUTION FOR ALL REMAINING STREAMLINES"
self.lnn_2[1:,i] = self.lnn_2[1:,i-1] + zstep * 1 / self.uz0[1:] * (-self.uz_2[1:,i-1] * dlnn_dz_0[1:,i-1] - self.ur_2[1:,i-1] * dlnn_dr_0[1:,i-1] - duz2_dz[1:,i-1]- 1/self.r_grid[1:,i-1] * drur2_dr[1:,i-1]- self.uz_1[1:,i-1]*dlnn_dz_1[1:,i-1] - self.ur_1[1:,i-1]*dlnn_dr_1[1:,i-1] )
"Updating plume variable arrays with Second-Order plasma plume reults"
self.uz[2,:,:] = self.uz[1,:,:] + self.uz_2*self.eps**2
self.ur[2,:,:] = self.ur[1,:,:] + self.ur_2*self.eps**2
self.lnn[2,:,:] = self.lnn[1,:,:] + self.lnn_2*self.eps**2
n2[:,:] = np.exp(self.lnn[2,:,:])
self.div[2,:,:] = self.ur[2,:,:]/ self.uz[2,:,:]
self.T [2,:,:] = super(AEM,self).temp(n2[:,:],self.n0[0],self.T_0,self.Gamma) #Calling parent class method Hyperplume.temp() to calculate plume second order solution temperature based on density
self.phi[2,:,:] = super(AEM,self).phi(n2[:,:],self.n0[0],self.T_0,self.Gamma,self.q_ion) #Calling parent class method Hyperplume.phi() to calculate plume second order solution temperature based on density
self.eta_[2,:,:] = self.r_grid-self.div[2,:,:]*self.z_grid #PABLO20170426: This is clearly wrong, but why? Corrections are integrable along zeroth order streamlines as said in the article. The Matlab code calls function orbit2d.m, which I don't understand. Could you explain?
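# Sketch of inspecting the three correction orders after a solver() run
# (``PlumeAEM`` is the hypothetical plume object from the class docstring example):
#   >>> PlumeAEM.solver()
#   >>> n_cold = np.exp(PlumeAEM.lnn[0,:,:])    # zeroth order (cold beam) density
#   >>> n_first = np.exp(PlumeAEM.lnn[1,:,:])   # zeroth order plus eps * first order correction
#   >>> n_second = np.exp(PlumeAEM.lnn[2,:,:])  # up to the eps**2 * second order correction
#   >>> np.max(np.abs(n_second - n_first))      # rough size of the last retained term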
def marching_solver(self,nsects):
"""Marching schem AEM plume solver.AEM Class method marching_solver solves extends the AEM solution downstream by
reinitializing the method at each new calculated plasma plume front, (r0_front,z0_front,uz0_front,n0_front)
preventing excessive error growth in the calculations and widening the convergence region of thr AEM model.
Marching_solver method reinitializes the plume initial parameter, with the values calculated in the previous
integration step, as many times as indicated by the user in nsects. It then solves the plume expansion incrementally
by callling the solver method multiple times.
Args:
nsects (int): number of axial sections or steps (plume fronts), where solver reinitializes the
model and integrates the solution again.
Usage:
>>> PlumeAEM = AEM(Plasma,z_span,eta_0,n0,uz0,ur0,AEM_Order) #Creation of AEM plume
>>> Plume.marching_solver(nsects=100)
Same Plasma attributes from standard solver can be accessed in Method marching_solver, but in this case the method stores only the
ith higher order correction specified by the user at plume creation with the input argument sol_order:
lnn (numpy.ndarray): matrix containing density values (logarithmic) for the selected AEM solution order.
uz (numpy.ndarray): matrix containing axial velocity values for the selected AEM solution order.
ur (numpy.ndarray): matrix containing radial velocity values for the selected AEM solution order.
T (numpy.ndarray): matrix containing plasma Temperature values for the selected AEM solution order.
phi (numpy.ndarray): matrix containing plasma ambipolar electric field for the selected AEM solution order.
div (numpy.ndarray): matrix containing plume divergence values for the selected AEM solution order.
To access these properties, for instance:
>>> PlumeAEM.lnn # density values for AEM ith order solution of plume expansion in the grid
>>> PlumeAEM.uz # axial velocity for AEM ith order solution
>>> PlumeAEM.T # Temperature values for AEM ith order solution
"""
z_grid_ = np.zeros((self.eta.size,self.z_span.size)) #2D matrix containing the z grid points in marching mode
r_grid_ = np.zeros((self.eta.size,self.z_span.size)) #2D matrix containing the r grid points in marching mode
lnn_ = np.zeros((self.eta.size,self.z_span.size)) #2D matrix containing the density (natural logarithm) in marching mode
uz_ = np.zeros((self.eta.size,self.z_span.size)) #2D matrix containing the plume axial velocity in marching mode
ur_ = np.zeros((self.eta.size,self.z_span.size)) #2D matrix containing the plume radial velocity in marching mode
div_ = np.zeros((self.eta.size,self.z_span.size)) #2D matrix containing the plume divergence in marching mode
#T_ = np.zeros((self.eta.size,self.z_span.size)) #2D matrix containing theplume temperature in marching mode
#phi_ = np.zeros((self.eta.size,self.z_span.size)) #2D matrix containing the electric potential in marching mode
zsects_val = np.linspace(self.z_span.max()/nsects,self.z_span.max(),nsects) #z_grid points values at the end of each section/front in maching mode
"""GENERATION OF FIRST COMPUTATIONAL GRID"""
dz = self.z_span.max()/(self.z_span.size - 1) #minimum integration step in axial direction
zpts = int(round(zsects_val[0]/dz) + 1) #number of steps in axial direction (number of z_points)
zed = np.zeros((1,zpts+1)) #array containing integration axial steps
zed[0,0:zpts] = np.linspace(0,zsects_val[0],zpts)
zed[0,zpts] = zsects_val[0] + dz
self.z_grid,self.r_grid = self.grid_setup(zpts+1,self.eta.size) #calculation of z_grid and r_grid based on first front
M0_start = self.M0 #Initial Mach Number based on first front
"""Tracking variables for marching method"""
zpts_old = 1 #tracker of previous axial grid points
count = 0 #counter of axial sections
vel_factor = 1 #factor for front plume velocity correction
Te_factor = 1 #factor for front plume temperature corrections
for i in range(nsects):
"""Solving current plasma axial extension"""
self.solver() #Calling to method AEM class method to solve the plume in the entire grid
if i == 0:
"""Update contents of marching plume_final result matrixes with
results from self.solver method in the grid, for first axial section"""
lnn_[:,0:zpts] = self.lnn[self.order,:,0:zpts]
uz_[:,0:zpts] = self.uz[self.order,:,0:zpts]
ur_[:,0:zpts] = self.ur[self.order,:,0:zpts]
div_[:,0:zpts] = self.div[self.order,:,0:zpts]
#T_[:,0:zpts] = self.T[self.order,:,0:zpts]
#phi_[:,0:zpts] = self.phi[self.order,:,0:zpts]
z_grid_[:,0:zpts] = self.z_grid[:,0:zpts]
r_grid_[:,0:zpts] = self.r_grid[:,0:zpts]
else:
"""Update contents of marching plume_final result matrixes with
results from self.solver method in the grid, for remaining axial section"""
lnn_[:,zpts_old-1:zpts_old+zpts-1] = self.lnn[self.order,:,0:zpts]
uz_[:,zpts_old-1:zpts_old+zpts-1] = self.uz[self.order,:,0:zpts] * vel_factor
ur_[:,zpts_old-1:zpts_old+zpts-1] = self.ur[self.order,:,0:zpts] * vel_factor
div_[:,zpts_old-1:zpts_old+zpts-1] = self.div[self.order,:,0:zpts]
#T_[:,zpts_old-1:zpts_old+zpts-1] = self.T[self.order,:,0:zpts] * Te_factor
#phi_[:,zpts_old-1:zpts_old+zpts-1] = self.phi[self.order,:,0:zpts]
z_grid_[:,zpts_old-1:zpts_old+zpts-1] = self.z_grid[:,0:zpts] + zsects_val[i-1]
r_grid_[:,zpts_old-1:zpts_old+zpts-1] = self.r_grid[:,0:zpts]
zpts_old = zpts_old + zpts -1 #Updating value of axial points, to move forward along the sections
"""PREPARING NEXT INTEGATION AXIAL STEP"""
if i < nsects-1:
"""Updating new integration interval parameters"""
zpts = int(round((zsects_val[i+1]-zsects_val[i])/dz) + 1) #Updating next number of axial points
zed = np.zeros((1,zpts+1)) #Updating next array of axial integration steps
zed[0,0:zpts] = np.linspace(zsects_val[i],zsects_val[i+1],zpts) - zsects_val[i]
zed[0,zpts] = zed[0,zpts-1] + dz
"""New initial profiles for the next axial integration interval"""
r0_front = self.r_grid[:,-2] #New initial r_span obtained from before-last previous section r_grid
z0_front = zed #New z0_front based on zed array
n0_front = np.exp(self.lnn[self.order,:,-2]) #New initial density profile obtained from before-last previous section density
uz0_front = self.uz[self.order,:,-2] #New initial axial velocity profile obtained from before-last previous section axial velocity
ur0_front = self.ur[self.order,:,-2] #New radial velocity profile obtained from before-last previous section radial velocity
super(AEM,self).__init__(self.plasma,z0_front.reshape(zed.shape[1],),r0_front,n0_front) #calling Hyperplume constructor to reload the new initial profiles
self.uz0,self.ur0,self.d0 = uz0_front,ur0_front,ur0_front/uz0_front #reloading remaining new plume initial profiles
"""Compute derivatives of new initial plume profiles"""
self.d0p = self.eta_deriver(self.eta,self.d0) #derivative of plume divergence
self.d0p[0],self.d0p[-1] = self.d0[1]/self.eta[1],self.d0p[-2] + (self.d0p[-2] - self.d0p[-3])
self.uz0p = self.eta_deriver(self.eta,self.uz0) #derivative of initial axial velocity
self.uz0p[0],self.uz0p[-1] = 0,self.uz0p[-2] + (self.uz0p[-2] - self.uz0p[-3])
self.duz0p = self.eta_deriver(self.eta,self.d0*self.uz0) #derivative of initial radial velocity
self.duz0p[0],self.duz0p[-1] = self.duz0p[1]/self.eta[1],self.duz0p[-2] + (self.duz0p[-2] - self.duz0p[-3])
"""Updating correction factors"""
vel_factor = vel_factor * self.uz[self.order,0,-2]/self.uz[self.order,0,0] #velocity factor for next integration interval
Te_factor = Te_factor * self.T[self.order,0,-2]/self.T[self.order,0,0] #Temperature factor for next integration interval
self.M0 = M0_start*np.sqrt(vel_factor**2/Te_factor) #Update new Mach number based on previous Mach and correction factors
self.eps = 1/self.M0**2 #new AEM epsilon expansion parameter
self.z_grid,self.r_grid = self.grid_setup(zpts+1,self.eta.size) #new interval solution grids
"""line1 = plt.plot(self.eta,self.n0,'b');
line2 = plt.plot(self.eta,self.uz0/self.uz0[0],'k')
line3 = plt.plot(self.eta,self.d0,'r',label=r'$Plume divergence \delta = u_{r0}/u_{z0}$')
count = count + 1
if count > 100:
plt.close(fig)
count = 0
red_line = mlines.Line2D([], [], color='red', label=r'$\delta_{0}$')
black_line = mlines.Line2D([], [], color='black', label=r'$\tilde{u}_{z0}$')
blue_line = mlines.Line2D([], [], color='blue', label=r'$\tilde{n}_{0}$')
plt.legend(handles=[red_line,black_line,blue_line],loc='best')
plt.title('AEM Marching Solver plume front evolution')
plt.xlabel(r'$\eta$')
plt.savefig('Marching_solver_init_fronts.png')"""
""" Updating final plume attibutes structure with marching_plume results"""
self.z_grid = z_grid_
self.r_grid = r_grid_
self.lnn = lnn_
self.uz = uz_
self.ur = ur_
self.div = div_
self.T = super(AEM,self).temp(np.exp(self.lnn),np.exp(self.lnn)[0,0],self.T_0,self.Gamma)#T_
self.phi = super(AEM,self).phi(np.exp(self.lnn),np.exp(self.lnn)[0,0],self.T_0,self.Gamma,self.q_ion)#phi_
def query(self,z,r):
""" Method query returns the density, velocity profile, temperature, the electric potential at
particular (z,r) points in the Plume.
These plasma properties are interpolated along the previously calculated 2D grids z_grid and r_grid
at targeted (z,r) points specified by the user. User must always check if np.max(r) > np.max(self.r_grid),
np.max(z) > np.max(self.z_grid) in their query point set, to avoid extrapolation results.
Args:
z (float,numpy.ndarray): new interpolation z points.
r (float,numpy.ndarray): new interpolation r points.
Outputs:
lnn (int,numpy.ndarray): logarithmic plasma density at specified (z,r) points in plume grid
u_z (int,numpy.ndarray): plasma axial velocity at specified (z,r) points in plume grid
u_r (int,numpy.ndarray): plasma radial velocity at specified (z,r) points in plume grid
T (int,numpy.ndarray): plasma temperature at specified (z,r) points in plume grid
phi (int,numpy.ndarray): plasma ambipolar electric potential at specified (z,r) points in plume grid
eta (int,numpy.ndarray): ion current stream lines at specified (z,r) points in plume grid
Usage:
>>> z,r = np.linspace(0,100,50),np.linspace(0,50,40) #target (z,r) for plume query
>>> lnn,u_z,u_r,T,phi,eta=PlumeAEM.query(z,r)
Method query returns only the self.order solution indicated by the user.
#PABLO20170506: After reading extensively on how to perform interpolation over 2D rectangular grids,
I decided to keep griddata for this task. The problem with interp2d is that, given the
large size of self.z_grid and self.r_grid (m x n), the number of points exceeds the method's memory and it returns an error.
Even if the size of the grids is made smaller (losing accuracy and information in the AEM), the method
interp2d takes a lot of time (sometimes I had to reset the console). If you want to try to fix the bug or
otherwise tell me what I am doing incorrectly, I leave the line of code with interp2d that I was using to solve
the interpolation (the syntax is exactly the same as the one we saw the other day in your office, but for
some reason it is not returning the results I expect):
lnn = interp2d(self.z_grid,self.r_grid,self.lnn[self.order,:,:])(z.flatten(),r.flatten())
On the other hand, griddata in Python does not behave like the Matlab function (in Matlab this function does indeed
extrapolate the results). griddata is the recommended function to use over large arrays of 2D structured
or unstructured data, and I have checked the return of the interpolation using griddata and it is correct.
"""
grid_points = np.array((self.z_grid.flatten(),self.r_grid.flatten())).T #pairing each z_grid point to its matching r_grid point
lnn = griddata(grid_points,self.lnn[self.order,:,:].flatten(),(z,r),method='linear') #Logarithm of plasma plume density interpolation matrix
u_z = griddata(grid_points,self.uz[self.order,:,:].flatten(),(z,r),method='linear') #Plasma plume axial velocity interpolation matrix
u_r = griddata(grid_points,self.ur[self.order,:,:].flatten(),(z,r),method='linear') #Plasma plume radial velocity interpolation matrix
T = griddata(grid_points,self.T[self.order,:,:].flatten(),(z,r),method='linear') #Plasma plume temperature interpolation matrix
phi = griddata(grid_points,self.phi[self.order,:,:].flatten(),(z,r),method='linear') #Plasma plume electric potential interpolation matrix
eta = griddata(grid_points,self.eta_[self.order,:,:].flatten(),(z,r),method='linear') #Eta-line interpolated values at specied (z,r) points
return lnn,u_z,u_r,T,phi,eta
def grid_setup(self,zpts,epts):
""" grid_setup creates an strctured grid of z,r points where the AEM problem will be integrated
Args:
zpts (int): number of axial points in the structure. Indicates legnth oof axial plume span
epts (int): number of radial points in the structure. Indicates legnth of radial plume span
Returns:
z_grid (numpy.ndarray): 2D matrix containing axial grid points for model integration
r_grid (numpy.ndarray): 2D matrix containing radial grid points for model integration
Usage:
>>> z_grid,r_grid = PlumeAEM.grid_setup(100,50)
"""
z_grid = np.zeros((epts,zpts)) #2D Matrix of Plume z points
r_grid = np.zeros((epts,zpts)) #2D Matrix of Plume r points
"""Compute the radial and axial coordinates of each grid points along the streamlines"""
for j in range(epts): #advance radially
for k in range(zpts): #advance axially
"Compute the axial coordinate along the jth streamline"
z_grid[j,k] = self.z_span[k] #update z_grid point from initial z_front
for j in range(epts): #advance radially
r_grid[j,0] = self.eta[j] #updating initial r_grid fron points
for k in range(1,zpts): #advance axially
r_grid[j,k] = r_grid[j,0]+self.d0[j]*self.z_span[k] #update r_grid points from initial front and divergence
return z_grid,r_grid
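# Worked sketch of the grid geometry built above (illustrative numbers): a streamline
# seeded at eta = r0 with initial divergence d0 stays on the straight line
# r(z) = r0 + d0*z, so r0 = 1 and d0 = 0.2 give r = 1.0, 3.0, 11.0 at z = 0, 10, 50,
# while z_grid simply repeats z_span on every streamline.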
def val_domain(self):
""" val_domain class method evaluates the validity of the AEM series expansion solution
at z_grid and r_grid points in the plume. Validity results for each AEM order
are stored in the 3D matrix Plume.val. This matrix is filled with values indicating a specific validity condition
in the results.
VALIDITY VALUES
0 - Not valid for both velocity and density
1 - Valid only for velocity
-1 - Valid only for density
2 - Valid for both velocity and density
Usage:
>>> PlumeAEM.val_domain() #Intialize validity condition study
>>> print(Plume.val) #See results of validation
"""
rel_size = 0.1 #Maximum relative size of the ith order perturbation wrt the (i-1)th order to ensure validity of the solution
self.val = np.zeros((3,self.__epts,self.__zpts)) #3D matrix with validity values for each grid point and each solution order
self.val[0,:,:]=2 #Setting validity of Zeroth Order solution as reference
for i in range(self.order):
"Creation of plume variable contribution matrixes"
uz_contribution= np.zeros((self.__epts,self.__zpts))
ur_contribution= np.zeros((self.__epts,self.__zpts))
lnn_contribution= np.zeros((self.__epts,self.__zpts))
uz_contribution[:,1:] = abs((self.uz[i+1,:,1:]-self.uz[i,:,1:])/self.uz[i,:,1:])
ur_contribution[1:,1:] = abs((self.ur[i+1,1:,1:]-self.ur[i,1:,1:])/self.ur[i,1:,1:])
lnn_contribution[:,1:] = abs((self.lnn[i+1,:,1:]-self.lnn[i,:,1:])/self.lnn[i,:,1:])
for j in range(self.__epts): #advance in radial direction
for k in range(self.__zpts): #advance axially
if (uz_contribution[j,k]<rel_size and ur_contribution[j,k] < rel_size): #applying validity criterion stated in dissertation
self.val[i+1,j,k] = 1
if lnn_contribution[j,k] < rel_size: #applying validity criterion stated in dissertation
self.val[i+1,j,k] = 2
elif lnn_contribution[j,k] < rel_size: #applying validity criterion stated in dissertation
self.val[i+1,j,k] = -1
def partial_derivs(self,var,type2):
"""Class method partial_derivs computes the partial derivatives of plasma variables with respect to
the physical z,r coordinates at the plume grid points.
Args:
var (numpy.ndarray): Variable values to derive at z,r grid points
type2 (int): Integer defining the behaviour of the derivative at the borders and therefore the
Type of variable to be differentiated:
0: Symmetric function. Border derivative value is set to 0
-1: Anti-symmetric function. Forward finite difference is used for border derivative calculation.
Returns:
dvar_dz : z-partial derivative values of input argument var at the grid points
dvar_dr : r-partial derivative values of input argument var at the grid points
Usage:
>>> dlnn0_dz,dlnn0_dr = PlumeAEM.partial_derivs(PlumeAEM.lnn_0,0) #derivatives of Zeroth order density (logarithmic)
"""
dvar_dz = np.zeros((self.__epts,self.__zpts)) #2D matrix storing z-derivative values of var at grid points
dvar_dr = np.zeros((self.__epts,self.__zpts)) #2D matrix storing z-derivative values of var at grid points
zfactor = np.zeros((self.__epts,self.__zpts)) #2D Jacobian z transformation matrix to pass from zita-eta derivatives to z,r derivatives at grid points (see dissertation appendix)
rfactor = np.zeros((self.__epts,self.__zpts)) #2D Jacobian r transformation matrix to pass from zita-eta derivatives to z,r derivatives at grid points (see dissertation appendix)
eta_grid = np.dstack((self.r_grid[:,0],)*self.__zpts)[0] #2D matrix containing stacked values of the eta coordinates at grid points
"CALCULATION OF DERIVATIVES IN ZITA-ETA COORDINATES AT GRID POINTS"
"This function computes the partial derivatives with respect to the stream coordinates eta-Zita at the grid points"
dvar_dzita = np.zeros((self.__epts,self.__zpts)) #2D matrix storing zita-derivative values of var at grid points
dvar_deta = np.zeros((self.__epts,self.__zpts)) #2D matrix storing eta-derivative values of var at grid points
"Compute the zita partial derivatives of the grid points inside the domain (excluding the boundaries) with a centred difference"
dvar_dzita[0:,1:-1] = (var[0:,2:] - var[0:,0:-2]) / (self.z_grid[0:,2:] - self.z_grid[0:,0:-2])
"Compute the zita partial derivatives at z = 0 and zmax, using respectively a forwards and backwards Euler derivative"
dvar_dzita[0:,0] = ( var[0:,1] - var[0:,0] ) / (self.z_grid[0,1] - self.z_grid[0,0])
dvar_dzita[0:,-1] = ( var[0:, -1] - var[0:, -2] ) / (self.z_grid[0,-1] - self.z_grid[0,-2])
"""Compute the eta partial derivatives of the grid points inside the domain (excluding the boundaries) with a centred difference even when points
are not uniform along eta"""
h1 = eta_grid[1:-1,0:] - eta_grid[0:-2,0:] #steps in eta-grid used for derivation of variable
h2 = eta_grid[2:,0:] - eta_grid[1:-1,0:]
dvar_deta[1:-1,0:] = ((h1**2) * var[2:,0:] - ((h1**2) - (h2**2)) * var[1:-1,0:] - (h2**2) * var[0:-2,0:] ) / ((h1**2)*h2 + h1*(h2**2))
""" Compute the eta partial derivatives at z = 0 and zmax, using respectively a forwards and backwards Euler derivative
For eta = 0, consider the type of input function"""
if type2 == 0:
dvar_deta[0,0:] = 0
else:
dvar_deta[0,0:] = ( var[1, 0:] - var[0, 0:] ) / (eta_grid[1, 0:] - eta_grid[0,0:])
"Take the final eta line derivative equal to that of the previous point"
dvar_deta[-1,0:] = dvar_deta[-2,0:] + (dvar_deta[-2,0:] - dvar_deta[-3,0:])
for i in range(0,self.__zpts):
"Compute transformation factors in matrix form for z and r derivative using the Jacobian matrix"
zfactor[:,i] = -self.d0[:] / (1 + self.z_grid[:,i] * self.d0p[:]) #Updating Jacobian matrixes
rfactor[:,i] = 1 / (1 + self.z_grid[:,i] * self.d0p[:])
dvar_dz[:,:] = dvar_dzita + zfactor * dvar_deta #updating final derivatives in z,r coordinates
dvar_dr[:,:] = rfactor * dvar_deta
return dvar_dz, dvar_dr
|
mit
|
jrosebr1/imutils
|
imutils/convenience.py
|
1
|
11508
|
# author: Adrian Rosebrock
# website: http://www.pyimagesearch.com
# import the necessary packages
import numpy as np
import cv2
import sys
# import any special Python 2.7 packages
if sys.version_info.major == 2:
from urllib import urlopen
# import any special Python 3 packages
elif sys.version_info.major == 3:
from urllib.request import urlopen
def translate(image, x, y):
# define the translation matrix and perform the translation
M = np.float32([[1, 0, x], [0, 1, y]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
# return the translated image
return shifted
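# Small usage sketch (illustrative only; 'example.jpg' is a made-up path):
#   image = cv2.imread("example.jpg")
#   shifted = translate(image, 25, -50)   # 25 px to the right, 50 px up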
def rotate(image, angle, center=None, scale=1.0):
# grab the dimensions of the image
(h, w) = image.shape[:2]
# if the center is None, initialize it as the center of
# the image
if center is None:
center = (w // 2, h // 2)
# perform the rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w, h))
# return the rotated image
return rotated
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# center
(h, w) = image.shape[:2]
(cX, cY) = (w / 2, h / 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH))
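# Worked example of why the bounding box is recomputed (illustrative numbers): for a
# 600x400 (w x h) image rotated by 90 degrees, cos = 0 and sin = 1, so
# nW = h*1 + w*0 = 400 and nH = h*0 + w*1 = 600 -- the canvas swaps dimensions and
# nothing is clipped, whereas rotate() above keeps the original (w, h) canvas and
# crops the corners:
#   rotated_full = rotate_bound(image, 90)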
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
# initialize the dimensions of the image to be resized and
# grab the image size
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv2.resize(image, dim, interpolation=inter)
# return the resized image
return resized
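# Usage sketch (assumes a previously loaded `image`): giving only one dimension
# preserves the aspect ratio because the other is derived from it.
#   small = resize(image, width=300)                        # height computed automatically
#   tall = resize(image, height=480, inter=cv2.INTER_CUBIC)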
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
# determine the area (i.e. total number of pixels in the image),
# initialize the output skeletonized image, and construct the
# morphological structuring element
area = image.shape[0] * image.shape[1]
skeleton = np.zeros(image.shape, dtype="uint8")
elem = cv2.getStructuringElement(structuring, size)
# keep looping until the erosions remove all pixels from the
# image
while True:
# erode and dilate the image using the structuring element
eroded = cv2.erode(image, elem)
temp = cv2.dilate(eroded, elem)
# subtract the temporary image from the original, eroded
# image, then take the bitwise 'or' between the skeleton
# and the temporary image
temp = cv2.subtract(image, temp)
skeleton = cv2.bitwise_or(skeleton, temp)
image = eroded.copy()
# if there are no more 'white' pixels in the image, then
# break from the loop
if cv2.countNonZero(image) == 0:
break
# return the skeletonized image
return skeleton
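# Usage sketch (assumes a previously loaded BGR `image`): skeletonize expects a
# single-channel binary image, so convert and threshold first.
#   gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#   thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
#   skeleton = skeletonize(thresh, size=(3, 3))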
def opencv2matplotlib(image):
# OpenCV represents images in BGR order; however, Matplotlib
# expects the image in RGB order, so simply convert from BGR
# to RGB and return
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
def url_to_image(url, readFlag=cv2.IMREAD_COLOR):
# download the image, convert it to a NumPy array, and then read
# it into OpenCV format
resp = urlopen(url)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, readFlag)
# return the image
return image
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
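# Worked example of the adaptive thresholds (illustrative numbers): if the median
# intensity v is 100 and sigma keeps its default 0.33, then lower = int(0.67 * 100) = 67
# and upper = int(1.33 * 100) = 133, so the Canny band tracks overall image brightness.
#   edges = auto_canny(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))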
def grab_contours(cnts):
# if the length of the contours tuple returned by cv2.findContours
# is '2' then we are using either OpenCV v2.4, v4-beta, or
# v4-official
if len(cnts) == 2:
cnts = cnts[0]
# if the length of the contours tuple is '3' then we are using
# either OpenCV v3, v4-pre, or v4-alpha
elif len(cnts) == 3:
cnts = cnts[1]
# otherwise OpenCV has changed their cv2.findContours return
# signature yet again and I have no idea WTH is going on
else:
raise Exception(("Contours tuple must have length 2 or 3, "
"otherwise OpenCV changed their cv2.findContours return "
"signature yet again. Refer to OpenCV's documentation "
"in that case"))
# return the actual contours array
return cnts
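# Usage sketch (assumes `thresh` is a binary image, e.g. from cv2.threshold):
# grab_contours hides the cv2.findContours return-signature differences between
# OpenCV versions.
#   cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#   cnts = grab_contours(cnts)   # always a plain list of contours afterwards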
def is_cv2(or_better=False):
# grab the OpenCV major version number
major = get_opencv_major_version()
# check to see if we are using *at least* OpenCV 2
if or_better:
return major >= 2
# otherwise we want to check for *strictly* OpenCV 2
return major == 2
def is_cv3(or_better=False):
# grab the OpenCV major version number
major = get_opencv_major_version()
# check to see if we are using *at least* OpenCV 3
if or_better:
return major >= 3
# otherwise we want to check for *strictly* OpenCV 3
return major == 3
def is_cv4(or_better=False):
# grab the OpenCV major version number
major = get_opencv_major_version()
# check to see if we are using *at least* OpenCV 4
if or_better:
return major >= 4
# otherwise we want to check for *strictly* OpenCV 4
return major == 4
def get_opencv_major_version(lib=None):
# if the supplied library is None, import OpenCV
if lib is None:
import cv2 as lib
# return the major version number
return int(lib.__version__.split(".")[0])
def check_opencv_version(major, lib=None):
# this function may be removed in a future release as we now
# use the get_opencv_major_version function to obtain the current OpenCV
# version and then perform the actual version check *within* the
# respective function
import warnings
message = """
The check_opencv_version function is deprecated and may be
removed in a future release. Use at your own risk.
"""
warnings.warn(message, DeprecationWarning, stacklevel=2)
# if the supplied library is None, import OpenCV
if lib is None:
import cv2 as lib
# return whether or not the current OpenCV version matches the
# major version number
return lib.__version__.startswith(major)
def build_montages(image_list, image_shape, montage_shape):
"""
---------------------------------------------------------------------------------------------
author: Kyle Hounslow
---------------------------------------------------------------------------------------------
    Converts a list of single images into a list of 'montage' images of specified rows and columns.
    A new montage image is started once the rows and columns of the current montage are filled.
    Empty space in incomplete montage images is filled with black pixels.
---------------------------------------------------------------------------------------------
:param image_list: python list of input images
:param image_shape: tuple, size each image will be resized to for display (width, height)
:param montage_shape: tuple, shape of image montage (width, height)
:return: list of montage images in numpy array format
---------------------------------------------------------------------------------------------
example usage:
# load single image
img = cv2.imread('lena.jpg')
# duplicate image 25 times
num_imgs = 25
img_list = []
for i in xrange(num_imgs):
img_list.append(img)
# convert image list into a montage of 256x256 images tiled in a 5x5 montage
    montages = build_montages(img_list, (256, 256), (5, 5))
# iterate through montages and display
for montage in montages:
cv2.imshow('montage image', montage)
cv2.waitKey(0)
----------------------------------------------------------------------------------------------
"""
    if len(image_shape) != 2:
        raise Exception('image shape must be list or tuple of length 2 (width, height)')
    if len(montage_shape) != 2:
        raise Exception('montage shape must be list or tuple of length 2 (width, height)')
image_montages = []
# start with black canvas to draw images onto
montage_image = np.zeros(shape=(image_shape[1] * (montage_shape[1]), image_shape[0] * montage_shape[0], 3),
dtype=np.uint8)
cursor_pos = [0, 0]
start_new_img = False
for img in image_list:
if type(img).__module__ != np.__name__:
raise Exception('input of type {} is not a valid numpy array'.format(type(img)))
start_new_img = False
img = cv2.resize(img, image_shape)
# draw image to black canvas
montage_image[cursor_pos[1]:cursor_pos[1] + image_shape[1], cursor_pos[0]:cursor_pos[0] + image_shape[0]] = img
cursor_pos[0] += image_shape[0] # increment cursor x position
if cursor_pos[0] >= montage_shape[0] * image_shape[0]:
cursor_pos[1] += image_shape[1] # increment cursor y position
cursor_pos[0] = 0
if cursor_pos[1] >= montage_shape[1] * image_shape[1]:
cursor_pos = [0, 0]
image_montages.append(montage_image)
# reset black canvas
montage_image = np.zeros(shape=(image_shape[1] * (montage_shape[1]), image_shape[0] * montage_shape[0], 3),
dtype=np.uint8)
start_new_img = True
if start_new_img is False:
image_montages.append(montage_image) # add unfinished montage
return image_montages
def adjust_brightness_contrast(image, brightness=0., contrast=0.):
"""
Adjust the brightness and/or contrast of an image
:param image: OpenCV BGR image
:param contrast: Float, contrast adjustment with 0 meaning no change
:param brightness: Float, brightness adjustment with 0 meaning no change
"""
beta = 0
# See the OpenCV docs for more info on the `beta` parameter to addWeighted
# https://docs.opencv.org/3.4.2/d2/de8/group__core__array.html#gafafb2513349db3bcff51f54ee5592a19
return cv2.addWeighted(image,
1 + float(contrast) / 100.,
image,
beta,
float(brightness))
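# Hedged usage sketch for adjust_brightness_contrast: "frame" is an assumed
# BGR image. Contrast is a percentage change relative to the original and
# brightness an additive offset, both with 0 meaning no change:
#
#     brighter = adjust_brightness_contrast(frame, brightness=30., contrast=10.)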
|
mit
|
tmhm/scikit-learn
|
sklearn/neighbors/tests/test_ball_tree.py
|
129
|
10192
|
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
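# Note: compute_kernel_slow is the brute-force reference used below in
# test_ball_tree_kde; BallTree.kernel_density is checked against it for every
# kernel and bandwidth combination.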
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
|
bsd-3-clause
|
rmvanhees/pynadc
|
scripts/sdmf_calibSMR.py
|
1
|
54150
|
"""
This file is part of pynadc
https://github.com/rmvanhees/pynadc
Calibrate Sciamachy Solar Mean Radiance measurements
Copyright (c) 2016 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
#
# Code layout:
# SECTION VERSION
# - define various versions of the S/W
# SECTION AUXILIARY CKD
# - define functions to read and pre-process CKD required for the calibration
# - defined functions: get_h5_RadSensMoni
# SECTION READ DATA
# - define class SDMFreadSun to read the Sun measurements
# SECTION CALIBRATE DATA
# - define class 'SMRcalib' to calibrate the Sun measurements
# SECTION WRITE DATA
# - define class 'SMRdb' to write calibrated spectra and meta-data to database
# SECTION DISPLAY DATA
# - define class 'SMRshow' to display Sun measurements
# SECTION ARGPARSE
# - define function 'handleCmdParams' to obtain command-line parameters
# SECTION MAIN
# - code to be run as a standalone program
#
import sys
import argparse
import numpy as np
import numpy.ma as ma
import h5py
#-------------------------SECTION VERSION-----------------------------------
_swVersion = {'major': 0,
'minor': 8,
'revision' : 4}
_calibVersion = {'major': 1,
'minor': 1,
'revision' : 0}
_dbVersion = {'major': 0,
'minor': 9,
'revision' : 3}
#-------------------------SECTION ERROR CLASSES-----------------------------
class dbError(Exception):
pass
class readSunInfo(Exception):
pass
#-------------------------SECTION AUXILIARY CKD-----------------------------
def get_h5_RadSensMoni(NDF=True, debug=False):
fmtRSPM = '%-dfloat32, %-dfloat32, 8192float32, (%-d,%-d,8192)float64'
nameRSPM = ('ang_ele', 'ang_asm', 'wvlen', 'sensitivity')
fid = h5py.File('/SCIA/share/nadc_tools/key_radsens.h5', 'r')
grp = fid['PPG0']
dset = grp['PPG0']
ppg0 = dset[:]
grp = fid['ABS_RAD']
dset = grp['Axis 1 Wavelength']
ref_wl = dset[:] # regrid all data to ref_wl instead of Sun spectrum
dset = grp['ABS_RAD']
abs_rad = dset[:].astype('float64')
abs_rad /= (5.035e8 * ref_wl * ppg0)
del ppg0
grp = fid['OBM_s_p']
dset = grp['Axis 1 Wavelength']
obm_s_p_wl = dset[:]
dset = grp['OBM_s_p']
tmp = dset[:].astype('float64')
obm_s_p = np.empty_like(tmp)
for nc in range(8):
i_mn = nc * 1024
i_mx = i_mn + 1024
obm_s_p[i_mn:i_mx] = np.interp(ref_wl[i_mn:i_mx],
obm_s_p_wl[i_mn:i_mx],
tmp[i_mn:i_mx])
del tmp
grp = fid['ELEV_p']
dset = grp['Axis 1 wavelength']
elev_p_wl = dset[:]
dset = grp['Axis 2 elevation angle']
elev_p_angle = dset[:]
dset = grp['ELEV_p']
elev_p = dset[2, :]
grp = fid['ELEV_s']
dset = grp['Axis 1 wavelength']
elev_s_wl = dset[:]
dset = grp['Axis 2 elevation angle']
elev_s_angle = dset[:]
dset = grp['ELEV_s']
elev_s = dset[2, :]
elev_p_a0 = np.interp(ref_wl, elev_p_wl, elev_p.astype('float64'))
elev_s_a0 = np.interp(ref_wl, elev_s_wl, elev_s.astype('float64'))
elev_a0 = obm_s_p * elev_s_a0 + elev_p_a0
if NDF:
grp = fid['NDF']
dset = grp['Axis 1 Wavelength']
ndf_wl = dset[:]
dset = grp['NDF']
tmp = dset[:].astype('float64')
ndf = np.empty_like(tmp)
for nc in range(8):
i_mn = nc * 1024
i_mx = i_mn + 1024
ndf[i_mn:i_mx] = np.interp(ref_wl[i_mn:i_mx],
ndf_wl[i_mn:i_mx],
tmp[i_mn:i_mx])
del tmp
grp = fid['NDF_s_p']
dset = grp['Axis 1 Wavelength']
ndf_s_p_wl = dset[:]
dset = grp['NDF_s_p']
        tmp = dset[:].astype('float64')
        ndf_s_p = np.empty_like(tmp)
for nc in range(8):
i_mn = nc * 1024
i_mx = i_mn + 1024
ndf_s_p[i_mn:i_mx] = np.interp(ref_wl[i_mn:i_mx],
ndf_s_p_wl[i_mn:i_mx],
tmp[i_mn:i_mx])
del tmp
abs_rad *= (2 * ndf / (1 + ndf_s_p))
obm_s_p *= ndf_s_p
grp = fid['BRDF_p']
dset = grp['Axis 1 vacuum wavelength']
brdf_p_wl = dset[:]
dset = grp['Axis 2 elevation angle']
brdf_p_ele = dset[:]
dset = grp['Axis 3 ASM angle']
brdf_p_asm = dset[:]
dset = grp['BRDF_p']
brdf_p = dset[:].astype('float64')
grp = fid['BRDF_s']
dset = grp['Axis 1 vacuum wavelength']
brdf_s_wl = dset[:]
dset = grp['Axis 2 elevation angle']
brdf_s_ele = dset[:]
dset = grp['Axis 3 ASM angle']
brdf_s_asm = dset[:]
dset = grp['BRDF_s']
brdf_s = dset[:].astype('float64')
fid.close()
# re-arrange data: switch ang_asm and ang_ele and store both increasing
dimWave = brdf_p_wl.shape[0]
dimASM = brdf_p_asm.shape[0]
dimELE = brdf_p_ele.shape[0]
dimBRDF = dimASM * dimELE
indx = []
for ni in range(dimELE):
indx += list(ni
+ np.linspace(dimBRDF-dimELE, 0, num=dimASM).astype(int))
# write data to output structure, interpolated to the ele,asm grid
rspm = np.empty(1, dtype=fmtRSPM % (dimELE, dimASM, dimELE, dimASM))
rspm.dtype.names = nameRSPM
rspm['ang_ele'] = brdf_p_ele
rspm['ang_asm'] = brdf_p_asm[::-1]
rspm['wvlen'] = ref_wl
brdf_p = brdf_p.reshape(dimBRDF, dimWave)
brdf_s = brdf_s.reshape(dimBRDF, dimWave)
for ni in range(dimBRDF):
brdf_p_ni = np.interp(ref_wl, brdf_p_wl, brdf_p[indx[ni], :])
brdf_s_ni = np.interp(ref_wl, brdf_s_wl, brdf_s[indx[ni], :])
rspm['sensitivity'][0, ni // dimASM, ni % dimASM, :] = \
abs_rad * (obm_s_p * brdf_s_ni + brdf_p_ni) / elev_a0
if debug:
fid = h5py.File('scia_key_rspm.h5', 'w')
dset = fid.create_dataset('rspm', data=rspm)
fid.close()
return rspm
#-------------------------SECTION READ DATA---------------------------------
class SDMFextractSun:
"""
Read Sciamachy Sun/State 62 SDMF (v3.1) data
"""
def __init__(self, state_id=62, sun_db='/SCIA/SDMF31/sdmf_extract_sun.h5'):
self.sun_db = sun_db
self.state_id = state_id
self.numChannels = 8 # add some constants for SCIA
self.channelSize = 1024
self.numPixels = self.numChannels * self.channelSize
self.smr = None
self.wvlen = None
with h5py.File(sun_db, 'r') as fid:
dset = fid['ClusDef']
self.clusDef = dset[:]
def selectOrbits(self, orbitRange):
with h5py.File(self.sun_db, 'r') as fid:
grp = fid['State_%02d' % self.state_id]
dset = grp['orbitList']
orbitList = dset[:]
if isinstance(orbitRange, int):
self.metaIndx = np.argmin(abs(orbitList - orbitRange))
else:
self.metaIndx = np.where((orbitList >= orbitRange[0])
& (orbitList <= orbitRange[1]))[0]
self.orbitList = orbitList[self.metaIndx]
if self.orbitList.size == 0:
print('* Info: no orbits selected from sdmf_extract_sun.h5')
raise readSunInfo
def readData(self):
if self.metaIndx is None or self.metaIndx.size == 0:
raise readSunInfo
if isinstance(self.metaIndx, np.ndarray):
metaIndx = self.metaIndx[0]
self.metaIndx = self.metaIndx[1:]
else:
metaIndx = self.metaIndx
self.metaIndx = None
fid = h5py.File(self.sun_db, 'r')
grp = fid['State_%02d' % self.state_id]
dset = grp['metaTable']
self.mtbl = dset[metaIndx]
self.absOrbit = self.mtbl['absOrbit']
self.obmTemp = self.mtbl['obmTemp']
self.detTemp = self.mtbl['detTemp']
dset = grp['pointing']
pointing = dset[metaIndx]
self.julianDay = np.array([x[0] for x in pointing])
self.asmAngle = np.array([x[1] for x in pointing])
self.esmAngle = np.array([x[2] for x in pointing])
self.sunAzim = np.array([x[3] for x in pointing])
self.sunElev = np.array([x[4] for x in pointing])
dset = grp['cluster_01']
self.numSpectra = dset[0].shape[0]
self.coaddf = np.ones((self.numPixels,), dtype=np.uint8)
self.pet = np.zeros((self.numPixels,), dtype=float)
self.spectra = ma.array(
np.zeros((self.numSpectra, self.numPixels), dtype='float64'),
mask=np.zeros((self.numSpectra, self.numPixels), dtype=int),
hard_mask=True)
for nc in range(self.clusDef.shape[0]):
dset = grp['cluster_%02d' % (nc+1)]
x = self.clusDef[nc]
self.coaddf[x[2]:x[2]+x[3]] = dset.attrs['coaddf'][0]
self.pet[x[2]:x[2]+x[3]] = dset.attrs['PET'][0]
self.spectra[:, x[2]:x[2]+x[3]] = dset[metaIndx].astype('float64')
fid.close()
#-------------------------SECTION CALIBRATE DATA----------------------------
class SMRcalib:
"""
Listing of implemented calibration IDs:
Mask dead & blinded pixels
Co-addition division correction
1. Memory Effect correction
2. Non-Linearity correction
3. Background Signal correction
4. Stray Light correction
5. Apply fit parameters
6. Apply mirror model
7. Radiance correction
8. Combine scans to Sun Mean Reference (implied for option "db")
"""
def __init__(self):
self.funclist = (
'maskDead', 'coaddDivision', 'memoryEffect', 'nonLinearity',
'backGround', 'strayLight', 'fitParam', 'mirrorModel',
'radiance', 'combineSpectra'
)
self.funcdict = dict(
zip((self.funclist),
(self.maskDead, self.coaddDivision,
self.memoryEffect, self.nonLinearity,
self.backGround, self.strayLight,
self.fitParam, self.mirrorModel,
self.radiance, self.combineSpectra))
)
def maskDead(self, smr, verbose=False):
"""
(*) Identifies dead pixels (based on measurements)
and blinded pixels at start and end of detector array.
Parameters
----------
None
Returns
-------
mask where the dead/blinded pixels have boolean value True
Notes
-----
None
"""
if verbose:
print('(*) Perform masking of the dead/bad pixel')
smr.errorType = 'F'
#
# mask blinded pixels
#
smr.blinded = np.empty((smr.numPixels,), dtype=bool)
smr.blinded[:] = False
id_list = np.array(list(range(10)) + list(range(1024-10, 1024)))
smr.blinded[0+id_list] = True # channel 1
smr.blinded[1024+id_list] = True # channel 2
smr.blinded[2048+id_list] = True # channel 3
smr.blinded[3072+id_list] = True # channel 4
smr.blinded[4096+id_list] = True # channel 5
smr.blinded[5120+id_list] = True # channel 6
smr.blinded[6144+id_list] = True # channel 7
smr.blinded[7168+id_list] = True # channel 8
#
# mask dead pixels
#
i_masked = smr.spectra.mask.sum()
smr.spectra = ma.masked_equal(smr.spectra, 0, copy=False)
if verbose:
masked = smr.spectra.mask.sum()
print('* Info: masked %6.1f pixels/spectrum with zero signal'
% ((masked - i_masked) / float(smr.numSpectra)))
i_masked = masked
smr.spectra = ma.masked_where((smr.spectra / smr.coaddf) >= 65535.,
smr.spectra, copy=False)
if verbose:
masked = smr.spectra.mask.sum()
print('* Info: masked %6.1f pixels/spectrum with saturated signal'
% ((masked - i_masked) / float(smr.numSpectra)))
i_masked = masked
def coaddDivision(self, smr, verbose=False):
"""
        (*) Co-addition division correction divides the data by the number
        of detector read-outs that were co-added on board.
Parameters
----------
coaddf : number of measurements co-added (C3)
dimension should be one or equal to the spatial dimension
Returns
-------
Signal S_out(i,j) corrected for coadding [ADC counts]
Notes
-----
* None
"""
if verbose:
print('(*) Perform division by co-adding factor')
smr.errorType = 'M'
smr.spectra /= smr.coaddf
def memoryEffect(self, smr, verbose=False):
"""
(1) Memory Effect correction, this is the effect that the current
measurement depends on the previous measurement.
Parameters
----------
c_mem : memory correction parameters [i,j] (C1)
Returns
-------
Signal S_out(i,j) corrected for memory effect
Notes
-----
        * the approximation for the first read-out is obviously wrong, but the
        first read-out is not used anyway...
* error estimate not implemented
"""
if verbose:
print('(1) Perform memory correction (Reticon detectors)')
smr.errorType = 'A'
#
# read memory correction values
#
with h5py.File('/SCIA/share/nadc_tools/MEMcorr.h5', 'r') as fid:
dset = fid['MemTable']
memtbl = dset[:]
#
# apply memory correction
#
id_array = np.arange(smr.channelSize)
for nch in range(5):
ipx = id_array + nch * smr.channelSize
coaddf = smr.coaddf[ipx].max()
sign = np.rint(smr.spectra[0, ipx]).astype('uint16')
for nspec in range(smr.numSpectra):
corr = memtbl[nch, sign]
sign = np.rint(smr.spectra[nspec, ipx]).astype('uint16')
if coaddf > 1:
for ni in range(1, coaddf):
corr += memtbl[nch, sign]
corr /= coaddf
smr.spectra.data[nspec, ipx] -= corr
def nonLinearity(self, smr, verbose=False):
"""
(2) Non-Linearity correction.
Parameters
----------
c_nlin : non-linearity correction parameters [i,j] (C1/C2)
Returns
-------
Signal S_out(i,j) corrected for non-linearity effect
Notes
-----
* error estimate not implemented
"""
if verbose:
print('(2) Perform non-linearity correction (Epitaxx detectors)')
smr.errorType = 'A'
#
# read non-linearity correction values
#
with h5py.File('/SCIA/share/nadc_tools/NLcorr.h5', 'r') as fid:
dset = fid['CurveIndex']
curveIndex = dset[:]
dset = fid['nLinTable']
nlintbl = dset[:]
#
# apply non-linearity correction
#
id_array = np.arange(smr.channelSize)
for nch in range(5, 8):
pixelList = id_array + nch * smr.channelSize
curves = curveIndex[nch, id_array]
for nspec in range(smr.numSpectra):
sign = np.rint(smr.spectra[nspec, pixelList]).astype('uint16')
smr.spectra.data[nspec, pixelList] -= nlintbl[curves, sign]
def backGround(self, smr, verbose=False):
"""
(3) Background Signal correction, consists of the dark current (DC)
and the thermal background (BG_term).
BS(i,j) = coaddf * pet * (DC + c_ice * QE * BG_therm)
Parameters
----------
coaddf : number of measurements co-added [i,j] (C3)
pet : pixel exposure time [i,j] (C3)
DC : dark current (C2/C3)
c_ice : transmission coefficient of the ice layer (C2)
QE : quantum efficiency of the detector (C1/C2)
BG_term : thermal background,
depends on T_det, T_opt, T_cal and T_grating (C3)
Returns
-------
Signal S_out(i,j) corrected for background signal (i,j)
Notes
-----
* error estimate not implemented
"""
from math import cos, pi
if verbose:
print('(3) Perform subtraction of dark signal')
smr.errorType = 'A'
# make a copy to correct Epitaxx PET without modifying the SMR object
pet = smr.pet.copy()
pet[5 * smr.channelSize:] -= 1.18125e-3
#
# read dark correction values
#
with h5py.File('/SCIA/SDMF31/sdmf_dark.h5', 'r') as fid:
grp = fid['/DarkFit']
dset = grp['metaTable']
mtbl = dset[:]
orbit = mtbl['absOrbit']
# reject these orbits
orbit[np.where(mtbl['stateCount'] < 3)] = 999999
            metaIndx = np.argmin(abs(orbit - smr.absOrbit))
dset = grp['analogOffset']
ao = dset[metaIndx, :]
dset = grp['darkCurrent']
lc = dset[metaIndx, :]
corr = ao + pet * lc
with h5py.File('/SCIA/SDMF30/sdmf_simudark.h5', 'r') as fid:
grp = fid['/ch8']
dset = grp['orbitList']
orbitList = dset[:]
metaIndx = np.argmin(abs(orbitList - smr.absOrbit))
dset = grp['metaTable']
mtbl = dset[:]
mtbl = dset[metaIndx]
dset = grp['ao']
ao = dset[:, metaIndx]
dset = grp['lc']
lc = dset[:, metaIndx]
dset = grp['amp1']
amp1 = dset[:, metaIndx]
orbvar = cos(2 * pi * (mtbl['PHASE1'] + smr.mtbl['orbitPhase'])) \
+ mtbl['AMP2'] * cos(4 * pi * (mtbl['PHASE2']
+ smr.mtbl['orbitPhase']))
# orbsig = cos(2 * pi * (mtbl['PHASE1'] + smr.mtbl['orbitPhase'])) \
# + mtbl['SIG_AMP2'] * cos(4 * pi * (mtbl['PHASE2']
# + smr.mtbl['orbitPhase']))
indx = 7 * smr.channelSize + np.arange(smr.channelSize)
corr[indx] = ao + pet[indx] * (lc + orbvar * amp1)
smr.spectra -= corr
#
# masked invalid pixels
#
i_masked = smr.spectra.mask.sum()
tmp = np.array([~(np.isfinite(corr)),] * smr.numSpectra)
smr.spectra = ma.masked_where(tmp, smr.spectra, copy=False)
del tmp
if verbose:
masked = smr.spectra.mask.sum()
print('* Info: masked %6.1f pixels/spectrum with invalid darks'
% ((masked - i_masked) / float(smr.numSpectra)))
i_masked = masked
smr.spectra = ma.masked_less(smr.spectra, 1, copy=False)
if verbose:
masked = smr.spectra.mask.sum()
print('* Info: masked %6.1f pixels/spectrum with too large darks'
% ((masked - i_masked) / float(smr.numSpectra)))
i_masked = masked
def strayLight(self, smr, verbose=False):
"""
(4) Stray Light correction
Parameters
----------
M_stray : stray light correction matrix [i,j,x,y] (C1/C2)
S_in : rebin(S_in(i,j) + missing spectrum) [x,y]
Returns
-------
Signal S_out(i,j) corrected for stray light
Notes
-----
* error estimate not implemented
"""
if verbose:
print('(4) Perform subtraction of spectral stray-light')
smr.errorType = 'A'
#
# read stray-light correction matrix
#
with h5py.File('/SCIA/share/nadc_tools/Straylight.h5', 'r') as fid:
dset = fid['strayMatrix']
strayMatrix = dset[:]
dset = fid['strayGhost']
strayGhost = dset[:]
dset = fid['grid_in']
grid_in = dset[:]
dset = fid['grid_out']
grid_out = dset[:]
# calculate derivative of grid_out
deriv_out = (np.roll(grid_out, -1) - np.roll(grid_out, 1))/2.
deriv_out[0] = (4 * grid_out[1] - 3 * grid_out[0] - grid_out[2])/2.
deriv_out[-1] = (3 * grid_out[-1] - 4 * grid_out[-2] + grid_out[-3])/2.
# obtain lower and upper indices for regridding, per channel
        low_indx = np.zeros(grid_in.shape, dtype=int)
        high_indx = np.zeros(grid_in.shape, dtype=int)
input_ch = np.floor(grid_in / smr.channelSize)
for nc in range(smr.numChannels):
w = (input_ch == nc)
grid_ch = grid_in[w]
# trick to omit round-off errors (fast - only integer manipulation)
ll = np.empty(grid_ch.shape, dtype=np.uint16)
ll[1:] = (grid_ch[:-1] + grid_ch[1:]).astype('uint16')
ll[(ll % 2) == 1] += 1
ll //= 2
ll[0] = nc * smr.channelSize
ul = np.roll(ll, -1)
ul[-1] = (nc+1) * smr.channelSize
low_indx[w] = ll
high_indx[w] = ul
# reduce the spectrum, according to grid_in
# scale_mask: compensates the reduced spectrum for masked read-outs
# fillings: compensates for the PET w.r.t. 1/16 sec
spec_r = np.zeros((smr.numSpectra, grid_in.shape[0]), dtype='float64')
for ni in range(grid_in.shape[0]):
num = ma.count(smr.spectra[:, low_indx[ni]:high_indx[ni]], axis=1)
num[num < 1] = 1
scale_mask = num / float(high_indx[ni] - low_indx[ni])
            fillings = 16 * smr.pet[int(grid_in[ni])]
spec_r[:, ni] = smr.spectra[:,
low_indx[ni]:high_indx[ni]].sum(axis=1)
spec_r[:, ni] /= (fillings * scale_mask)
# print(ni, low_indx[ni], high_indx[ni], spec_r[120,ni],
# scale_mask[120], fillings, deriv_out[ni])
# reverse channel 2 (using the numpy 'view' method)
tmp = spec_r[:, (input_ch == 1)]
spec_r[:, (input_ch == 1)] = tmp[:, ::-1]
# obtain straylight spectrum
stray_r = np.dot(spec_r, np.transpose(strayMatrix))
# correct for sampling distance of the output grid
stray_r /= deriv_out
# resample straylight spectrum to SCIA spectrum
stray = np.zeros((smr.numSpectra, smr.numPixels), dtype='float64')
for ns in range(smr.numSpectra):
stray[ns, :] = np.interp(np.arange(smr.numPixels, dtype='float64'),
grid_out, stray_r[ns, :])
# blank out blinded pixels
stray[:, smr.blinded] = 0.
# scale to original PET of spectra
stray *= ((16 * smr.pet * smr.coaddf) / smr.coaddf.max())
# reverse channel 2 (using the numpy 'view' method)
tmp = stray[:, 1024:2048]
tmp[:, :] = tmp[:, ::-1]
# calculate stray-light contribution of the ghosts
ghosts = np.zeros((smr.numSpectra, smr.numPixels), dtype='float64')
for ng in range(strayGhost.shape[0]):
pp = np.arange(strayGhost[ng, 3], strayGhost[ng, 5], dtype=int)
pos = np.polyval(strayGhost[ng, 2::-1], pp)
fact = np.polyval(strayGhost[ng, 10:6:-1], pp)
mask = ((pos >= strayGhost[ng, 4]) & (pos <= strayGhost[ng, 6])
& (fact > 0))
pos = pos[mask]
ghost = fact[mask] * smr.spectra[:, pp[mask]]
if pos[0] > pos[-1]:
pos = pos[::-1]
ghost = ghost[:, ::-1]
pixels = np.arange(int(pos[0]), int(pos[-1]), dtype=int)
for ns in range(smr.numSpectra):
ghosts[ns, pixels] += np.interp(pixels, pos, ghost[ns, :])
# for ni in range(smr.numPixels):
# print(ni, smr.spectra[120,ni], stray[120,ni], ghosts[120,ni])
# blank out blinded pixels
ghosts[:, smr.blinded] = 0.
# subtract straylight from spectra
smr.spectra -= (stray + ghosts)
def fitParam(self, smr, verbose=False):
"""
(5) Correct Sun (state 62) for the intensity change during the scan
Parameters
----------
* Read fit parameters from sun_fitpars.h5
Returns
-------
* Sun measurements for intensity change during scan (Diffuser/ESM)
Notes
-----
* error estimate not implemented
"""
if verbose:
print('(5) Perform correction for diffuser effects')
smr.errorType = 'M'
#
# read fit parameters for the correction of diffuser effects
#
with h5py.File('/SCIA/SDMF31/Auxiliary/sun_fitpars.h5', 'r') as fid:
dset = fid['fit_parameters']
fit_param = dset[:]
jd = smr.julianDay.mean() - fit_param[0, 0]
saa = smr.sunAzim[1:-1].mean() - fit_param[1, 0]
sza = smr.sunElev - fit_param[2, 0]
saa_grid = np.arange(fit_param[3, 0]) / (fit_param[3, 0] - 1) \
* (fit_param[5, 0]-fit_param[4, 0]) + fit_param[4, 0] \
- fit_param[1, 0]
sza_grid = np.arange(fit_param[6, 0]) / (fit_param[6, 0] - 1) \
* (fit_param[8, 0]-fit_param[7, 0]) + fit_param[7, 0] \
- fit_param[2, 0]
for ip in range(smr.numPixels):
if fit_param[0, ip] == 0:
continue
np0 = 19
np1 = 19 + fit_param[3, ip]
saa_lut = fit_param[np0:np1, ip]
np0 += fit_param[3, ip]
np1 += fit_param[6, ip]
sza_lut = fit_param[np0:np1, ip]
saa_val = np.interp(saa, saa_grid, saa_lut)
sza_val = np.interp(sza, sza_grid, sza_lut)
slope = saa_val + fit_param[10, ip] + fit_param[11, ip] * jd \
+ fit_param[12, ip] * jd**2
ymod = (1 + slope * sza) * (1 + sza_val)
smr.spectra.data[:, ip] /= ymod
def mirrorModel(self, smr, verbose=False):
"""(6) Apply Sun mirror model
Parameters
----------
Returns
-------
Notes
-----
Implementation TBD
"""
if verbose:
print('(6) Perform correction for mirror degradation')
smr.errorType = 'M'
pass
def radiance(self, smr, verbose=False):
        """
        (7) Radiance correction: convert the signal to the radiance which the
        telescope received in the nadir direction, i.e. all the light coming
        from the Earth within the FoV.
        Parameters
        ----------
        F_t : radiance sensitivity function, obtained from on-ground,
        updated by monitor [i,j] (C1/C2)
        Returns
        -------
        Radiance I(i,j)
        Notes
        -----
        * error estimate not implemented
        """
        from pynadc.scia import db, lv1
if verbose:
print('(7) Perform radiance correction')
smr.errorType = 'M'
# obtain wavelength grid from level 1b product
orbits = [int(smr.absOrbit)]
fileList = db.get_product_by_type(prod_type='1',
proc_best=True,
orbits=orbits)
if len(fileList) > 0:
try:
l1b = lv1.File(fileList[0])
l1b.getSRS()
            except lv1.fmtError as e:
print(e.msg)
sys.exit(1)
wv_interp = True
smr.wvlen = np.array(l1b.srs['wavelength'][0])
l1b.__del__()
else:
wv_interp = False
smr.wvlen = smr.rspm['wvlen'] # use rspm grid as approximation
# reverse channel 2 (using the numpy 'view' method)
tmp = smr.spectra.data[:, 1024:2048]
tmp[:, :] = tmp[:, ::-1]
# make a copy to correct Epitaxx PET without modifying the SMR object
pet = smr.pet.copy()
pet[5 * smr.channelSize:] -= 1.18125e-3
# linear interpolate for obs_ele, obs_asm
obs_ele = np.array(smr.sunElev)
obs_asm = -45 - 0.5 * np.array(smr.asmAngle)
for no in range(smr.numSpectra):
na = np.argmin(abs(smr.rspm['ang_asm'][0, :] - obs_asm[no]))
if obs_asm[no] < smr.rspm['ang_asm'][0, na] and na > 0: na -= 1
frac_asm = (obs_asm[no] - smr.rspm['ang_asm'][0, na]) \
/ (smr.rspm['ang_asm'][0, na+1] - smr.rspm['ang_asm'][0, na])
ne = np.argmin(abs(smr.rspm['ang_ele'][0, :] - obs_ele[no]))
if obs_ele[no] < smr.rspm['ang_ele'][0, ne] and ne > 0: ne -= 1
frac_ele = (obs_ele[no] - smr.rspm['ang_ele'][0, ne]) \
/ (smr.rspm['ang_ele'][0, ne+1] - smr.rspm['ang_ele'][0, ne])
radsensAzi1 = (1-frac_asm) \
* smr.rspm['sensitivity'][0, ne, na, :] \
+ frac_asm * smr.rspm['sensitivity'][0, ne, na+1, :]
radsensAzi2 = (1-frac_asm) \
* smr.rspm['sensitivity'][0, ne+1, na, :] \
+ frac_asm * smr.rspm['sensitivity'][0, ne+1, na+1, :]
radsens = (1-frac_ele) * radsensAzi1 + frac_ele * radsensAzi2
if wv_interp:
tmp = radsens.copy()
for nc in range(smr.numChannels):
i_mn = nc * smr.channelSize
i_mx = i_mn + smr.channelSize
radsens[i_mn:i_mx] = np.interp(
smr.wvlen[i_mn:i_mx],
smr.rspm['wvlen'][0, i_mn:i_mx],
tmp[i_mn:i_mx])
del tmp
smr.spectra.data[no, :] /= (pet * radsens)
# mask invalid values
smr.spectra = ma.masked_less(smr.spectra, 0, copy=False)
smr.spectra = ma.masked_invalid(smr.spectra, copy=False)
def combineSpectra(self, smr, verbose=False):
"""
(8) Calculate Sun Mean Reference Spectrum
Parameters
----------
Returns
-------
* mean of the 238 scans, skip first and last scan
* variance of the 238 scans, skip first and last scan
* bad dead pixel mask
Notes
-----
* error estimate not implemented
"""
if verbose:
print('(8) Return average of the Sun spectra')
smr.errorType = 'A'
# omit first and last spectra
smr.smr = ma.mean(smr.spectra[1:-1, :], axis=0)
smr.smrError = ma.std(smr.spectra[1:-1, :], axis=0)
smr.bdpm = (ma.count_masked(smr.spectra[1:-1, :], axis=0) > 0)
smr.smrVar = np.zeros(smr.numPixels, dtype='float64')
smr.smrSlope = np.zeros(smr.numPixels, dtype='float64')
x = np.arange(238)
A = np.vstack([x, np.ones(238)]).T
ipList = np.arange(smr.numPixels)[~smr.bdpm]
for ip in ipList:
y = smr.spectra.data[1:-1, ip]
m, c = np.linalg.lstsq(A, y)[0]
smr.smrVar[ip] = np.var(y - (m * x + c))
smr.smrSlope[ip] = m
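        # Note: the least-squares fit above removes a per-pixel linear trend
        # from the 238 scans, so smrVar measures the scatter around that trend
        # (rather than around the mean) and smrSlope stores the fitted slope.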
#-------------------------SECTION WRITE DATA--------------------------------
class SMRdb:
def __init__(self, args=None, db_name='./sdmf_smr.h5',
truncate=False, calibration=None, verbose=False):
if args:
self.db_name = args.db_name
self.calibration = args.calibration.copy()
self.truncate = args.truncate
self.verbose = args.verbose
else:
self.db_name = db_name
self.calibration = calibration
self.truncate = truncate
self.verbose = verbose
def checkDataBase(self):
with h5py.File(self.db_name, 'r') as fid:
mystr = ','.join(list(self.calibration.astype('str')))
if fid.attrs['calibOptions'] != mystr:
print('Fatal:', 'incompatible calibration options')
raise dbError('incompatible calibration options')
myversion = '%(major)d.%(minor)d' % _swVersion
if fid.attrs['swVersion'].rsplit('.', 1)[0] != myversion:
print('Fatal:', 'incompatible with _swVersion')
raise dbError('incompatible with _swVersion')
myversion = '%(major)d.%(minor)d' % _dbVersion
if fid.attrs['dbVersion'].rsplit('.', 1)[0] != myversion:
print('Fatal:', 'incompatible with _dbVersion')
raise dbError('incompatible with _dbVersion')
myversion = '%(major)d.%(minor)d' % _calibVersion
if fid.attrs['calibVersion'].rsplit('.', 1)[0] != myversion:
print('Fatal:', 'incompatible with _calibVersion')
raise dbError('incompatible with _calibVersion')
def fill_mtbl(self, smr):
from datetime import datetime
fmtMTBL = \
'float64,a20,uint16,uint16,float32,float32,float32,float32' \
+ ',float32,float32,float32,float32,8float32'
nameMTBL = ('julianDay', 'entryDate', 'absOrbit', 'quality',
'orbitPhase', 'longitude', 'latitude', 'asmAngle',
'esmAngle', 'sunAzim', 'sunElev', 'obmTemp',
'detTemp')
self.mtbl = np.empty(1, dtype=fmtMTBL)
self.mtbl.dtype.names = nameMTBL
self.mtbl['julianDay'] = smr.mtbl['julianDay']
self.mtbl['entryDate'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.mtbl['absOrbit'] = smr.absOrbit
self.mtbl['quality'] = 0
self.mtbl['orbitPhase'] = smr.mtbl['orbitPhase']
self.mtbl['longitude'] = smr.mtbl['longitude']
self.mtbl['latitude'] = smr.mtbl['latitude']
self.mtbl['asmAngle'] = smr.asmAngle[1:-1].mean()
self.mtbl['esmAngle'] = smr.esmAngle[1:-1].mean()
self.mtbl['sunAzim'] = smr.sunAzim[1:-1].mean()
self.mtbl['sunElev'] = smr.sunElev[1:-1].mean()
self.mtbl['obmTemp'] = smr.mtbl['obmTemp']
self.mtbl['detTemp'] = smr.mtbl['detTemp']
def create(self, smr):
with h5py.File(self.db_name, 'w', libver='latest') as fid:
ds = fid.create_dataset('orbitList', dtype='uint16',
data=smr.absOrbit.reshape(1,),
maxshape=(None,), chunks=(512,))
ds = fid.create_dataset('metaTable',
data=self.mtbl,
chunks=(16384 // self.mtbl.dtype.itemsize,),
shuffle=True, compression='gzip',
compression_opts=1, maxshape=(None,))
ds = fid.create_dataset('smr',
data=smr.smr.reshape(1, smr.numPixels),
maxshape=(None, smr.numPixels),
chunks=(8, smr.numPixels),
compression='gzip', compression_opts=1,
shuffle=True)
ds = fid.create_dataset('smrVariance',
data=smr.smrVar.reshape(1, smr.numPixels),
maxshape=(None, smr.numPixels),
chunks=(8, smr.numPixels),
compression='gzip', compression_opts=1,
shuffle=True)
ds = fid.create_dataset('smrSlope',
data=smr.smrSlope.reshape(1, smr.numPixels),
maxshape=(None, smr.numPixels),
chunks=(8, smr.numPixels),
compression='gzip', compression_opts=1,
shuffle=True)
ds = fid.create_dataset('smrError',
data=smr.smrError.reshape(1, smr.numPixels),
maxshape=(None, smr.numPixels),
chunks=(8, smr.numPixels),
compression='gzip', compression_opts=1,
shuffle=True)
ds = fid.create_dataset('bdpm',
data=smr.bdpm.reshape(1, smr.numPixels),
maxshape=(None, smr.numPixels),
chunks=(8, smr.numPixels),
compression='gzip', compression_opts=1,
shuffle=True)
if smr.wvlen is not None:
ds = fid.create_dataset('wavelength',
data=smr.wvlen.reshape(1,
smr.numPixels),
maxshape=(None, smr.numPixels),
chunks=(8, smr.numPixels),
compression='gzip', compression_opts=1,
shuffle=True)
# create attributes in the HDF5 root
mystr = ','.join(list(self.calibration.astype('str')))
fid.attrs['calibOptions'] = mystr
fid.attrs['swVersion'] = \
'%(major)d.%(minor)d.%(revision)d' % _swVersion
fid.attrs['dbVersion'] = \
'%(major)d.%(minor)d.%(revision)d' % _dbVersion
fid.attrs['calibVersion'] = \
'%(major)d.%(minor)d.%(revision)d' % _calibVersion
def append(self, smr):
with h5py.File(self.db_name, 'r+') as fid:
dset = fid['orbitList'] # orbitList
ax1 = dset.len()
dset.resize(ax1+1, axis=0)
dset[ax1] = smr.absOrbit
orbitList = dset[:]
dset = fid['metaTable'] # metaTable
dset.resize(ax1+1, axis=0)
dset[ax1] = self.mtbl
dset = fid['smr'] # SMR
dset.resize(ax1+1, axis=0)
dset[ax1, :] = smr.smr.reshape(1, smr.numPixels)
dset = fid['smrVariance'] # SMR(variance)
dset.resize(ax1+1, axis=0)
dset[ax1, :] = smr.smrVar.reshape(1, smr.numPixels)
dset = fid['smrSlope'] # SMR(slope)
dset.resize(ax1+1, axis=0)
dset[ax1, :] = smr.smrSlope.reshape(1, smr.numPixels)
dset = fid['smrError'] # SMR(error)
dset.resize(ax1+1, axis=0)
dset[ax1, :] = smr.smrError.reshape(1, smr.numPixels)
dset = fid['bdpm'] # BDPM
dset.resize(ax1+1, axis=0)
dset[ax1, :] = smr.bdpm.reshape(1, smr.numPixels)
if smr.wvlen is not None:
dset = fid['wavelength'] # waveLength
dset.resize(ax1+1, axis=0)
dset[ax1, :] = smr.wvlen.reshape(1, smr.numPixels)
def rewrite(self, smr):
with h5py.File(self.db_name, 'r+') as fid:
dset = fid['orbitList'] # orbitList
orbitList = dset[:]
ax1 = np.nonzero(orbitList == smr.absOrbit)[0][0]
dset = fid['metaTable'] # metaTable
dset[ax1] = self.mtbl
dset = fid['smr'] # SMR
dset[ax1, :] = smr.smr.reshape(1, smr.numPixels)
dset = fid['smrVariance'] # SMR(variance)
dset[ax1, :] = smr.smrVar.reshape(1, smr.numPixels)
dset = fid['smrSlope'] # SMR(slope)
dset[ax1, :] = smr.smrSlope.reshape(1, smr.numPixels)
dset = fid['smrError'] # SMR(error)
dset[ax1, :] = smr.smrError.reshape(1, smr.numPixels)
dset = fid['bdpm'] # BDPM
dset[ax1, :] = smr.bdpm.reshape(1, smr.numPixels)
if smr.wvlen is not None:
dset = fid['wavelength'] # waveLength
dset[ax1, :] = smr.wvlen.reshape(1, smr.numPixels)
def store(self, smr, verbose=False):
self.fill_mtbl(smr)
if not h5py.is_hdf5(self.db_name):
if verbose: print('* Info: create new database')
self.create(smr)
elif self.truncate:
if verbose: print('* Info: replace database (and start a new)')
self.create(smr)
else:
self.checkDataBase()
with h5py.File(self.db_name, 'r') as fid:
dset = fid['orbitList']
orbitList = dset[:]
if np.nonzero(orbitList == smr.absOrbit)[0].size == 0:
if verbose: print('* Info: append new SMR to database')
self.append(smr)
else:
if verbose: print('* Info: overwrite entry in database')
self.rewrite(smr)
#-------------------------SECTION DISPLAY DATA------------------------------
class SMRshow:
def __init__(self, args):
self.refCalibID = -1
if args.effect:
if args.calibration.size == 0:
                print('Fatal: need at least one calibration option')
sys.exit(1)
elif args.calibration.size == 1:
self.refCalibID = 0
else:
self.refCalibID = args.calibration[-2]
def showPixel(self, smr, pixelID):
import matplotlib.pyplot as plt
plt.figure(1, figsize=(11.69, 8.27))
plt.title('SMR - State 62 - Orbit %d - Pixel %d'
% (smr.absOrbit, pixelID))
plt.grid(True)
plt.plot(smr.spectra[:, pixelID], 'b+-')
plt.show()
def showSpectrum(self, args, smr):
import matplotlib.pyplot as plt
pixelList = []
for nch in args.channel:
pixelList += list(range((nch-1) * smr.channelSize,
nch * smr.channelSize))
fig = plt.figure(1, figsize=(11.69, 8.27))
fig.text(0.5, 0.01, 'Temperatures of OBM and Detectors: %6.2f - '
% (smr.obmTemp) + np.array_str(smr.detTemp, precision=2),
fontsize=9, horizontalalignment='center',
verticalalignment='bottom')
plt.title('SMR - State 62 - Orbit %d' % smr.absOrbit)
plt.grid(True)
if smr.wvlen is not None:
plt.xlim([smr.wvlen[pixelList[0]], smr.wvlen[pixelList[-1]]])
plt.xlabel('Wavelength (nm)')
for nch in args.channel:
if (nch % 2) == 0:
icol = 'b.'
else:
icol = 'r.'
i_mx = nch * smr.channelSize
i_mn = i_mx - smr.channelSize
if smr.smr is None:
plt.plot(smr.wvlen[i_mn:i_mx],
smr.spectra[120, i_mn:i_mx], icol)
else:
plt.plot(smr.wvlen[i_mn:i_mx], smr.smr[i_mn:i_mx], icol)
else:
plt.xlabel('Pixel number')
plt.xlim([pixelList[0], pixelList[-1]])
if smr.smr is None:
plt.plot(pixelList, smr.spectra[120, pixelList], 'b.')
else:
plt.plot(pixelList, smr.smr[pixelList], 'b.')
plt.ylabel('Signal')
# remove outliers
tmp = smr.smr[pixelList].compressed()
tmp = tmp[np.isfinite(tmp)]
tmp.sort()
        plt.ylim((0, tmp[99 * tmp.size // 100]))
plt.show()
def showEffect(self, args, smr):
import matplotlib.pyplot as plt
pixelList = []
for nch in args.channel:
pixelList += list(range((nch-1) * smr.channelSize,
nch * smr.channelSize))
fig, axs = plt.subplots(nrows=2, ncols=1, sharex=True,
figsize=(11.69, 8.27))
ax = axs[0]
effect = smr.refSpectra[120, pixelList].copy()
if smr.errorType == 'M':
str_effect = '%d / %d' % (args.calibration[-2], args.calibration[-1])
if smr.smr is None:
effect /= smr.spectra[120, pixelList]
else:
effect /= smr.smr[pixelList]
elif smr.errorType == 'A':
str_effect = '%d - %d' % (args.calibration[-2], args.calibration[-1])
if smr.smr is None:
effect -= smr.spectra[120, pixelList]
else:
effect -= smr.smr[pixelList]
else:
pass
ax.plot(pixelList, effect, 'b.')
ax.set_xlim([pixelList[0], pixelList[-1]])
ax.grid(True)
ax.set_title('SMR - calibration: ' + str_effect)
ax = axs[1]
if smr.smr is None:
ax.plot(pixelList, smr.spectra[120, pixelList], 'b.')
else:
ax.plot(pixelList, smr.smr[pixelList], 'b.')
ax.set_xlim([pixelList[0], pixelList[-1]])
ax.grid(True)
ax.set_title('SMR - calibration: '
+ ','.join(str(x) for x in args.calibration))
fig.suptitle('SMR - State 62 - Orbit %d'% smr.absOrbit)
plt.show()
def screen(self, args, smr):
if args.pixel:
self.showPixel(smr, args.pixel)
elif args.effect:
self.showEffect(args, smr)
else:
self.showSpectrum(args, smr)
#-------------------------SECTION ARGPARSE----------------------------------
def handleCmdParams():
from argparse import ArgumentParser, ArgumentTypeError
import re
def parseCalibList(str):
"""
- always perform 'maskDead' and 'coaddDivision'
- optional are: (1) 'memoryEffect', (2) 'nonLinearity',
(3)'backGround', (4) 'strayLight', (5) 'fitParam',
(6) 'mirrorModel', (7) 'radiance', (8) 'combineSpectra'
- note: option 'db' implies 'combineSpectra'
- internally we count from 0 to 9
"""
if str.lower() == 'none':
return np.arange(2, dtype=np.uint8)
if str.lower() == 'full':
return np.arange(10, dtype=np.uint8)
p = re.compile(r'\D')
if p.match(str) is not None:
msg = 'Calibration IDs should be digits'
raise argparse.ArgumentTypeError(msg)
id_list = np.array(str.split(','), dtype=np.uint8)
if id_list.min() < 1 or id_list.max() > 8:
msg = 'Only calibration IDs between 1 and 8 are allowed'
raise argparse.ArgumentTypeError(msg)
# add required calibration steps
return np.concatenate(([0, 1], id_list+1))
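    # Worked example (derived from the mapping above): the user string
    # "1,3,4" becomes array([0, 1, 2, 4, 5], dtype=uint8), i.e. maskDead,
    # coaddDivision, memoryEffect, backGround and strayLight.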
def parseOrbitList(str):
msg1 = "'" + str + "' is not a range or number." \
+ " Expected forms like '20000-25000' or '20000'."
msg2 = "'" + str + "' is not valid orbit number."
if str.lower() == 'all':
return None
m = re.match(r'(\d+)(?:-(\d+))?$', str)
if not m:
raise ArgumentTypeError(msg1)
v1 = int(m.group(1))
if m.group(2):
v2 = int(m.group(2))
if v1 < 1 or v2 > 100000:
raise ArgumentTypeError(msg2)
return (v1, v2)
else:
return v1
def parseChannelList(str):
msg1 = "'" + str + "' is not a range or number." \
+ " Expected forms like '1-5' or '2'."
msg2 = "'" + str + "' is not valid channel ID."
m = re.match(r'(\d+)(?:-(\d+))?$', str)
if not m:
raise ArgumentTypeError(msg1)
v1 = int(m.group(1))
v2 = int(m.group(2) or v1)
if v1 < 1 or v2 > 8:
raise ArgumentTypeError(msg2)
return list(range(v1, v2+1))
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Perform calibration on Sciamachy State 62 data',
epilog=SMRcalib.__doc__)
parser.add_argument('-v', '--verbose', action='store_true',
help='show documentation of object SMRcalib')
mystr = '%(major)d.%(minor)d.%(revision)d' % _swVersion
parser.add_argument('-V', '--version', action='version',
version='%(prog)s ' + mystr,
help='Display the version number then quit')
# define subparser to display SMR
subparsers = parser.add_subparsers(title='subcommands',
dest='subparser_name')
parser_db = subparsers.add_parser('db',
help='options to archive SMR in database')
parser_db.add_argument('db_name', nargs='?', type=str,
default='./sdmf_smr.h5',
help='write to hdf5 database')
parser_db.add_argument('--orbit', type=parseOrbitList,
default=None,
help='process data from a orbit range')
parser_db.add_argument('-c', '--calibration', default='full',
type=parseCalibList,
help=('calibration IDs (comma separated)'
', or \"none\" or \"full\"'))
parser_db.add_argument('--truncate', action='store_true', default=False,
help='destroy database and start a new one')
parser_db.add_argument('-P', '--progressbar', action='store_true',
default=False, help='display progress bar')
parser_show = subparsers.add_parser('show',
help='options to display SMR')
parser_show.add_argument('--orbit', type=int, default=12005,
help='process Sun spectrum of given orbit')
parser_show.add_argument('-c', '--calibration', default='full',
type=parseCalibList,
help=('calibration IDs (comma separated)'
', or \"none\" or \"full\"'))
parser_show.add_argument('--chan', type=parseChannelList,
dest='channel', default='1-8',
help='show data of one or more channels')
parser_show.add_argument('--pixel', type=int,
default=None, help='show data of one pixel')
parser_show.add_argument('--effect', action='store_true', default=False,
help='show effect of last calibration step')
return parser.parse_args()
# update_progress() : Displays or updates a console progress bar
## Accepts a float between 0 and 1. Any int will be converted to a float.
## A value under 0 represents a 'halt'.
## A value at 1 or bigger represents 100%
def update_progress(progress):
barLength = 40 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{}] {:.2%} {}".format("#"*block + "-"*(barLength-block), progress, status)
sys.stdout.write(text)
sys.stdout.flush()
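# Hedged usage sketch for update_progress: "n_items" and "process_one" below
# are hypothetical; the function expects the completed fraction as a float in
# [0, 1]:
#
#     for ni in range(n_items):
#         process_one(ni)
#         update_progress((ni + 1) / float(n_items))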
#-------------------------SECTION MAIN--------------------------------------
if __name__ == '__main__':
sys.path.append('/opt/local/EOS/bin')
#
#
#
args = handleCmdParams()
if args.verbose: print(args)
#
# show data
#
if args.subparser_name == 'show':
# create object to show figure
obj_fig = SMRshow(args)
# create object with calibration routines
obj_cal = SMRcalib()
# read data
try:
smr = SDMFextractSun()
smr.selectOrbits(args.orbit)
smr.readData()
# read required keydata
if obj_cal.funclist.index('radiance') in args.calibration:
smr.rspm = get_h5_RadSensMoni()
# perform calibration
for calibID in args.calibration:
obj_cal.funcdict[obj_cal.funclist[calibID]](smr, verbose=args.verbose)
if calibID == obj_fig.refCalibID:
smr.refSpectra = smr.spectra.copy()
except readSunInfo:
sys.exit(1)
except:
print("Unexpected error:", sys.exc_info()[0])
raise
else:
obj_fig.screen(args, smr)
#
# store data
#
if args.subparser_name == 'db':
# create object with calibration routines
obj_cal = SMRcalib()
# make sure we combine the Sun spectra
indx = obj_cal.funclist.index('combineSpectra')
if indx not in args.calibration:
args.calibration = np.append(args.calibration, indx)
# create database object
obj_db = SMRdb(args)
# read data
smr = SDMFextractSun()
try:
smr.selectOrbits(args.orbit)
except readSunInfo:
sys.exit(0)
# read required keydata
if obj_cal.funclist.index('radiance') in args.calibration:
smr.rspm = get_h5_RadSensMoni()
# process all selected orbits
if args.progressbar:
p_todo = float(smr.orbitList.size)
p_done = smr.orbitList.size - smr.metaIndx.size
update_progress(p_done / p_todo)
while True:
try:
smr.readData()
# perform calibration
for calibID in args.calibration:
obj_cal.funcdict[obj_cal.funclist[calibID]](smr, verbose=args.verbose)
obj_db.store(smr, verbose=args.verbose)
if args.progressbar:
p_done = smr.orbitList.size - smr.metaIndx.size
update_progress(p_done / p_todo)
except readSunInfo:
sys.exit(1)
except:
print("Unexpected error:", sys.exc_info()[0])
raise
|
bsd-3-clause
|
chrisdev/django-pandas
|
django_pandas/utils.py
|
1
|
2859
|
# coding: utf-8
from django.core.cache import cache
from django.utils.encoding import force_text
from django.db.models import Field
def get_model_name(model):
"""
Returns the name of the model
"""
return model._meta.model_name
def replace_from_choices(choices):
def inner(values):
return [choices.get(v, v) for v in values]
return inner
def get_base_cache_key(model):
return 'pandas_%s_%s_%%s_rendering' % (
model._meta.app_label, get_model_name(model))
def get_cache_key(obj):
return get_base_cache_key(obj._meta.model) % obj.pk
def invalidate(obj):
cache.delete(get_cache_key(obj))
def invalidate_signal_handler(sender, **kwargs):
invalidate(kwargs['instance'])
def replace_pk(model):
base_cache_key = get_base_cache_key(model)
def get_cache_key_from_pk(pk):
return None if pk is None else base_cache_key % str(pk)
def inner(pk_series):
pk_series = pk_series.where(pk_series.notnull(), None)
cache_keys = pk_series.apply(
get_cache_key_from_pk, convert_dtype=False)
unique_cache_keys = list(filter(None, cache_keys.unique()))
if not unique_cache_keys:
return pk_series
out_dict = cache.get_many(unique_cache_keys)
if len(out_dict) < len(unique_cache_keys):
out_dict = dict([(base_cache_key % obj.pk, force_text(obj))
for obj in model.objects.filter(
pk__in=list(filter(None, pk_series.unique())))])
cache.set_many(out_dict)
return list(map(out_dict.get, cache_keys))
return inner
def build_update_functions(fieldnames, fields):
for fieldname, field in zip(fieldnames, fields):
if not isinstance(field, Field):
yield fieldname, None
else:
if field and field.choices:
choices = dict([(k, force_text(v))
for k, v in field.flatchoices])
yield fieldname, replace_from_choices(choices)
elif field and field.get_internal_type() == 'ForeignKey':
yield fieldname, replace_pk(get_related_model(field))
def update_with_verbose(df, fieldnames, fields):
for fieldname, function in build_update_functions(fieldnames, fields):
if function is not None:
df[fieldname] = function(df[fieldname])
def get_related_model(field):
"""Gets the related model from a related field"""
model = None
if hasattr(field, 'related_model') and field.related_model: # pragma: no cover
model = field.related_model
# Django<1.8 doesn't have the related_model API, so we need to use rel,
# which was removed in Django 2.0
elif hasattr(field, 'rel') and field.rel: # pragma: no cover
model = field.rel.to
return model
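# Hedged usage sketch for update_with_verbose: "df" is assumed to be a
# DataFrame built from a queryset and "MyModel" a hypothetical model; the
# fields list must line up with the fieldnames:
#
#     fieldnames = ['status', 'author']
#     fields = [MyModel._meta.get_field(name) for name in fieldnames]
#     update_with_verbose(df, fieldnames, fields)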
|
bsd-3-clause
|
xwolf12/scikit-learn
|
examples/applications/plot_model_complexity_influence.py
|
323
|
6372
|
"""
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
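# Note: coef_ is a scipy sparse matrix here because the SGDClassifier
# configuration below uses .sparsify() as its postfit_hook, hence the
# .toarray() call before counting the non-zero entries.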
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
|
bsd-3-clause
|
pythonvietnam/scikit-learn
|
examples/linear_model/plot_ard.py
|
248
|
2622
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha_ of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
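# For illustration (not part of the original example): ARDRegression also
# exposes the estimated noise precision ``alpha_`` and the per-weight
# precisions ``lambda_``, which can be compared with the simulated values
# alpha_ = 50 and lambda_ = 4 used above.
print("Estimated noise precision: %.1f (simulated value: %.1f)"
      % (clf.alpha_, alpha_))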
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
bsd-3-clause
|
fejoa/IVANWorldmapResearch
|
WorldBuild_Hybrid3d.py
|
1
|
7670
|
# -*- coding: utf-8 -*-
"""
IVAN Worldmap Research
Copyright (C) Ryan van Herel
Released under the GNU General
Public License
See LICENSING which should be included
along with this file for more details
@author: fejoa
"""
import os
from random import randint
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import axes3d
from matplotlib.colors import LinearSegmentedColormap
cdict = {'red': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 0.0, 1.0),
(1.0, 0.1, 1.0)),
'blue': ((0.0, 0.0, 0.1),
(0.5, 1.0, 0.0),
(1.0, 0.0, 0.0)),
'alpha': ((0.0, 1.0, 1.0),
# (0.25,1.0, 1.0),
(0.5, 0.3, 0.3),
# (0.75,1.0, 1.0),
(1.0, 1.0, 1.0))
}
valpuri = LinearSegmentedColormap('valpurus', cdict)
plt.register_cmap(cmap=valpuri)
class worldmap:
def __init__(self, length, width, smooth, steps, GENERATE_CONTINENTS):
self.__length = length
self.__width = width
self.__area = length * width
self.__AltitudeBuffer = np.zeros((width, length))
self.__OldAltitudeBuffer = np.zeros((width, length))
self.__DisplayMap = np.zeros((width, length))
self.__gen_initial_map(smooth, steps, GENERATE_CONTINENTS)
def __gen_initial_map(self, smooth, steps, GENERATE_CONTINENTS):
#create initial random map
HYBRID = 2
if GENERATE_CONTINENTS == HYBRID or GENERATE_CONTINENTS == 1:
for x in range(self.__width):
for y in range(self.__length):
self.__AltitudeBuffer[x][y] = (4000 - randint(0, 8000))
if GENERATE_CONTINENTS == HYBRID or GENERATE_CONTINENTS == 0:
#create "splodges"
for x in range(self.__width/2):
for y in range(self.__length/2):
self.__AltitudeBuffer[x][y] += (randint(0, x*y)) - 800
for x in range(self.__width/2, self.__width):
for y in range(self.__length/2, self.__length):
self.__AltitudeBuffer[x][y] += (randint(0, (self.__width-x)*(self.__length-y))) - 800
for x in range(self.__width/2):
for y in range(self.__length/2, self.__length):
self.__AltitudeBuffer[x][y] += (randint(0, (x)*(self.__length-y))) - 800
for x in range(self.__width/2, self.__width):
for y in range(self.__length/2):
self.__AltitudeBuffer[x][y] += (randint(0, (self.__width-x)*(y))) - 800
if smooth == 1:
self.__smooth_altitude(steps)
print "DONE"
def __quantize_grid(self):
LAND = 1
SEA = 0
for x in range(self.__width):
for y in range(self.__length):
if self.__AltitudeBuffer[x][y] > 0.0:
self.__DisplayMap[x][y] = LAND
else:
self.__DisplayMap[x][y] = SEA
def __smooth_altitude(self, steps):
for c in range(steps):
#self.show_world()
#self.__plot_landsea(c, steps)
for y in range(self.__length):
self.__safe_smooth(0, y)
for x in range(1, self.__width - 1):
self.__safe_smooth(x, 0)
for y in range(1, self.__length - 1):
self.__fast_smooth(x, y)
self.__safe_smooth(x, self.__length - 1)
for y in range(self.__length):
self.__safe_smooth(self.__width - 1, y)
if(c > 8):
self.__plot_landsea(c, steps)
def __safe_smooth(self, x, y):
HeightNear = 0
SquaresNear = 0
DirX = [ -1, -1, -1, 0, 0, 1, 1, 1 ]
DirY = [ -1, 0, 1, -1, 1, -1, 0, 1 ]
for d in range(0, 4):
X = x + DirX[d]
Y = y + DirY[d]
if self.__is_valid_position(X, Y):
HeightNear += self.__OldAltitudeBuffer[X][Y]
SquaresNear += 1
        for d in range(4, 8):  # all four remaining neighbours (range(4, 7) skipped one)
X = x + DirX[d]
Y = y + DirY[d]
if self.__is_valid_position(X, Y):
HeightNear += self.__AltitudeBuffer[X][Y]
SquaresNear += 1
self.__OldAltitudeBuffer[x][y] = self.__AltitudeBuffer[x][y]
self.__AltitudeBuffer[x][y] = HeightNear / SquaresNear
def __fast_smooth(self, x, y):
HeightNear = 0
DirX = [ -1, -1, -1, 0, 0, 1, 1, 1 ]
DirY = [ -1, 0, 1, -1, 1, -1, 0, 1 ]
for d in range(0, 4):
HeightNear += self.__OldAltitudeBuffer[x + DirX[d]][y + DirY[d]]
        for d in range(4, 8):  # average over all eight neighbours (range(4, 7) skipped one)
            HeightNear += self.__AltitudeBuffer[x + DirX[d]][y + DirY[d]]
        self.__OldAltitudeBuffer[x][y] = self.__AltitudeBuffer[x][y]
        self.__AltitudeBuffer[x][y] = HeightNear / 8
def __is_valid_position(self, X, Y):
return ((X >= 0) and (Y >= 0) and (X < self.__width) and (Y < self.__length))
def __plot_landsea(self, step, maxsteps):
mini = np.min(self.__AltitudeBuffer)
maxi = np.max(self.__AltitudeBuffer)
difi = (maxi - mini) / 9
absmax = max(abs(mini), maxi)
print "max altitude is ", maxi
print "min altitude is ", mini
destination = os.path.dirname(os.path.abspath(__file__)) + str(r'\outputs\%d'% step) + str(r'.png')
#self.__quantize_grid()
# fig = plt.figure()
# plt.imshow(self.__DisplayMap, interpolation='bilinear', origin='lower', cmap=cm.winter)
# CS = plt.contour(self.__DisplayMap, [0, 1], cmap=cm.winter)
# CB = plt.colorbar(CS, shrink=0.8, extend='both')
# l,b,w,h = plt.gca().get_position().bounds
# ll,bb,ww,hh = CB.ax.get_position().bounds
# CB.ax.set_position([ll, b+0.1*h, ww, h*0.8])
# plt.savefig(destination, bbox_inches='tight')
elevations = [-2000, -200, -100, -50, 0, 1, 50, 100, 200, 2000]
cols = ('#0000e6', '#0000ff', '#1a1aff', '#3333ff', '#33cc33', '#2eb82e', '#29a329', '#248f24', '#1f7a1f', '#1a651a')
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.view_init(elev=20, azim=30)
x = y = np.linspace(0,127,128)
X, Y = np.meshgrid(x, y)
Z = self.__AltitudeBuffer
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.3, cmap=valpuri, linewidth=0.1)
#cset = ax.contourf(X, Y, Z, zdir='z', offset=mini, cmap=valpuri, levels=elevations) # , colors=cols)#np.arange(mini, maxi, difi))
cset = ax.contourf(X, Y, Z, zdir='z', offset=mini, colors=cols, levels=elevations)
#cset.cmap.set_under('#0000e6')
#cset.cmap.set_over('#1a651a')
plt.title(str(r'Valpuri step %d'% step))
ax.set_xlabel('X')
ax.set_xlim(0, 127)
ax.set_ylabel('Y')
ax.set_ylim(0, 127)
ax.set_zlabel('Z')
#ax.set_zlim(-4000, 4000)
ax.set_zlim(-absmax, absmax)
#ax.set_zlim(mini, maxi)
cbar = plt.colorbar(cset)
plt.savefig(destination, bbox_inches='tight')
#plt.show()
if (step >= 9):
plt.show()
# usage: worldmap(XSize, YSize, use smoothing? [Y/n], number of steps in smoothing, 0=single island 1=lots of continents 2=continent with islands)
world = worldmap(128, 128, 1, 10, 1)
|
gpl-2.0
|
gdsglgf/tutorials
|
python/pillow/show_image.py
|
1
|
2746
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
def addSalt(path):
img = np.array(Image.open(path))
    # randomly generate 5000 salt-and-pepper noise pixels
rows,cols,dims = img.shape
print img.shape
print rows, cols, dims
for i in range(5000):
x = np.random.randint(0,rows)
y = np.random.randint(0,cols)
img[x,y,:] = 255
show2(img)
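# Note: the pixel-by-pixel loop in addSalt() could also be written with NumPy
# fancy indexing, placing the same number of salt pixels in one vectorised step:
#   xs = np.random.randint(0, rows, 5000)
#   ys = np.random.randint(0, cols, 5000)
#   img[xs, ys, :] = 255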
def info(img):
print '-------image-------'
print type(img)
    print img.size #image size
    print img.mode #image mode
    print img.format #image format
def arrayInfo(data):
print '--------numpy.ndarray-------'
print data
print data.shape
print data.dtype
    print data.size #number of array elements
print type(data)
def showZeroOneImage(path):
img = np.array(Image.open(path).convert('1'))
# rows, cols = img.shape
# print img.shape
# img = img.reshape(rows * cols)
# img = np.array([1 if i else 0 for i in img])
# img = img.reshape(rows, cols)
print img
show2(img)
def toZeroOne(img):
rows, cols = img.shape
for i in range(rows):
for j in range(cols):
if (img[i, j] <= 128):
img[i, j] = 0
else:
img[i, j] = 1
return img
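# Note: for callers that use the return value, toZeroOne() is equivalent to the
# single vectorised expression:
#   (img > 128).astype(img.dtype)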
def showBlackWhiteImage(path):
img = np.array(Image.open(path).convert('L'))
print img.shape
print img
show2(img)
img = toZeroOne(img)
show2(img)
def showDiff(path):
plt.figure(path)
plt.subplot(2, 2, 1)
plt.title('1')
plt.imshow(np.array(Image.open(path).convert('1')), cmap='gray')
plt.axis('off')
plt.subplot(2, 2, 2)
plt.title('L')
plt.imshow(np.array(Image.open(path).convert('L')), cmap='gray')
plt.axis('off')
plt.subplot(2, 2, 3)
plt.title('RGB')
plt.imshow(np.array(Image.open(path).convert('RGB')))
plt.axis('off')
img = np.array(Image.open(path).convert('L'))
img = toZeroOne(img)
plt.subplot(2, 2, 4)
plt.title('ZeroOne')
plt.imshow(img, cmap='gray')
plt.axis('off')
plt.show()
def show1(img):
    img.show() # saves to a temporary image file first, then opens it with the OS default image viewer
def show2(imgData):
plt.imshow(imgData)
# plt.axis('off')
plt.show()
def show3(imgData):
from scipy.misc import toimage
toimage(imgData).show()
def show4(imgData):
from scipy.misc import imshow
imshow(imgData)
def main():
path = 'lena.jpg'
showFlag = '2'
if len(sys.argv) > 2:
path = sys.argv[1]
showFlag = sys.argv[2]
img = Image.open(path)
info(img)
imgData = np.array(img)
arrayInfo(imgData)
if showFlag == '1':
show1(img)
elif showFlag == '2':
show2(imgData)
elif showFlag == '3':
show3(imgData)
elif showFlag == '4':
show4(imgData)
else:
print 'Not found show function show%s' %(showFlag)
showZeroOneImage(path)
addSalt(path)
showBlackWhiteImage(path)
if __name__ == '__main__':
main()
|
mit
|
yl565/statsmodels
|
statsmodels/duration/tests/test_survfunc.py
|
2
|
11876
|
import numpy as np
from statsmodels.duration.survfunc import (
SurvfuncRight, survdiff, plot_survfunc,
CumIncidenceRight)
from numpy.testing import assert_allclose
from numpy.testing import dec
import pandas as pd
import os
# If true, the output is written to a multi-page pdf file.
pdf_output = False
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
else:
plt.close(fig)
"""
library(survival)
ti1 = c(3, 1, 2, 3, 2, 1, 5, 3)
st1 = c(0, 1, 1, 1, 0, 0, 1, 0)
ti2 = c(1, 1, 2, 3, 7, 1, 5, 3, 9)
st2 = c(0, 1, 0, 0, 1, 0, 1, 0, 1)
ti = c(ti1, ti2)
st = c(st1, st2)
ix = c(rep(1, length(ti1)), rep(2, length(ti2)))
sd = survdiff(Surv(ti, st) ~ ix)
"""
ti1 = np.r_[3, 1, 2, 3, 2, 1, 5, 3]
st1 = np.r_[0, 1, 1, 1, 0, 0, 1, 0]
times1 = np.r_[1, 2, 3, 5]
surv_prob1 = np.r_[0.8750000, 0.7291667, 0.5468750, 0.0000000]
surv_prob_se1 = np.r_[0.1169268, 0.1649762, 0.2005800, np.nan]
n_risk1 = np.r_[8, 6, 4, 1]
n_events1 = np.r_[1., 1., 1., 1.]
ti2 = np.r_[1, 1, 2, 3, 7, 1, 5, 3, 9]
st2 = np.r_[0, 1, 0, 0, 1, 0, 1, 0, 1]
times2 = np.r_[1, 5, 7, 9]
surv_prob2 = np.r_[0.8888889, 0.5925926, 0.2962963, 0.0000000]
surv_prob_se2 = np.r_[0.1047566, 0.2518034, 0.2444320, np.nan]
n_risk2 = np.r_[9, 3, 2, 1]
n_events2 = np.r_[1., 1., 1., 1.]
cur_dir = os.path.dirname(os.path.abspath(__file__))
fp = os.path.join(cur_dir, 'results', 'bmt.csv')
bmt = pd.read_csv(fp)
def test_survfunc1():
# Test where all times have at least 1 event.
sr = SurvfuncRight(ti1, st1)
assert_allclose(sr.surv_prob, surv_prob1, atol=1e-5, rtol=1e-5)
assert_allclose(sr.surv_prob_se, surv_prob_se1, atol=1e-5, rtol=1e-5)
assert_allclose(sr.surv_times, times1)
assert_allclose(sr.n_risk, n_risk1)
assert_allclose(sr.n_events, n_events1)
def test_survfunc2():
# Test where some times have no events.
sr = SurvfuncRight(ti2, st2)
assert_allclose(sr.surv_prob, surv_prob2, atol=1e-5, rtol=1e-5)
assert_allclose(sr.surv_prob_se, surv_prob_se2, atol=1e-5, rtol=1e-5)
assert_allclose(sr.surv_times, times2)
assert_allclose(sr.n_risk, n_risk2)
assert_allclose(sr.n_events, n_events2)
def test_survdiff_basic():
# Constants taken from R, code above
ti = np.concatenate((ti1, ti2))
st = np.concatenate((st1, st2))
groups = np.ones(len(ti))
groups[0:len(ti1)] = 0
z, p = survdiff(ti, st, groups)
assert_allclose(z, 2.14673, atol=1e-4, rtol=1e-4)
assert_allclose(p, 0.14287, atol=1e-4, rtol=1e-4)
def test_simultaneous_cb():
# The exact numbers here are regression tests, but they are close
# to page 103 of Klein and Moeschberger.
df = bmt.loc[bmt["Group"] == "ALL", :]
sf = SurvfuncRight(df["T"], df["Status"])
lcb1, ucb1 = sf.simultaneous_cb(transform="log")
lcb2, ucb2 = sf.simultaneous_cb(transform="arcsin")
ti = sf.surv_times.tolist()
ix = [ti.index(x) for x in (110, 122, 129, 172)]
assert_allclose(lcb1[ix], np.r_[0.43590582, 0.42115592,
0.4035897, 0.38785927])
assert_allclose(ucb1[ix], np.r_[0.93491636, 0.89776803,
0.87922239, 0.85894181])
assert_allclose(lcb2[ix], np.r_[0.52115708, 0.48079378,
0.45595321, 0.43341115])
assert_allclose(ucb2[ix], np.r_[0.96465636, 0.92745068,
0.90885428, 0.88796708])
def test_bmt():
# All tests against SAS
# Results taken from here:
# http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_lifetest_details03.htm
# Confidence intervals for 25% percentile of the survival
# distribution (for "ALL" subjects), taken from the SAS web site
cb = {"linear": [107, 276],
"cloglog": [86, 230],
"log": [107, 332],
"asinsqrt": [104, 276],
"logit": [104, 230]}
dfa = bmt[bmt.Group == "ALL"]
cur_dir = os.path.dirname(os.path.abspath(__file__))
fp = os.path.join(cur_dir, 'results', 'bmt_results.csv')
rslt = pd.read_csv(fp)
sf = SurvfuncRight(dfa["T"].values, dfa.Status.values)
assert_allclose(sf.surv_times, rslt.t)
assert_allclose(sf.surv_prob, rslt.s, atol=1e-4, rtol=1e-4)
assert_allclose(sf.surv_prob_se, rslt.se, atol=1e-4, rtol=1e-4)
for method in "linear", "cloglog", "log", "logit", "asinsqrt":
lcb, ucb = sf.quantile_ci(0.25, method=method)
assert_allclose(cb[method], np.r_[lcb, ucb])
def test_survdiff():
# Results come from R survival and survMisc packages (survMisc is
# used for non G-rho family tests but does not seem to support
# stratification)
df = bmt[bmt.Group != "ALL"].copy()
# Not stratified
stat, p = survdiff(df["T"], df.Status, df.Group)
assert_allclose(stat, 13.44556, atol=1e-4, rtol=1e-4)
stat, p = survdiff(df["T"], df.Status, df.Group, weight_type="gb")
assert_allclose(stat, 15.38787, atol=1e-4, rtol=1e-4)
stat, p = survdiff(df["T"], df.Status, df.Group, weight_type="tw")
assert_allclose(stat, 14.98382, atol=1e-4, rtol=1e-4)
stat, p = survdiff(df["T"], df.Status, df.Group, weight_type="fh",
fh_p=0.5)
assert_allclose(stat, 14.46866, atol=1e-4, rtol=1e-4)
stat, p = survdiff(df["T"], df.Status, df.Group, weight_type="fh",
fh_p=1)
assert_allclose(stat, 14.84500, atol=1e-4, rtol=1e-4)
# 5 strata
strata = np.arange(df.shape[0]) % 5
df["strata"] = strata
stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata)
assert_allclose(stat, 11.97799, atol=1e-4, rtol=1e-4)
stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata,
weight_type="fh", fh_p=0.5)
assert_allclose(stat, 12.6257, atol=1e-4, rtol=1e-4)
stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata,
weight_type="fh", fh_p=1)
assert_allclose(stat, 12.73565, atol=1e-4, rtol=1e-4)
# 8 strata
df["strata"] = np.arange(df.shape[0]) % 8
stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata)
assert_allclose(stat, 12.12631, atol=1e-4, rtol=1e-4)
stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata,
weight_type="fh", fh_p=0.5)
assert_allclose(stat, 12.9633, atol=1e-4, rtol=1e-4)
stat, p = survdiff(df["T"], df.Status, df.Group, strata=df.strata,
weight_type="fh", fh_p=1)
assert_allclose(stat, 13.35259, atol=1e-4, rtol=1e-4)
@dec.skipif(not have_matplotlib)
def test_plot_km():
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_survfunc.pdf")
else:
pdf = None
sr1 = SurvfuncRight(ti1, st1)
sr2 = SurvfuncRight(ti2, st2)
fig = plot_survfunc(sr1)
close_or_save(pdf, fig)
fig = plot_survfunc(sr2)
close_or_save(pdf, fig)
fig = plot_survfunc([sr1, sr2])
close_or_save(pdf, fig)
# Plot the SAS BMT data
gb = bmt.groupby("Group")
sv = []
for g in gb:
s0 = SurvfuncRight(g[1]["T"], g[1]["Status"], title=g[0])
sv.append(s0)
fig = plot_survfunc(sv)
ax = fig.get_axes()[0]
ax.set_position([0.1, 0.1, 0.64, 0.8])
ha, lb = ax.get_legend_handles_labels()
fig.legend([ha[k] for k in (0, 2, 4)],
[lb[k] for k in (0, 2, 4)],
'center right')
close_or_save(pdf, fig)
# Simultaneous CB for BMT data
ii = bmt.Group == "ALL"
sf = SurvfuncRight(bmt.loc[ii, "T"], bmt.loc[ii, "Status"])
fig = sf.plot()
ax = fig.get_axes()[0]
ax.set_position([0.1, 0.1, 0.64, 0.8])
ha, lb = ax.get_legend_handles_labels()
lcb, ucb = sf.simultaneous_cb(transform="log")
plt.fill_between(sf.surv_times, lcb, ucb, color="lightgrey")
lcb, ucb = sf.simultaneous_cb(transform="arcsin")
plt.plot(sf.surv_times, lcb, color="darkgrey")
plt.plot(sf.surv_times, ucb, color="darkgrey")
plt.plot(sf.surv_times, sf.surv_prob - 2*sf.surv_prob_se, color="red")
plt.plot(sf.surv_times, sf.surv_prob + 2*sf.surv_prob_se, color="red")
plt.xlim(100, 600)
close_or_save(pdf, fig)
if pdf_output:
pdf.close()
def test_weights1():
# tm = c(1, 3, 5, 6, 7, 8, 8, 9, 3, 4, 1, 3, 2)
# st = c(1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0)
# wt = c(1, 2, 3, 2, 3, 1, 2, 1, 1, 2, 2, 3, 1)
# library(survival)
# sf = survfit(Surv(tm, st) ~ 1, weights=wt, err='tsiatis')
tm = np.r_[1, 3, 5, 6, 7, 8, 8, 9, 3, 4, 1, 3, 2]
st = np.r_[1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0]
wt = np.r_[1, 2, 3, 2, 3, 1, 2, 1, 1, 2, 2, 3, 1]
sf = SurvfuncRight(tm, st, "", freq_weights=wt)
assert_allclose(sf.surv_times, np.r_[1, 3, 6, 7, 9])
assert_allclose(sf.surv_prob,
np.r_[0.875, 0.65625, 0.51041667, 0.29166667, 0.])
assert_allclose(sf.surv_prob_se,
np.r_[0.07216878, 0.13307266, 0.20591185, 0.3219071,
1.05053519])
def test_weights2():
# tm = c(1, 3, 5, 6, 7, 2, 4, 6, 8, 10)
# st = c(1, 1, 0, 1, 1, 1, 1, 0, 1, 1)
# wt = c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2)
# library(survival)
# sf = survfit(Surv(tm, st) ~ 1, weights=wt, err='tsiatis')
tm = np.r_[1, 3, 5, 6, 7, 2, 4, 6, 8, 10]
st = np.r_[1, 1, 0, 1, 1, 1, 1, 0, 1, 1]
wt = np.r_[1, 1, 1, 1, 1, 2, 2, 2, 2, 2]
tm0 = np.r_[1, 3, 5, 6, 7, 2, 4, 6, 8, 10, 2, 4, 6, 8, 10]
st0 = np.r_[1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1]
sf0 = SurvfuncRight(tm, st, "", freq_weights=wt)
sf1 = SurvfuncRight(tm0, st0, "")
assert_allclose(sf0.surv_times, sf1.surv_times)
assert_allclose(sf0.surv_prob, sf1.surv_prob)
assert_allclose(sf0.surv_prob_se,
np.r_[0.06666667, 0.1210311, 0.14694547,
0.19524829, 0.23183377,
0.30618115, 0.46770386, 0.84778942])
def test_incidence():
# Check estimates in R:
# ftime = c(1, 1, 2, 4, 4, 4, 6, 6, 7, 8, 9, 9, 9, 1, 2, 2, 4, 4)
# fstat = c(1, 1, 1, 2, 2, 2, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0)
# cuminc(ftime, fstat)
#
# The standard errors agree with Stata, not with R (cmprisk
# package), which uses a different SE formula from Aalen (1978)
ftime = np.r_[1, 1, 2, 4, 4, 4, 6, 6, 7, 8, 9, 9, 9, 1, 2, 2, 4, 4]
fstat = np.r_[1, 1, 1, 2, 2, 2, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0]
ci = CumIncidenceRight(ftime, fstat)
cinc = [np.array([0.11111111, 0.17037037, 0.17037037, 0.17037037,
0.17037037, 0.17037037, 0.17037037]),
np.array([0., 0., 0.20740741, 0.20740741,
0.20740741, 0.20740741, 0.20740741]),
np.array([0., 0., 0., 0.17777778,
0.26666667, 0.26666667, 0.26666667])]
assert_allclose(cinc[0], ci.cinc[0])
assert_allclose(cinc[1], ci.cinc[1])
assert_allclose(cinc[2], ci.cinc[2])
cinc_se = [np.array([0.07407407, 0.08976251, 0.08976251, 0.08976251,
0.08976251, 0.08976251, 0.08976251]),
np.array([0., 0., 0.10610391, 0.10610391, 0.10610391,
0.10610391, 0.10610391]),
np.array([0., 0., 0., 0.11196147, 0.12787781,
0.12787781, 0.12787781])]
assert_allclose(cinc_se[0], ci.cinc_se[0])
assert_allclose(cinc_se[1], ci.cinc_se[1])
assert_allclose(cinc_se[2], ci.cinc_se[2])
# Simple check for frequency weights
weights = np.ones(len(ftime))
ciw = CumIncidenceRight(ftime, fstat, freq_weights=weights)
assert_allclose(ci.cinc[0], ciw.cinc[0])
assert_allclose(ci.cinc[1], ciw.cinc[1])
assert_allclose(ci.cinc[2], ciw.cinc[2])
|
bsd-3-clause
|
toobaz/pandas
|
pandas/tests/sparse/frame/test_analytics.py
|
2
|
1236
|
import numpy as np
import pytest
from pandas import DataFrame, SparseDataFrame, SparseSeries
from pandas.util import testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
def test_quantile():
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
q = 0.1
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseSeries(dense_expected)
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH#17386)")
def test_quantile_multi():
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
q = [0.1, 0.5]
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseDataFrame(dense_expected)
tm.assert_frame_equal(result, dense_expected)
tm.assert_sp_frame_equal(result, sparse_expected)
|
bsd-3-clause
|
wizmer/NeuroM
|
neurom/tests/test_viewer.py
|
1
|
5335
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from pathlib import Path
import shutil
import tempfile
import mock
import sys
import matplotlib
if 'DISPLAY' not in os.environ: # noqa
matplotlib.use('Agg') # noqa
import neurom
from neurom.view import common, plotly
from neurom import load_neuron, viewer, NeuriteType
from nose import tools as nt
from numpy.testing import assert_allclose, assert_array_almost_equal
DATA_PATH = Path(__file__).parent.parent.parent / 'test_data/swc'
MORPH_FILENAME = Path(DATA_PATH, 'Neuron.swc')
nrn = load_neuron(MORPH_FILENAME)
def _reload_module(module):
"""Force module reload."""
import importlib
importlib.reload(module)
def test_plotly_extra_not_installed():
with mock.patch.dict(sys.modules, {'plotly': None}):
try:
_reload_module(neurom.view.plotly)
nt.ok_(False, "ImportError not triggered")
except ImportError as e:
nt.assert_equal(str(e),
'neurom[plotly] is not installed. '
'Please install it by doing: pip install neurom[plotly]')
def test_plotly_draw_neuron3d():
plotly.draw(nrn, plane='3d', auto_open=False)
plotly.draw(nrn.neurites[0], plane='3d', auto_open=False)
fig = plotly.draw(load_neuron(Path(DATA_PATH, 'simple-different-soma.swc')),
auto_open=False)
x, y, z = [fig['data'][2][key] for key in str('xyz')]
assert_allclose(x[0, 0], 2)
assert_allclose(x[33, 33], -1.8971143170299758)
assert_allclose(y[0, 0], 3)
assert_allclose(y[33, 33], 9.75)
assert_allclose(z[0, 0], 13)
assert_allclose(z[33, 33], 8.5)
def test_plotly_draw_neuron2d():
plotly.draw(nrn, plane='xy', auto_open=False)
plotly.draw(nrn.neurites[0], plane='xy', auto_open=False)
def test_draw_neuron():
viewer.draw(nrn)
common.plt.close('all')
def test_draw_filter_neurite():
for mode in ['2d', '3d']:
viewer.draw(nrn, mode=mode, neurite_type=NeuriteType.basal_dendrite)
assert_allclose(common.plt.gca().get_ylim(),
[-30., 78], atol=5)
common.plt.close('all')
def test_draw_neuron3d():
viewer.draw(nrn, mode='3d')
common.plt.close('all')
nt.assert_raises(NotImplementedError, viewer.draw, nrn, mode='3d', realistic_diameters=True)
def test_draw_tree():
viewer.draw(nrn.neurites[0])
common.plt.close('all')
def test_draw_tree3d():
viewer.draw(nrn.neurites[0], mode='3d')
common.plt.close('all')
def test_draw_soma():
viewer.draw(nrn.soma)
common.plt.close('all')
def test_draw_soma3d():
viewer.draw(nrn.soma, mode='3d')
common.plt.close('all')
def test_draw_dendrogram():
viewer.draw(nrn, mode='dendrogram')
common.plt.close('all')
viewer.draw(nrn.neurites[0], mode='dendrogram')
common.plt.close('all')
def test_draw_dendrogram_empty_segment():
neuron = load_neuron(Path(DATA_PATH, 'empty_segments.swc'))
viewer.draw(neuron, mode='dendrogram')
common.plt.close('all')
@nt.raises(viewer.InvalidDrawModeError)
def test_invalid_draw_mode_raises():
viewer.draw(nrn, mode='4d')
@nt.raises(viewer.NotDrawableError)
def test_invalid_object_raises():
class Dummy(object):
pass
viewer.draw(Dummy())
@nt.raises(viewer.NotDrawableError)
def test_invalid_combo_raises():
viewer.draw(nrn.soma, mode='dendrogram')
def test_writing_output():
with tempfile.TemporaryDirectory() as folder:
output_dir = Path(folder, 'subdir')
viewer.draw(nrn, mode='2d', output_path=output_dir)
nt.ok_((output_dir / 'Figure.png').is_file())
common.plt.close('all')
|
bsd-3-clause
|
hainm/scikit-learn
|
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
244
|
9986
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
|
bsd-3-clause
|
sangwook236/general-development-and-testing
|
sw_dev/python/rnd/test/statistics/edward/edward_mixture_density_network.py
|
1
|
7356
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import seaborn as sns
import tensorflow as tf
import edward as ed
from edward.models import Categorical, Mixture, Normal
from scipy import stats
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
def plot_normal_mix(pis, mus, sigmas, ax, label='', comp=True):
"""Plots the mixture of Normal models to axis=ax comp=True plots all
components of mixture model
"""
x = np.linspace(-10.5, 10.5, 250)
final = np.zeros_like(x)
for i, (weight_mix, mu_mix, sigma_mix) in enumerate(zip(pis, mus, sigmas)):
temp = stats.norm.pdf(x, mu_mix, sigma_mix) * weight_mix
final = final + temp
if comp:
ax.plot(x, temp, label='Normal ' + str(i))
ax.plot(x, final, label='Mixture of Normals ' + label)
ax.legend(fontsize=13)
def sample_from_mixture(x, pred_weights, pred_means, pred_std, amount):
"""Draws samples from mixture model.
Returns 2 d array with input X and sample from prediction of mixture model.
"""
samples = np.zeros((amount, 2))
n_mix = len(pred_weights[0])
to_choose_from = np.arange(n_mix)
for j, (weights, means, std_devs) in enumerate(zip(pred_weights, pred_means, pred_std)):
index = np.random.choice(to_choose_from, p=weights)
samples[j, 1] = np.random.normal(means[index], std_devs[index], size=1)
samples[j, 0] = x[j]
if j == amount - 1:
break
return samples
def build_toy_dataset(N):
y_data = np.random.uniform(-10.5, 10.5, N)
r_data = np.random.normal(size=N) # Random noise.
x_data = np.sin(0.75 * y_data) * 7.0 + y_data * 0.5 + r_data * 1.0
x_data = x_data.reshape((N, 1))
return train_test_split(x_data, y_data, random_state=42)
def neural_network(X, K):
"""loc, scale, logits = NN(x; theta)"""
# 2 hidden layers with 15 hidden units.
net = tf.layers.dense(X, 15, activation=tf.nn.relu)
net = tf.layers.dense(net, 15, activation=tf.nn.relu)
locs = tf.layers.dense(net, K, activation=None)
scales = tf.layers.dense(net, K, activation=tf.exp)
logits = tf.layers.dense(net, K, activation=None)
return locs, scales, logits
# REF [site] >> http://edwardlib.org/tutorials/mixture-density-network
def mixture_density_network_example():
ed.set_seed(42)
N = 5000 # Number of data points.
D = 1 # Number of features.
K = 20 # Number of mixture components.
X_train, X_test, y_train, y_test = build_toy_dataset(N)
print('Size of features in training data: {}'.format(X_train.shape))
print('Size of output in training data: {}'.format(y_train.shape))
print('Size of features in test data: {}'.format(X_test.shape))
print('Size of output in test data: {}'.format(y_test.shape))
sns.regplot(X_train, y_train, fit_reg=False)
plt.show()
#--------------------
X_ph = tf.placeholder(tf.float32, [None, D])
y_ph = tf.placeholder(tf.float32, [None])
# We use a mixture of 20 normal distributions parameterized by a feedforward network.
# The membership probabilities and per-component mean and standard deviation are given by the output of a feedforward network.
# We use tf.layers to construct neural networks.
# We specify a three-layer network with 15 hidden units for each hidden layer.
locs, scales, logits = neural_network(X_ph, K)
cat = Categorical(logits=logits)
components = [Normal(loc=loc, scale=scale) for loc, scale in zip(tf.unstack(tf.transpose(locs)), tf.unstack(tf.transpose(scales)))]
y = Mixture(cat=cat, components=components, value=tf.zeros_like(y_ph))
# Note: A bug exists in Mixture which prevents samples from it to have a shape of [None].
# For now fix it using the value argument, as sampling is not necessary for MAP estimation anyways.
#--------------------
# We use MAP estimation, passing in the model and data set.
# There are no latent variables to infer.
# Thus inference is concerned with only training model parameters, which are baked into how we specify the neural networks.
inference = ed.MAP(data={y: y_ph})
optimizer = tf.train.AdamOptimizer(5e-3)
inference.initialize(optimizer=optimizer, var_list=tf.trainable_variables())
    # We will manually control the inference and how data is passed into it at each step.
# Initialize the algorithm and the TensorFlow variables.
sess = ed.get_session()
tf.global_variables_initializer().run()
# Now we train the MDN by calling inference.update(), passing in the data.
# The quantity inference.loss is the loss function (negative log-likelihood) at that step of inference.
# We also report the loss function on test data by calling inference.loss and where we feed test data to the TensorFlow placeholders instead of training data.
# We keep track of the losses under train_loss and test_loss.
n_epoch = 1000
train_loss = np.zeros(n_epoch)
test_loss = np.zeros(n_epoch)
for i in range(n_epoch):
info_dict = inference.update(feed_dict={X_ph: X_train, y_ph: y_train})
train_loss[i] = info_dict['loss']
test_loss[i] = sess.run(inference.loss, feed_dict={X_ph: X_test, y_ph: y_test})
inference.print_progress(info_dict)
#--------------------
# After training for a number of iterations, we get out the predictions we are interested in from the model: the predicted mixture weights, cluster means, and cluster standard deviations.
# To do this, we fetch their values from session, feeding test data X_test to the placeholder X_ph.
pred_weights, pred_means, pred_std = sess.run([tf.nn.softmax(logits), locs, scales], feed_dict={X_ph: X_test})
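    # For illustration: the mixture's conditional mean E[y|x] = sum_k pi_k(x) * mu_k(x)
    # can be recovered directly from these arrays to get a point prediction per
    # test input, e.g.:
    #   point_pred = np.sum(pred_weights * pred_means, axis=1)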
# Let's plot the log-likelihood of the training and test data as functions of the training epoch.
    # The quantity inference.loss is the total negative log-likelihood over the whole data set, not the loss per data point.
# Below we plot the per-data point log-likelihood by dividing by the size of the train and test data respectively.
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(16, 3.5))
plt.plot(np.arange(n_epoch), -test_loss / len(X_test), label='Test')
plt.plot(np.arange(n_epoch), -train_loss / len(X_train), label='Train')
plt.legend(fontsize=20)
plt.xlabel('Epoch', fontsize=15)
plt.ylabel('Log-likelihood', fontsize=15)
plt.show()
#--------------------
# Criticism.
    # Note that as this is an inverse problem we cannot recover the exact answer, but we can hope that the truth lies in an area where the model assigns high probability.
obj = [0, 4, 6]
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(16, 6))
plot_normal_mix(pred_weights[obj][0], pred_means[obj][0], pred_std[obj][0], axes[0], comp=False)
axes[0].axvline(x=y_test[obj][0], color='black', alpha=0.5)
plot_normal_mix(pred_weights[obj][2], pred_means[obj][2], pred_std[obj][2], axes[1], comp=False)
axes[1].axvline(x=y_test[obj][2], color='black', alpha=0.5)
plot_normal_mix(pred_weights[obj][1], pred_means[obj][1], pred_std[obj][1], axes[2], comp=False)
axes[2].axvline(x=y_test[obj][1], color='black', alpha=0.5)
plt.show()
# We can check the ensemble by drawing samples of the prediction and plotting the density of those.
a = sample_from_mixture(X_test, pred_weights, pred_means, pred_std, amount=len(X_test))
sns.jointplot(a[:, 0], a[:, 1], kind='hex', color='#4CB391', ylim=(-10, 10), xlim=(-14, 14))
plt.show()
def main():
mixture_density_network_example()
#%%------------------------------------------------------------------
if '__main__' == __name__:
main()
|
gpl-2.0
|
xwolf12/scikit-learn
|
examples/ensemble/plot_adaboost_multiclass.py
|
354
|
4124
|
"""
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
|
bsd-3-clause
|
detrout/debian-statsmodels
|
statsmodels/regression/tests/test_quantile_regression.py
|
8
|
7766
|
import scipy.stats
import numpy as np
import statsmodels.api as sm
from numpy.testing import assert_allclose, assert_equal, assert_almost_equal
from patsy import dmatrices # pylint: disable=E0611
from statsmodels.regression.quantile_regression import QuantReg
from .results_quantile_regression import (
biweight_chamberlain, biweight_hsheather, biweight_bofinger,
cosine_chamberlain, cosine_hsheather, cosine_bofinger,
gaussian_chamberlain, gaussian_hsheather, gaussian_bofinger,
epan2_chamberlain, epan2_hsheather, epan2_bofinger,
parzen_chamberlain, parzen_hsheather, parzen_bofinger,
#rectangle_chamberlain, rectangle_hsheather, rectangle_bofinger,
#triangle_chamberlain, triangle_hsheather, triangle_bofinger,
#epanechnikov_chamberlain, epanechnikov_hsheather, epanechnikov_bofinger,
epanechnikov_hsheather_q75, Rquantreg)
idx = ['income', 'Intercept']
class CheckModelResultsMixin(object):
def test_params(self):
assert_allclose(np.ravel(self.res1.params.ix[idx]),
self.res2.table[:,0], rtol=1e-3)
def test_bse(self):
assert_equal(self.res1.scale, 1)
assert_allclose(np.ravel(self.res1.bse.ix[idx]),
self.res2.table[:,1], rtol=1e-3)
def test_tvalues(self):
assert_allclose(np.ravel(self.res1.tvalues.ix[idx]),
self.res2.table[:,2], rtol=1e-2)
def test_pvalues(self):
pvals_stata = scipy.stats.t.sf(self.res2.table[:, 2] , self.res2.df_r)
assert_allclose(np.ravel(self.res1.pvalues.ix[idx]),
pvals_stata, rtol=1.1)
# test that we use the t distribution for the p-values
pvals_t = scipy.stats.t.sf(self.res1.tvalues , self.res2.df_r) * 2
assert_allclose(np.ravel(self.res1.pvalues),
pvals_t, rtol=1e-9, atol=1e-10)
def test_conf_int(self):
assert_allclose(self.res1.conf_int().ix[idx],
self.res2.table[:,-2:], rtol=1e-3)
def test_nobs(self):
assert_allclose(self.res1.nobs, self.res2.N, rtol=1e-3)
def test_df_model(self):
assert_allclose(self.res1.df_model, self.res2.df_m, rtol=1e-3)
def test_df_resid(self):
assert_allclose(self.res1.df_resid, self.res2.df_r, rtol=1e-3)
def test_prsquared(self):
assert_allclose(self.res1.prsquared, self.res2.psrsquared, rtol=1e-3)
def test_sparsity(self):
assert_allclose(np.array(self.res1.sparsity),
self.res2.sparsity, rtol=1e-3)
def test_bandwidth(self):
assert_allclose(np.array(self.res1.bandwidth),
self.res2.kbwidth, rtol=1e-3)
d = {('biw','bofinger'): biweight_bofinger,
('biw','chamberlain'): biweight_chamberlain,
('biw','hsheather'): biweight_hsheather,
('cos','bofinger'): cosine_bofinger,
('cos','chamberlain'): cosine_chamberlain,
('cos','hsheather'): cosine_hsheather,
('gau','bofinger'): gaussian_bofinger,
('gau','chamberlain'): gaussian_chamberlain,
('gau','hsheather'): gaussian_hsheather,
('par','bofinger'): parzen_bofinger,
('par','chamberlain'): parzen_chamberlain,
('par','hsheather'): parzen_hsheather,
#('rec','bofinger'): rectangle_bofinger,
#('rec','chamberlain'): rectangle_chamberlain,
#('rec','hsheather'): rectangle_hsheather,
#('tri','bofinger'): triangle_bofinger,
#('tri','chamberlain'): triangle_chamberlain,
#('tri','hsheather'): triangle_hsheather,
('epa', 'bofinger'): epan2_bofinger,
('epa', 'chamberlain'): epan2_chamberlain,
('epa', 'hsheather'): epan2_hsheather
#('epa2', 'bofinger'): epan2_bofinger,
#('epa2', 'chamberlain'): epan2_chamberlain,
#('epa2', 'hsheather'): epan2_hsheather
}
def setup_fun(kernel='gau', bandwidth='bofinger'):
data = sm.datasets.engel.load_pandas().data
y, X = dmatrices('foodexp ~ income', data, return_type='dataframe')
statsm = QuantReg(y, X).fit(vcov='iid', kernel=kernel, bandwidth=bandwidth)
stata = d[(kernel, bandwidth)]
return statsm, stata
def test_fitted_residuals():
data = sm.datasets.engel.load_pandas().data
y, X = dmatrices('foodexp ~ income', data, return_type='dataframe')
res = QuantReg(y, X).fit(q=.1)
# Note: maxabs relative error with fitted is 1.789e-09
assert_almost_equal(np.array(res.fittedvalues), Rquantreg.fittedvalues, 5)
assert_almost_equal(np.array(res.predict()), Rquantreg.fittedvalues, 5)
assert_almost_equal(np.array(res.resid), Rquantreg.residuals, 5)
class TestEpanechnikovHsheatherQ75(CheckModelResultsMixin):
# Vincent Arel-Bundock also spot-checked q=.1
@classmethod
def setUp(cls):
data = sm.datasets.engel.load_pandas().data
y, X = dmatrices('foodexp ~ income', data, return_type='dataframe')
cls.res1 = QuantReg(y, X).fit(q=.75, vcov='iid', kernel='epa', bandwidth='hsheather')
cls.res2 = epanechnikov_hsheather_q75
class TestEpanechnikovBofinger(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('epa', 'bofinger')
class TestEpanechnikovChamberlain(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('epa', 'chamberlain')
class TestEpanechnikovHsheather(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('epa', 'hsheather')
class TestGaussianBofinger(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('gau', 'bofinger')
class TestGaussianChamberlain(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('gau', 'chamberlain')
class TestGaussianHsheather(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('gau', 'hsheather')
class TestBiweightBofinger(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('biw', 'bofinger')
class TestBiweightChamberlain(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('biw', 'chamberlain')
class TestBiweightHsheather(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('biw', 'hsheather')
class TestCosineBofinger(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('cos', 'bofinger')
class TestCosineChamberlain(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('cos', 'chamberlain')
class TestCosineHsheather(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('cos', 'hsheather')
class TestParzeneBofinger(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('par', 'bofinger')
class TestParzeneChamberlain(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('par', 'chamberlain')
class TestParzeneHsheather(CheckModelResultsMixin):
@classmethod
def setUp(cls):
cls.res1, cls.res2 = setup_fun('par', 'hsheather')
#class TestTriangleBofinger(CheckModelResultsMixin):
#@classmethod
#def setUp(cls):
#cls.res1, cls.res2 = setup_fun('tri', 'bofinger')
#class TestTriangleChamberlain(CheckModelResultsMixin):
#@classmethod
#def setUp(cls):
#cls.res1, cls.res2 = setup_fun('tri', 'chamberlain')
#class TestTriangleHsheather(CheckModelResultsMixin):
#@classmethod
#def setUp(cls):
#cls.res1, cls.res2 = setup_fun('tri', 'hsheather')
|
bsd-3-clause
|
CTU-IIG/FlexRaySSScheduler
|
BenchmarkGenerator/Scripts/make_Modified_plot.py
|
1
|
4060
|
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib
matplotlib.use("Qt4Agg")
import matplotlib.pyplot as plt
from matplotlib import cm
import argparse
import csv
import os
# python3 make_MV_plot.py --steps 50 SAE_1.txt Results/results_MV_used_for_paper.csv
payloads = 128  # 15
periods = 4
class Plotter:
def __init__(self):
self.dataset = [[0 for _ in range(periods)] for _ in range(payloads)]
def read_data(self, results_file: str):
if not os.path.exists(results_file):
raise ValueError("The result file: {} does not exists".format(results_file))
step_payload = 16
min_payload = 32
instances = 1
with open(results_file, 'r') as f:
csv_file = csv.reader(f, delimiter=',')
for row in csv_file:
payload = int(row[1])
period = int(row[2])
percentage = int(row[6])
self.dataset[int((payload-min_payload)/step_payload)][period] += percentage if percentage < 150 else np.nan
for x in range(payloads):
for y in range(periods):
self.dataset[x][y] /= instances
def plot(self):
if len(self.dataset) <= 0:
raise ValueError("Dataset should not be empty")
step_payload = 16
min_payload = 32
plt.rc("text", usetex=True)
plt.rcParams['text.latex.preamble'] = [r'\usepackage[euler]{textgreek}']
fig = plt.figure()
fig.patch.set_facecolor('white')
        # fig.suptitle('This leads straight to hell!!!')
ax = fig.add_subplot(111, projection='3d')
# fig = plt.figure()
# ax = fig.gca(projection='3d')
axes_pay = np.asarray([float(x) for x in range(min_payload, min_payload+step_payload*payloads, step_payload)])
axes_peri = np.asarray([float(x) for x in range(0, periods)])
xv, yv = np.meshgrid(axes_peri, axes_pay)
zv = np.copy(yv)
print(zv)
for i in range(len(self.dataset)):
for j in range(len(self.dataset[0])):
# print("{},{}".format(i,j))
if self.dataset[i][j] != 0:
zv[i, j] = self.dataset[i][j]
else:
zv[i, j] = np.NAN
# zv = xv
# ax.plot_wireframe(xv, yv, zv, cstride=1, rstride=1)
# surf = ax.plot_surface(xv, yv, zv, alpha=0.9, cmap=cm.jet, cstride=1, rstride=1, vmin=66, vmax=100, shade=False )
# surf.set_facecolor((1,1,1,1))
v_min_value = np.nanmin(zv)-4
ax.scatter(xv, yv, zv, alpha=0.9, cmap=cm.viridis_r, c=zv, vmin=v_min_value, vmax=100)
# ax.plot_wireframe(xv, yv, zv, alpha=0.9, cmap=cm.jet, cstride=1, rstride=1)
ax.set_xlabel("{\\rmfamily \\large M} {\\normalsize[ms]}")
ax.set_xticklabels(["\\rmfamily \\large 8", "", "", "", "\\rmfamily \\large 4", "", "", "", "\\rmfamily \\large 2", "", "", "", "\\rmfamily \\large 1"])
ax.set_ylabel("{\\rmfamily \\large W} {\\normalsize[bit]}")
# ax.set_yticklabels(["\\rmfamily \\large 0", "\\rmfamily \\large 20", "\\rmfamily \\large 40", "\\rmfamily \\large 60", "\\rmfamily \\large 80", "\\rmfamily \\large 100"])
ax.set_zlabel("{\\rmfamily \\large Portion of the slot threshold [\%]}")
# ax.set_zticklabels(["\\rmfamily \\large 40", "\\rmfamily \\large 50", "\\rmfamily \\large 60", "\\rmfamily \\large 70", "\\rmfamily \\large 80", "\\rmfamily \\large 90", "\\rmfamily \\large 100", "\\rmfamily \\large 110", "\\rmfamily \\large 120"])
ax.grid(True)
# x = [6,3,6,9,12,24]
# y = [3,5,78,12,23,56]
# put 0s on the y-axis, and put the y axis on the z-axis
# ax.plot(xs=x, ys=[0]*len(x), zs=y, zdir='z', label='ys=0, zdir=z')
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("results", help="File with the stored results")
args = parser.parse_args()
plotter = Plotter()
plotter.read_data(args.results)
plotter.plot()
|
gpl-2.0
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/sklearn/linear_model/tests/test_ridge.py
|
6
|
24655
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.datasets import make_regression
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
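                # Concretely, minimising sum_i w_i * (y_i - x_i.b)^2 + alpha * ||b||^2
                # is the same problem as plain ridge on the rescaled rows
                # (sqrt(w_i) * x_i, sqrt(w_i) * y_i), which is what coefs2 recomputes below.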
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
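    # (the efficient variant relies on the classic leave-one-out identity for
    # ridge: each held-out residual equals (y_i - y_hat_i) / (1 - H_ii), with H
    # the hat matrix, so nothing has to be refit per sample)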
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5, fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3, fit_intercept=False)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
# ignore warning that solvers are changed to SAG for
# temporary fix
@ignore_warnings
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd', fit_intercept=False)
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
def test_ridge_fit_intercept_sparse():
X, y = make_regression(n_samples=1000, n_features=2, n_informative=2,
bias=10., random_state=42)
X_csr = sp.csr_matrix(X)
dense = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
sparse = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
dense.fit(X, y)
sparse.fit(X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
# test the solver switch and the corresponding warning
sparse = Ridge(alpha=1., tol=1.e-15, solver='lsqr', fit_intercept=True)
assert_warns(UserWarning, sparse.fit, X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
|
gpl-2.0
|
moonbury/pythonanywhere
|
MatplotlibCookbook/Chapter 8/test.py
|
3
|
2513
|
from gi.repository import Gtk
import numpy
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg
def supershape_radius(phi, a, b, m, n1, n2, n3):
theta = .25 * m * phi
cos = numpy.fabs(numpy.cos(theta) / a) ** n2
sin = numpy.fabs(numpy.sin(theta) / b) ** n3
r = (cos + sin) ** (-1. / n1)
r /= numpy.max(r)
return r
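# The radius above is Gielis' superformula,
#   r(phi) = (|cos(m*phi/4) / a| ** n2 + |sin(m*phi/4) / b| ** n3) ** (-1.0 / n1),
# normalised to a maximum of 1; m sets the rotational symmetry and n1..n3 shape
# the lobes. With the initial slider values used below (m=3, n1=2, n2=n3=18) it
# traces a rounded three-lobed curve.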
class SuperShapeWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title = 'SuperShape')
layout_box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
self.add(layout_box)
self.m = 3
self.n1 = 2
self.n2 = 18
self.n3 = 18
self.fig = Figure((6, 6), dpi = 80)
#w, h = self.fig.get_size_inches()
#dpi_res = self.fig.get_dpi()
#w, h = int(numpy.ceil(w * dpi_res)), int(numpy.ceil(h * dpi_res))
canvas = FigureCanvasGTK3Agg(self.fig)
#canvas.set_size_request(w, h)
layout_box.add(canvas)
self.m_slider = Gtk.HScale.new(Gtk.Adjustment(self.m, 1, 20, 1., .1, 1))
self.m_slider.connect('value-changed', self.on_m_slide)
layout_box.add(self.m_slider)
self.n1_slider = Gtk.HScale.new(Gtk.Adjustment(self.n1, .01, 20, 1., .1, 1))
self.n1_slider.connect('value-changed', self.on_n1_slide)
layout_box.add(self.n1_slider)
self.n2_slider = Gtk.HScale.new(Gtk.Adjustment(self.n2, .01, 20, 1., .1, 1))
self.n2_slider.connect('value-changed', self.on_n2_slide)
layout_box.add(self.n2_slider)
self.n3_slider = Gtk.HScale.new(Gtk.Adjustment(self.n3, .01, 20, 1., .1, 1))
self.n3_slider.connect('value-changed', self.on_n3_slide)
layout_box.add(self.n3_slider)
self.draw_figure()
def on_m_slide(self, event):
self.m = self.m_slider.get_value()
self.refresh_figure()
def on_n1_slide(self, event):
self.n1 = self.n1_slider.get_value()
self.refresh_figure()
def on_n2_slide(self, event):
self.n2 = self.n2_slider.get_value()
self.refresh_figure()
def on_n3_slide(self, event):
self.n3 = self.n3_slider.get_value()
self.refresh_figure()
def draw_figure(self):
self.phi = numpy.linspace(0, 2 * numpy.pi, 1024)
ax = self.fig.add_subplot(111, polar = True)
r = supershape_radius(self.phi, 1, 1, self.m, self.n1, self.n2, self.n3)
self.lines, = ax.plot(self.phi, r, lw = 3.)
self.fig.canvas.draw()
def refresh_figure(self):
r = supershape_radius(self.phi, 1, 1, self.m, self.n1, self.n2, self.n3)
self.lines.set_ydata(r)
self.fig.canvas.draw_idle()
win = SuperShapeWindow()
win.connect('delete-event', Gtk.main_quit)
win.show_all()
Gtk.main()
|
gpl-3.0
|
lucidfrontier45/scikit-learn
|
benchmarks/bench_plot_ward.py
|
8
|
1151
|
"""
Benchmark scikit-learn's Ward implementation against scipy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import Ward
ward = Ward(n_clusters=3)
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.clf()
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
|
bsd-3-clause
|
MJuddBooth/pandas
|
pandas/tests/arrays/categorical/test_indexing.py
|
2
|
9775
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
import pandas as pd
from pandas import Categorical, CategoricalIndex, Index, PeriodIndex, Series
import pandas.core.common as com
from pandas.tests.arrays.categorical.common import TestCategorical
import pandas.util.testing as tm
class TestCategoricalIndexingWithFactor(TestCategorical):
def test_getitem(self):
assert self.factor[0] == 'a'
assert self.factor[-1] == 'c'
subf = self.factor[[0, 1, 2]]
tm.assert_numpy_array_equal(subf._codes,
np.array([0, 1, 1], dtype=np.int8))
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_numpy_array_equal(subf._codes,
np.array([2, 2, 2], dtype=np.int8))
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
assert c[0] == 'b'
c[-1] = 'a'
assert c[-1] == 'a'
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical(['c', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
ordered=True)
tm.assert_categorical_equal(c, expected)
@pytest.mark.parametrize('other', [
pd.Categorical(['b', 'a']),
pd.Categorical(['b', 'a'], categories=['b', 'a']),
])
def test_setitem_same_but_unordered(self, other):
# GH-24142
target = pd.Categorical(['a', 'b'], categories=['a', 'b'])
mask = np.array([True, False])
target[mask] = other[mask]
expected = pd.Categorical(['b', 'b'], categories=['a', 'b'])
tm.assert_categorical_equal(target, expected)
@pytest.mark.parametrize('other', [
pd.Categorical(['b', 'a'], categories=['b', 'a', 'c']),
pd.Categorical(['b', 'a'], categories=['a', 'b', 'c']),
pd.Categorical(['a', 'a'], categories=['a']),
pd.Categorical(['b', 'b'], categories=['b']),
])
def test_setitem_different_unordered_raises(self, other):
# GH-24142
target = pd.Categorical(['a', 'b'], categories=['a', 'b'])
mask = np.array([True, False])
with pytest.raises(ValueError):
target[mask] = other[mask]
@pytest.mark.parametrize('other', [
pd.Categorical(['b', 'a']),
pd.Categorical(['b', 'a'], categories=['b', 'a'], ordered=True),
pd.Categorical(['b', 'a'], categories=['a', 'b', 'c'], ordered=True),
])
    def test_setitem_same_ordered_raises(self, other):
        # GH-24142
target = pd.Categorical(['a', 'b'], categories=['a', 'b'],
ordered=True)
mask = np.array([True, False])
with pytest.raises(ValueError):
target[mask] = other[mask]
class TestCategoricalIndexing(object):
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
tm.assert_numpy_array_equal(result, expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.int8)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
tm.assert_numpy_array_equal(cat1._codes, exp_arr)
tm.assert_index_equal(cat1.categories, exp_idx)
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.int8)
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
tm.assert_numpy_array_equal(cat2._codes, exp_arr)
tm.assert_index_equal(cat2.categories, exp_idx2)
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype=np.int8)
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
tm.assert_numpy_array_equal(cat3._codes, exp_arr)
tm.assert_index_equal(cat3.categories, exp_idx)
    def test_categories_assignments(self):
s = Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1], dtype=np.int64)
s.categories = [1, 2, 3]
tm.assert_numpy_array_equal(s.__array__(), exp)
tm.assert_index_equal(s.categories, Index([1, 2, 3]))
# lengthen
with pytest.raises(ValueError):
s.categories = [1, 2, 3, 4]
# shorten
with pytest.raises(ValueError):
s.categories = [1, 2]
# Combinations of sorted/unique:
@pytest.mark.parametrize("idx_values", [[1, 2, 3, 4], [1, 3, 2, 4],
[1, 3, 3, 4], [1, 2, 2, 4]])
# Combinations of missing/unique
@pytest.mark.parametrize("key_values", [[1, 2], [1, 5], [1, 1], [5, 5]])
@pytest.mark.parametrize("key_class", [Categorical, CategoricalIndex])
def test_get_indexer_non_unique(self, idx_values, key_values, key_class):
# GH 21448
key = key_class(key_values, categories=range(1, 5))
# Test for flat index and CategoricalIndex with same/different cats:
for dtype in None, 'category', key.dtype:
idx = Index(idx_values, dtype=dtype)
expected, exp_miss = idx.get_indexer_non_unique(key_values)
result, res_miss = idx.get_indexer_non_unique(key)
tm.assert_numpy_array_equal(expected, result)
tm.assert_numpy_array_equal(exp_miss, res_miss)
def test_where_unobserved_nan(self):
ser = pd.Series(pd.Categorical(['a', 'b']))
result = ser.where([True, False])
expected = pd.Series(pd.Categorical(['a', None],
categories=['a', 'b']))
tm.assert_series_equal(result, expected)
# all NA
ser = pd.Series(pd.Categorical(['a', 'b']))
result = ser.where([False, False])
expected = pd.Series(pd.Categorical([None, None],
categories=['a', 'b']))
tm.assert_series_equal(result, expected)
def test_where_unobserved_categories(self):
ser = pd.Series(
Categorical(['a', 'b', 'c'], categories=['d', 'c', 'b', 'a'])
)
result = ser.where([True, True, False], other='b')
expected = pd.Series(
Categorical(['a', 'b', 'b'], categories=ser.cat.categories)
)
tm.assert_series_equal(result, expected)
def test_where_other_categorical(self):
ser = pd.Series(
Categorical(['a', 'b', 'c'], categories=['d', 'c', 'b', 'a'])
)
other = Categorical(['b', 'c', 'a'], categories=['a', 'c', 'b', 'd'])
result = ser.where([True, False, True], other)
expected = pd.Series(Categorical(['a', 'c', 'c'], dtype=ser.dtype))
tm.assert_series_equal(result, expected)
def test_where_warns(self):
ser = pd.Series(Categorical(['a', 'b', 'c']))
with tm.assert_produces_warning(FutureWarning):
result = ser.where([True, False, True], 'd')
expected = pd.Series(np.array(['a', 'd', 'c'], dtype='object'))
tm.assert_series_equal(result, expected)
    def test_where_ordered_differs_raises(self):
ser = pd.Series(
Categorical(['a', 'b', 'c'], categories=['d', 'c', 'b', 'a'],
ordered=True)
)
other = Categorical(['b', 'c', 'a'], categories=['a', 'c', 'b', 'd'],
ordered=True)
with tm.assert_produces_warning(FutureWarning):
result = ser.where([True, False, True], other)
expected = pd.Series(np.array(['a', 'c', 'c'], dtype=object))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index", [True, False])
def test_mask_with_boolean(index):
s = Series(range(3))
idx = Categorical([True, False, True])
if index:
idx = CategoricalIndex(idx)
assert com.is_bool_indexer(idx)
result = s[idx]
expected = s[idx.astype('object')]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index", [True, False])
def test_mask_with_boolean_raises(index):
s = Series(range(3))
idx = Categorical([True, False, None])
if index:
idx = CategoricalIndex(idx)
with pytest.raises(ValueError, match='NA / NaN'):
s[idx]
@pytest.fixture
def non_coercible_categorical(monkeypatch):
"""
Monkeypatch Categorical.__array__ to ensure no implicit conversion.
Raises
------
ValueError
When Categorical.__array__ is called.
"""
# TODO(Categorical): identify other places where this may be
# useful and move to a conftest.py
def array(self, dtype=None):
raise ValueError("I cannot be converted.")
with monkeypatch.context() as m:
m.setattr(Categorical, "__array__", array)
yield
def test_series_at(non_coercible_categorical):
arr = Categorical(['a', 'b', 'c'])
ser = Series(arr)
result = ser.at[0]
assert result == 'a'
|
bsd-3-clause
|
fabeschan/midigeneration
|
voices.py
|
1
|
7844
|
import numpy as np
import sys
import analyze, midi, patterns
from data import *
import pprint
import matplotlib.pyplot as plt
pp = pprint.PrettyPrinter(indent=4)
class penalty(object):
def __init__(self):
self.new_voice = 20.0
self.pitch_interval = 1.0
self.silence = 30.0 / 512 # per tick
self.collision = 10.0
def common_starts(notes):
''' Return a dictionary, key=start_pos, value=list of notes '''
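    # Illustration (hypothetical notes): three notes starting at ticks 0, 0 and 480
    # come back as {0: [na, nb], 480: [nc]}, each value list ordered by (pos, dur).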
d = {}
notes = sorted(notes, key=lambda v: (v.pos, v.dur))
i, pos = 0, 0
c_starts = []
d[pos] = c_starts
for cur in notes:
if pos != cur.pos:
pos = cur.pos
c_starts = []
d[pos] = c_starts
c_starts.append(cur)
if not d[0]: del d[0]
return d
def all_combinations(commons, voices):
''' commons: a list of (notes, voices)
returns: a list of list of (note, voice)
'''
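    # Illustration (hypothetical notes/voices): all_combinations([na, nb], [va, None])
    # yields assignments such as [(na, va), (nb, None)] and [(na, None), (nb, va)],
    # skipping any assignment in which two voices would cross in pitch.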
ret = []
for c in commons:
commons_ = commons[:]
commons_.remove(c)
for voice in voices:
voices_ = voices[:]
voices_.remove(voice)
combos = all_combinations(commons_, voices_)
if not combos: ret += [ [(c, voice)] ]
else:
for combo in combos:
cross = False
for e_n, e_v in combo:
if e_n and e_v and voice and c:
if c.pitch > e_n.pitch and voice.pitch < e_v.pitch:
cross = True
break # do not add to ret to avoid voices crossing
if c.pitch < e_n.pitch and voice.pitch > e_v.pitch:
cross = True
break # do not add to ret to avoid voices crossing
if not cross:
ret += [ [(c, voice)] + combo for combo in combos ]
return ret
def calculate_penalty(combo, best_so_far, best_so_far_pen, pen):
''' combo is a list of (note, last_note) '''
penalty = 0
#for n, v in combo:
# if not v:
# penalty += pen.new_voice
# else:
# #penalty += best_so_far_pen[v]
# pen_t_silence = (n.pos - v.pos - v.dur) * pen.silence
# pen_t_pitch_interval = abs(v.pitch - n.pitch) * pen.pitch_interval
# penalty += pen_t_silence + pen_t_pitch_interval
best_so_far = best_so_far.copy()
best_so_far_pen = best_so_far_pen.copy()
for n, v in combo:
best_so_far[n] = v
if v:
pen_t_silence = (n.pos - v.pos - v.dur) * pen.silence
pen_t_pitch_interval = abs(v.pitch - n.pitch) * pen.pitch_interval
best_so_far_pen[n] = best_so_far_pen[v] + pen_t_silence + pen_t_pitch_interval
else:
best_so_far_pen[n] = pen.new_voice
voices = extract_voices(best_so_far)
for v in voices:
last_note = v[-1]
penalty += best_so_far_pen[last_note]
return penalty
class voice(object):
def __init__(self, l=[]):
self.val = l
def __repr__(self):
return self.val.__repr__()
def identify_voices(piece, pen=None, verbose=False):
''' underlying assumptions:
- each note is entirely included in a voice
- voice only spans 1 pitch at any one moment
alg:
- store best-so-far voice ending at each note
'''
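    # In outline: walk the note onsets in order; at each onset match the notes
    # starting there against the voices still within reach (plus None entries that
    # stand for opening a new voice), score every non-crossing assignment with
    # calculate_penalty(), and keep the cheapest one in best_so_far.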
# here assume piece has exactly one track (=tracks[0])
iter = 0
if not pen:
pen = penalty()
tr = piece.tracks[0]
track_notes = tr.notes
cs = common_starts(track_notes)
best_so_far = {} # stores previous note in its voice given specified note
best_so_far_pen = {} # stores penalty score of voice ending at specified note
final_voices = set() # will get updated with the voices that we ultimately want in the end
cs_keys = sorted(cs.keys())
for k in cs_keys:
commons = cs.get(k, []) # get list of notes that start at pos k
# find a voice for each note in commons
# find best combination that gives minimum overall penalty
# do not need to consider voices that are too far away
temp_best_so_far = { n:v for n, v in best_so_far.iteritems() if (k - n.pos - n.dur) * pen.silence < pen.new_voice }
#print temp_best_so_far
voices = temp_best_so_far.keys()
print "\nvoices", len(voices)
for i in xrange(len(commons)): # add new empty voices to the mix
voices.append(None)
combinations_temp = all_combinations(commons, voices) # first generate all combos
# get rid of duplicates
combinations = set()
for combos in combinations_temp:
t = frozenset(combo for combo in combos)
combinations.add(t)
print "\ncombo", len(combinations)
# combo is a list of (note, voice). Choose best combo
best_combo = min(combinations, key=lambda c: calculate_penalty(c, temp_best_so_far, best_so_far_pen, pen))
lowest_penalty = calculate_penalty(best_combo, temp_best_so_far, best_so_far_pen, pen)
for n, v in best_combo:
if not v:
best_so_far_pen[n] = pen.new_voice
best_so_far[n] = None
else:
best_so_far_pen[n] = calculate_penalty([(n, v)], temp_best_so_far, best_so_far_pen, pen)
best_so_far[n] = v
sys.stdout.write("\r(Progress: %d/%d)" % (iter, len(cs_keys)))
iter += 1
sys.stdout.flush()
else:
sys.stdout.write("\r")
sys.stdout.flush()
return best_so_far, best_so_far_pen
#pp.pprint(best_so_far)
def extract_voices(best_so_far):
'''
Reconstruct the voices
'''
voices = []
all_notes = best_so_far.keys()
#print "all:", all_notes
while all_notes:
all_notes = sorted(list(all_notes), key=lambda n: -n.pos)
n = all_notes[0]
voice = _extract_voice(best_so_far, n)
#print "voice:", voice
voices.append(voice)
for a in voice:
all_notes.remove(a)
del best_so_far[a]
return voices
def _extract_voice(best_so_far, n):
voice = []
if n in best_so_far and best_so_far[n] and best_so_far[n] in best_so_far:
r = _extract_voice(best_so_far, best_so_far[n])
r.append(n)
return r
else:
return [n]
def plot_voices(voices, bar=0):
for voice_ in voices:
if voice_:
x = [ n.pos for n in voice_ ]
y = [ n.pitch for n in voice_ ]
plt.step(x, y, marker='o', where='post')
ax = plt.axes()
start, end = ax.get_xlim()
if bar:
ax.xaxis.set_ticks(np.arange(start, end, bar))
ax.xaxis.grid(True)
else:
print "WARNING: voice_ has no notes"
plt.show()
if __name__ == '__main__':
if len(sys.argv) > 1:
musicpiece = piece(sys.argv[1])
#musicpiece = a4piece
bar = musicpiece.bar
print "BAR:", bar
if len(sys.argv) == 1:
commons = [3,4,2,3, None]
voices = [1,2,3]
combinations = all_combinations(commons, voices) # first generate all combos
uniques = set()
for combos in combinations:
t = frozenset(combo for combo in combos)
uniques.add(t)
pp.pprint(uniques)
if len(sys.argv) == 2:
best_so_far, best_so_far_pen = identify_voices(musicpiece)
voices = extract_voices(best_so_far)
plot_voices(voices, bar)
if len(sys.argv) == 4: # midi-file, b0, b1
musicpiece = musicpiece.segment_by_bars(int(sys.argv[2]), int(sys.argv[3]))
best_so_far, best_so_far_pen = identify_voices(musicpiece)
voices = extract_voices(best_so_far)
plot_voices(voices, bar)
|
mit
|
lbishal/scikit-learn
|
examples/linear_model/plot_ransac.py
|
73
|
1859
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
lw = 2
plt.scatter(X[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.',
label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask], color='gold', marker='.',
label='Outliers')
plt.plot(line_X, line_y, color='navy', linestyle='-', linewidth=lw,
label='Linear regressor')
plt.plot(line_X, line_y_ransac, color='cornflowerblue', linestyle='-',
linewidth=lw, label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
|
bsd-3-clause
|
nvoron23/statsmodels
|
statsmodels/tools/parallel.py
|
32
|
2180
|
"""Parallel utility function using joblib
copied from https://github.com/mne-tools/mne-python
Author: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
License: Simplified BSD
changes for statsmodels (Josef Perktold)
- try import from joblib directly, (doesn't import all of sklearn)
"""
from __future__ import print_function
from statsmodels.tools.sm_exceptions import (ModuleUnavailableWarning,
module_unavailable_doc)
def parallel_func(func, n_jobs, verbose=5):
"""Return parallel instance with delayed function
Util function to use joblib only if available
Parameters
----------
func: callable
A function
n_jobs: int
Number of jobs to run in parallel
verbose: int
Verbosity level
Returns
-------
parallel: instance of joblib.Parallel or list
The parallel object
my_func: callable
func if not parallel or delayed(func)
n_jobs: int
Number of jobs >= 0
Examples
--------
>>> from math import sqrt
>>> from statsmodels.tools.parallel import parallel_func
>>> parallel, p_func, n_jobs = parallel_func(sqrt, n_jobs=-1, verbose=0)
>>> print(n_jobs)
>>> parallel(p_func(i**2) for i in range(10))
"""
try:
try:
from joblib import Parallel, delayed
except ImportError:
from sklearn.externals.joblib import Parallel, delayed
parallel = Parallel(n_jobs, verbose=verbose)
my_func = delayed(func)
if n_jobs == -1:
try:
import multiprocessing
n_jobs = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
import warnings
warnings.warn(module_unavailable_doc.format('multiprocessing'),
ModuleUnavailableWarning)
n_jobs = 1
except ImportError:
import warnings
warnings.warn(module_unavailable_doc.format('joblib'),
ModuleUnavailableWarning)
n_jobs = 1
my_func = func
parallel = list
return parallel, my_func, n_jobs
|
bsd-3-clause
|
ai-se/XTREE
|
src/tools/misc.py
|
2
|
1959
|
from pandas import DataFrame, read_csv, concat
from os import walk
import numpy as np
from pdb import set_trace
import sys
def say(text):
sys.stdout.write(str(text))
def shuffle(df, n=1, axis=0):
df = df.copy()
for _ in range(n):
df.apply(np.random.shuffle, axis=axis)
return df
def csv2DF(dir, as_mtx=False, toBin=False):
files=[]
for f in dir:
df=read_csv(f)
headers = [h for h in df.columns if '?' not in h]
# set_trace()
if isinstance(df[df.columns[-1]][0], str):
df[df.columns[-1]] = DataFrame([0 if 'N' in d or 'n' in d else 1 for d in df[df.columns[-1]]])
if toBin:
df[df.columns[-1]]=DataFrame([1 if d > 0 else 0 for d in df[df.columns[-1]]])
files.append(df[headers])
"For N files in a project, use 1 to N-1 as train."
data_DF = concat(files)
if as_mtx: return data_DF.as_matrix()
else: return data_DF
def explore(dir='../Data/Jureczko/', name=None):
datasets = []
for (dirpath, dirnames, filenames) in walk(dir):
datasets.append(dirpath)
training = []
testing = []
if name:
for k in datasets[1:]:
if name in k:
                if 'Jureczko' in dir or 'mccabe' in dir:
train = [[dirPath, fname] for dirPath, _, fname in walk(k)]
test = [train[0][0] + '/' + train[0][1].pop(-1)]
# set_trace()
training = [train[0][0] + '/' + p for p in train[0][1] if not p == '.DS_Store' and '.csv' in p]
testing = test
return training, testing
elif 'Seigmund' in dir:
train = [dir+name+'/'+fname[0] for dirPath, _, fname in walk(k)]
return train
else:
for k in datasets[1:]:
train = [[dirPath, fname] for dirPath, _, fname in walk(k)]
test = [train[0][0] + '/' + train[0][1].pop(-1)]
training.append(
[train[0][0] + '/' + p for p in train[0][1] if not p == '.DS_Store'])
testing.append(test)
return training, testing
|
mit
|
afraser/CellProfiler-Analyst
|
cpa/dimensredux.py
|
1
|
27869
|
#!/usr/bin/env python
# TODO: Add not-classified data functionality, activate url links to 'About'
'''
GUI for visual dimensionality reduction of the data via various methods:
Singular Value Decomposition (Principal Component Analysis)
*A Tutorial on Principal Component Analysis - Jonathon Shlens:
http://www.snl.salk.edu/~shlens/pca.pdf
t-Distributed Stochastic Neighbor Embedding
* L.J.P. van der Maaten and G.E. Hinton. Visualizing High-Dimensional Data Using t-SNE.
Journal of Machine Learning Research 9(Nov):2579-2605, 2008.
http://jmlr.csail.mit.edu/papers/volume9/vandermaaten08a/vandermaaten08a.pdf
By: Juan Escribano Navarro (Intelligent Systems Department, Radboud Universiteit Nijmegen) - 01/05/2010
Modified By: Joris Kraak (Department of Electrical Engineering, Signal Processing Systems Group, Eindhoven University of Technology) - 28-12-2010
'''
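# plot_pca() below follows the usual PCA-by-SVD recipe (pca_svd() itself is
# defined elsewhere in this module): mean-center the feature matrix, take
# X = U S V^T, use the leading columns of U as per-object scores and V^T as the
# feature loadings, and read the explained variance off the singular values.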
import sys
import logging
from operator import itemgetter
import numpy as np
import wx
import wx.aui
from wx.combo import OwnerDrawnComboBox as ComboBox
from matplotlib import cm
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from imagetools import ShowImage
from dbconnect import DBConnect
from properties import Properties
SVD = 'SVD: Singular Value Decomposition'
TSNE = 't-SNE: t-Distributed Stochastic Neighbor Embedding'
COLORS = ['g', 'r', 'g', 'g', 'g', 'b', 'b', 'b', 'darkorange', 'greenyellow', 'darkorchid', 'aqua', 'deeppink', 'sienna', 'bisque', 'cornflowerblue', 'goldenrod', 'indigo', 'gray', 'olive', 'steelblue']
class PlotPanel(wx.Panel):
'''
Principal Component Analysis (PCA) plot (PCA1 against PCA2) GUI
'''
def __init__(self, parent, id= -1, dpi=None, **kwargs):
wx.Panel.__init__(self, parent, id=id, **kwargs)
self.figure = Figure(dpi=dpi, figsize=(2, 2))
self.canvas = Canvas(self, -1, self.figure)
self.figure.set_facecolor((1, 1, 1))
self.figure.set_edgecolor((1, 1, 1))
self.canvas.SetBackgroundColour('white')
self.subplot = self.figure.add_subplot(111)
self.plot_scores = None
self.class_masks = None
self.class_names = None
self.Loadings = None
self.object_opacity = None
self.object_accuracies = None
self.leg = None
self.maskedPCA1 = None
self.maskedPCA2 = None
self.axes = None
# If the script is loaded from ClassifierGUI, load the classification weaklearners
try:
self.classifier = classifier
self.classifier_rules = classifier.algorithm.weak_learners
except:
self.classifier_rules = [('None', 0, np.array([0, 0]))]
self.chMap = p.image_channel_colors
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
POSITION_OF_CONFIGURE_SUBPLOTS_BTN = 6
self.toolbar.DeleteToolByPos(POSITION_OF_CONFIGURE_SUBPLOTS_BTN)
self.statusBar = wx.StatusBar(self, -1)
self.statusBar.SetFieldsCount(1)
self.motion_event_active = False
self.canvas.mpl_connect('motion_notify_event', self.update_status_bar)
self.canvas.mpl_connect('button_press_event', self.on_open_image)
self.hide_legend_btn = wx.Button(self, -1, " Hide legend ")
wx.EVT_BUTTON(self.hide_legend_btn, -1, self.hide_show_legend)
self.hide_legend = True
tools_sizer = wx.BoxSizer(wx.HORIZONTAL)
tools_sizer.Add(self.toolbar, 0, wx.RIGHT | wx.EXPAND)
tools_sizer.AddSpacer((5, -1))
tools_sizer.Add(self.hide_legend_btn, 0, wx.LEFT | wx.EXPAND)
tools_sizer.AddSpacer((5, -1))
tools_sizer.Add(self.statusBar, 0, wx.LEFT | wx.EXPAND)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.canvas, 1, wx.EXPAND)
sizer.Add(tools_sizer, 0, wx.EXPAND)
self.SetSizer(sizer)
def set_plot_type(self, plot_scores):
'''
        Set the plot type (Scores or Loadings) for each notebook page
'''
self.plot_scores = plot_scores
def set_colormap(self, class_array):
'''
Set the colormap based on the number of different classes to plot
'''
self.colormap = cm.get_cmap('hsv')
num_colors = len(class_array)
class_value = np.array(xrange(1, (num_colors + 2)), dtype='float') / num_colors
color_set = np.array(self.colormap(class_value))
return color_set
def on_open_image(self, event):
if event.button == 2 and self.plot_scores == "Scores" and event.inaxes:
self.open_image()
def open_image(self):
'''
Open the image of the selected cell in the Scores plot
'''
imViewer = ShowImage(self.actual_key[:-1], self.chMap[:],
parent=self.classifier, brightness=1.0,
contrast=None)
imViewer.imagePanel.SelectPoint(db.GetObjectCoords(self.actual_key))
def hide_show_legend(self, event):
'''
Hide or show the legend on the canvas by pressing the button
'''
if self.leg is not None:
if self.hide_legend:
self.leg.set_visible(False)
self.figure.canvas.draw()
self.hide_legend = False
self.hide_legend_btn.SetLabel(label='Show legend')
else:
self.leg.set_visible(True)
self.figure.canvas.draw()
self.hide_legend = True
self.hide_legend_btn.SetLabel(label=' Hide legend ')
def update_status_bar(self, event):
'''
Show the key for the nearest object (measured as the Euclidean distance) to the mouse pointer in the
        plot (scores plot) or the nearest feature
(loadings plot)
'''
if event.inaxes and self.motion_event_active:
x, y = event.xdata, event.ydata
if self.plot_scores == "Scores":
dist = np.hypot((x - self.Scores[:, 0]), (y - self.Scores[:, 1]))
object_dict_key = np.where(dist == np.amin(dist))
xy_key = int(object_dict_key[0][0])
if self.object_accuracies:
errorData = ', CA = %0.1f%%' % ((1-self.object_opacity[xy_key])*100.0)
else:
errorData = ''
self.statusBar.SetStatusText(("Object key = " + str(self.data_dic[xy_key]) + errorData), 0)
self.actual_key = self.data_dic[xy_key]
elif self.plot_scores == "Loadings":
dist = np.hypot((x - self.Loadings[0]), (y - self.Loadings[1]))
feature_dict_key = np.where(dist == np.amin(dist))
xy_key = int(feature_dict_key[0])
feat_text = self.features_dic[xy_key].split('_')
self.statusBar.SetStatusText(('_'.join(feat_text[1:])), 0)
def plot_pca(self):
'''
Plot the Principal Component Analysis scores (cells) and loadings (features)
along with the percentage of data variance the scores represent
'''
self.subplot.clear()
# Only obtain class data from the database if no data is available yet
if self.class_masks is None or self.class_names is None:
self.class_masks, self.class_names = self.create_class_masks()
self.data = np.nan_to_num(self.data) # Eliminate NaNs
# Calculate PCA-SVD and mask data with class information
centered = self.mean_center(self.data)
U, S, self.Loadings, explained_variance = self.pca_svd(centered, 100, True)
self.Scores = np.array(U[:, 0:2])
self.maskedPCA1, self.maskedPCA2 = self.mask_data(len(self.class_names),
self.class_masks, self.Scores)
self.axes = explained_variance[0:2]
self.color_set = self.set_colormap(self.class_names)
# Plot the first two PCAs' Scores in the Scores canvas
if self.plot_scores == "Scores":
handles = []
labels = []
# Determine the different opacities for the objects. This is set to 1 if no opacities have been specified.
if self.object_opacity is None:
self.object_opacity = np.ones([self.maskedPCA1.shape[0], 1])
self.object_accuracies = False
elif self.object_accuracies is None:
self.object_accuracies = True
opacities = np.unique(self.object_opacity)
nOpacity = len(opacities)
# For each class and opacity combination plot the corresponding objects
for i in xrange(len(self.class_names)):
cell_count = np.shape(np.nonzero(self.maskedPCA1[:, i]))
for j in xrange(nOpacity):
showObjects = np.where(self.object_opacity == opacities[j])
subHandle = self.subplot.scatter(self.maskedPCA1[showObjects[0], i], self.maskedPCA2[showObjects[0], i], 8, c=self.color_set[i, :], linewidth="0.25", alpha=0.25+0.75*opacities[j])
# The highest opacity objects are added to the legend
if opacities[j] == np.max(opacities):
handles.append(subHandle)
labels.append(self.class_names[i] + ': ' + str(cell_count[1]))
# Construct the legend and make up the rest of the plot
self.leg = self.subplot.legend(handles, labels, loc=4, fancybox=True, handlelength=1)
self.leg.get_frame().set_alpha(0.25)
            # explained_variance holds the per-component fraction of variance
            # (see pca_svd), so the axis labels can use it directly
            x_var = round((self.axes[0] * 100), 2)
            y_var = round((self.axes[1] * 100), 2)
            x_axe_var = 'Explained variance: ' + str(x_var) + '%'
            y_axe_var = 'Explained variance: ' + str(y_var) + '%'
self.subplot.set_xlabel(x_axe_var, fontsize=12)
self.subplot.set_ylabel(y_axe_var, fontsize=12)
self.subplot.axhline(0, -100000, 100000, c='k', lw=0.1)
self.subplot.axvline(0, -100000, 100000, c='k', lw=0.1)
self.figure.canvas.draw()
elif self.plot_scores == "Loadings":
# Plot the first two PCAs' Loadings in the Loading canvas
weaklearners_mask = np.zeros((np.shape(self.Loadings[0])))
for key in self.features_dic.keys():
for value in self.classifier_rules:
if value[0] == self.features_dic[key]:
weaklearners_mask[key] += 1
scatter_mask = weaklearners_mask + 1
colors_mask = []
size_mask = []
for i in xrange(len(scatter_mask)):
colors_mask.append(COLORS[int(scatter_mask[i])])
size_mask.append((int(scatter_mask[i]) ** 2) * 5)
self.subplot.scatter(self.Loadings[0], self.Loadings[1], c=colors_mask,
s=size_mask, linewidth="0.5", marker='o')
self.subplot.axhline(0, -100000, 100000, c='k', lw=0.1)
self.subplot.axvline(0, -100000, 100000, c='k', lw=0.1)
self.figure.canvas.draw()
self.motion_event_active = True
def plot_tsne(self):
'''
Plot the t-Distributed Stochastic Neighbor Embedding (t-SNE) distribution of the data
'''
self.subplot.clear()
self.data = np.nan_to_num(self.data) # Eliminate NaNs
centered = self.mean_center(self.data)
standardized = self.standardization(centered)
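        # Note: t-SNE (like PCA) is sensitive to feature scaling, which is why
        # the measurements are mean-centered and scaled to unit variance first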
# Calculate t-SNE of the data and mask it (python t-SNE version if Intel IPP is not installed)
try:
from calc_tsne import calc_tsne
U = calc_tsne(standardized, 2, 50, 20.0)
except:
logging.warning('''Could not use fast t-SNE. You may need to install the Intel Integrated Performance Libraries. Will use normal t-SNE instead.''')
try:
from tsne import tsne
U = tsne(standardized, 2, 50, 20.0)
except:
logging.error('''Both t-SNE versions failed. Your dataset may be too large for t-SNE to handle. Will not plot t-SNE results.''')
return
self.Scores = U[:, 0:2]
if self.class_masks is None or self.class_names is None:
self.class_masks, self.class_names = self.create_class_masks()
self.masked_X, self.masked_Y = self.mask_data(len(self.class_names), self.class_masks, self.Scores)
# Plot the masked t-SNE results in the Scores canvas
self.color_set = self.set_colormap(self.class_names)
handles = []
labels = []
# Determine the different opacities for the objects. This is set to 1 if no opacities have been specified.
if self.object_opacity is None:
self.object_opacity = np.ones([self.masked_X.shape[0], 1])
self.object_accuracies = False
elif self.object_accuracies is None:
self.object_accuracies = True
opacities = np.unique(self.object_opacity)
nOpacity = len(opacities)
# For each class and opacity combination plot the corresponding objects
for i in xrange(len(self.class_names)):
cell_count = np.shape(np.nonzero(self.masked_X[:, i]))
for j in xrange(nOpacity):
showObjects = np.where(self.object_opacity == opacities[j])
subHandle = self.subplot.scatter(self.masked_X[showObjects, i], self.masked_Y[showObjects, i], 8, c=self.color_set[i, :], linewidth="0.25", alpha=0.25+0.75*opacities[j])
# The highest opacity objects are added to the legend
if opacities[j] == np.max(opacities):
handles.append(subHandle)
labels.append(self.class_names[i] + ': ' + str(cell_count[1]))
self.leg = self.subplot.legend(handles, labels, loc=4, fancybox=True, handlelength=1)
self.leg.get_frame().set_alpha(0.25)
self.subplot.axhline(0, -100000, 100000, c='k', lw=0.1)
self.subplot.axvline(0, -100000, 100000, c='k', lw=0.1)
self.figure.canvas.draw()
self.motion_event_active = True
def clean_canvas(self):
self.subplot.clear()
def standardization(self, centered_data):
'''
Standardize data prior to calculation in order to improve
the performance over measurements with large differences
in their value ranges
'''
standards = np.std(centered_data, 0)
for value in standards:
if value == 0:
                logging.error('Division by zero, cannot proceed (an object measurement in your dataset has 0 standard deviation, please check your database)')
standardized_data = centered_data / standards
return standardized_data
def mean_center(self, raw_data):
'''
Centering the measurements data around the mean is necessary prior to
calculation
'''
        row, col = np.shape(raw_data)
        # Work on a copy so the caller's array is not centered in place
        centered_data = raw_data.copy()
        mean_data = raw_data.mean(axis=0)
        for i in xrange(row):
            centered_data[i] -= mean_data
        centered_data = centered_data[:, np.var(centered_data, axis=0) != 0]
return centered_data
def pca_svd(self, data, PCs=100, standardize=True):
'''
Calculate the eigenvectors of the data array using SVD
(Singular Value Decomposition) method
'''
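        # With data X = U * S * V^T (mean-centered, optionally standardized),
        # the scores used above are the columns of U and the loadings are the
        # rows of V; S holds the singular values.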
row, col = np.shape(data)
if PCs > col:
PCs = col
if standardize:
data = self.standardization(data)
U, S, V = np.linalg.svd(data, full_matrices=False)
        # Calculate the percentage of the data variance each principal component explains
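        # Deflation: subtract each rank-1 component T * P^T from a copy of the
        # data and track the remaining sum of squares; the drop at step k is
        # the fraction of variance explained by component k.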
E = data.copy()
row, col = np.shape(E)
explained_variance = np.zeros((PCs))
total_explained_variance = 0
init_total_error = np.sum(np.square(E))
for k in xrange(PCs):
T = (U[:, k].reshape(row, 1)) * S[k]
V_t = np.transpose(V)
P = V_t[:, k].reshape(col, 1)
E = E - T * (np.transpose(P))
total_error = np.sum(np.square(E))
total_object_residual_variance = (total_error / init_total_error)
explained_variance[k] = 1 - total_object_residual_variance - total_explained_variance
total_explained_variance += explained_variance[k]
return U, S, V, explained_variance
def create_class_masks(self):
'''
Create class masks for the data based on the classification data from CPAnalyst.
This is done in order to print Scoring plots with points in different colors
for each class
'''
class_data = db.execute('SELECT class, class_number FROM %s ' \
'ORDER BY %s ASC, %s ASC' % (p.class_table, \
p.image_id, p.object_id))
class_name_number = set([result for result in class_data])
class_name_number = sorted(class_name_number, key=itemgetter(1))
class_names = [item[0] for item in class_name_number]
class_number = np.array([result[1] for result in class_data])
num_classes = len(class_names)
# In case class numbers are missing in the range (for instance some classes
# that were available to train objects in have no objects classified in them)
# the class numbers should be remapped
class_ids = [item[1] for item in class_name_number]
max_id = np.max(class_ids)
if len(class_ids) != max_id:
logging.info('Found non-consecutive class IDs. Remapping class IDs.')
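            # e.g. class IDs [1, 2, 4] become [1, 2, 3]: working from the
            # largest missing ID down, every ID above it is shifted down by one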
missing_ids = np.flipud(np.setdiff1d(np.arange(max_id)+1, class_ids))
while missing_ids.shape != (0,):
indices = class_number >= missing_ids[0]
class_number[indices] -= 1
missing_ids = np.delete(missing_ids, 0)
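        # Build a one-hot indicator matrix: class_masks[i, j] == 1 iff object i
        # belongs to class number j + 1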
class_masks = np.zeros((len(class_number), num_classes))
for i in range(len(class_number)):
class_col = class_number[i] - 1
class_masks[i, class_col] = 1
return class_masks, class_names
def mask_data(self, num_classes, class_masks, Scores):
'''
        Mask the Scores matrices using the masks from create_class_masks
'''
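        # Multiplying by the one-hot masks keeps each object's (x, y) score in
        # the column of its own class and zeros it elsewhere, so each class can
        # be scattered in its own color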
row = np.size(Scores[:, 0])
col = num_classes
masked_data_X = np.zeros((row, col))
masked_data_Y = np.zeros((row, col))
for i in xrange(num_classes):
masked_data_X[:, i] = Scores[:, 0] * class_masks[:, i]
masked_data_Y[:, i] = Scores[:, 1] * class_masks[:, i]
return masked_data_X, masked_data_Y
class PlotControl(wx.Panel):
'''
Control panel for the dimensionality reduction analysis
'''
def __init__(self, parent, fig_sco, fig_load, **kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self.fig_sco = fig_sco
self.fig_load = fig_load
sizer = wx.BoxSizer(wx.VERTICAL)
self.method_choice = ComboBox(self, -1, choices=[SVD, TSNE], style=wx.CB_READONLY)
self.method_choice.Select(0)
self.update_chart_btn = wx.Button(self, -1, "Show plot")
self.help_btn = wx.Button(self, -1, "About")
sz = wx.BoxSizer(wx.HORIZONTAL)
sz.Add(wx.StaticText(self, -1, "Method:"))
sz.AddSpacer((5, -1))
sz.Add(self.method_choice, 1, wx.EXPAND)
sizer.Add(sz, 1, wx.EXPAND)
sizer.AddSpacer((-1, 5))
sz2 = wx.BoxSizer(wx.HORIZONTAL)
sz2.Add(self.help_btn, wx.LEFT)
sz2.AddSpacer((400, -1))
sz2.Add(self.update_chart_btn, wx.RIGHT)
sizer.Add(sz2, 1, wx.EXPAND)
sizer.AddSpacer((-1, 5))
wx.EVT_BUTTON(self.update_chart_btn, -1, self.on_show_pressed)
wx.EVT_BUTTON(self.help_btn, -1, self.on_show_about)
self.SetSizer(sizer)
self.Show(1)
def on_show_about(self, evt):
'''
Shows a message box with the version number etc.
'''
message = ('Dimensionality Reduction Plot was developed at the Intelligent Systems Dept., '
'Radboud Universiteit Nijmegen as part of the CellProfiler project and is'
' distributed under the GNU General Public License version 2.\n'
'\n'
'For more information about the dimensionality reduction algorithms check:\n'
'\n'
'*Singular Value Decomposition: http://www.snl.salk.edu/~shlens/pca.pdf\n'
'\n'
'*t-SNE: http://homepage.tudelft.nl/19j49/t-SNE.html\n')
dlg = wx.MessageDialog(self, message, 'CellProfiler Analyst 2.0', style=wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
def on_show_pressed(self, evt):
'''
Show the selected dimensionality reduction plot on the canvas
'''
selected_method = self.method_choice.GetStringSelection()
if selected_method == SVD:
self.fig_sco.plot_pca()
self.fig_load.plot_pca()
elif selected_method == TSNE:
self.fig_sco.plot_tsne()
self.fig_load.clean_canvas()
class PlotNotebook(wx.Panel):
'''
A simple wx notebook to create different tabs for Scores and Loadings plot
'''
def __init__(self, parent, id= -1):
wx.Panel.__init__(self, parent, id=id)
self.nb = wx.aui.AuiNotebook(self)
sizer = wx.BoxSizer()
sizer.Add(self.nb, 1, wx.EXPAND)
self.SetSizer(sizer)
def add(self, name):
page = PlotPanel(self.nb)
self.nb.AddPage(page, name)
return page
class StopCalculating:
pass
class PlotMain(wx.Frame):
'''
Dimensionality reduction GUI main frame
'''
def __init__(self, parent, properties = None, show_controls = True, size=(600, 600), loadData = True, **kwargs):
wx.Frame.__init__(self, parent, -1, size=size, title='Dimensionality Reduction Plot', **kwargs)
self.SetName('Plot main')
if properties is not None:
global p
p = properties
if not p.is_initialized():
logging.critical('Classifier requires a properties file. Exiting.')
raise Exception('Classifier requires a properties file. Exiting.')
global db
db = DBConnect.getInstance()
global classifier
classifier = parent
if loadData:
# Define a progress dialog
dlg = wx.ProgressDialog('Fetching cell data...', '0% Complete', 100, classifier,
wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME |
wx.PD_REMAINING_TIME | wx.PD_CAN_ABORT)
def cb(frac):
cont, skip = dlg.Update(int(frac * 100.), '%d%% Complete'%(frac * 100.))
if not cont: # cancel was pressed
dlg.Destroy()
raise StopCalculating()
# Load the data for each object
try:
self.data, self.data_dic = self.load_obj_measurements(cb)
except StopCalculating:
self.PostMessage('User canceled updating training set.')
return
dlg.Destroy()
else:
self.data, self.data_dic = None, None
self.features_dic = self.load_feature_names()
self.class_masks = None
self.class_names = None
self.object_opacity = None
figpanel = PlotNotebook(self)
self.figure_scores = figpanel.add('Scores')
self.figure_loadings = figpanel.add('Loadings')
self.update_figures()
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(figpanel, 1, wx.EXPAND)
configpanel = PlotControl(self, self.figure_scores, self.figure_loadings)
sizer.Add(configpanel, 0, wx.EXPAND | wx.ALL, 5)
self.SetSizer(sizer)
self.Centre()
def load_obj_measurements(self, cb = None):
'''
Load all cell measurements from the DB into a Numpy array and a dictionary
The dictionary links each object to its key to show it when the mouse is on
its dot representation in the plot.
'''
self.filter_col_names(p.object_table)
all_keys = map(db.GetObjectsFromImage, db.GetAllImageKeys())
obj_counts = db.GetPerImageObjectCounts()
total_obj_count = sum(k[1] for k in obj_counts)
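        # Determine the per-object measurement count from the first non-empty
        # image so the full data matrix can be preallocated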
for key in all_keys:
if key:
measurements = len(db.GetCellDataForClassifier(key[0]))
break
data = np.zeros((total_obj_count, measurements))
data_dic = {}
key_list = [key for image_keys in all_keys for key in image_keys]
nKeys = float(len(key_list))
for index, key in enumerate(key_list):
cb(index / nKeys)
data[index, :] = db.GetCellDataForClassifier(key)
data_dic[index] = key
return data, data_dic
def load_feature_names(self):
'''
Load feature names for loadings plot.
'''
feature_names = db.GetColnamesForClassifier()
features_dictionary = {}
for i, feat in enumerate(feature_names):
features_dictionary[i] = feat
return features_dictionary
def set_data(self, data, data_dic, class_masks, class_names, object_opacity=None):
'''
Stores data to be used in the dimensionality reduction process
'''
self.data = data
self.data_dic = data_dic
self.class_masks = class_masks
self.class_names = class_names
self.object_opacity = object_opacity
        self.update_figures()
def filter_col_names(self, table):
'''
        Add DB non-measurement column names to the 'ignore columns' list.
        This is done to avoid using their data in the calculations
'''
col_names = db.GetColumnNames(table)
filter_cols = [p.cell_x_loc, p.cell_y_loc, p.plate_id, p.well_id, p.image_id]
if not p.classifier_ignore_columns:
p.classifier_ignore_columns = []
        for column in filter_cols:
            if column in col_names:
                p.classifier_ignore_columns.append(column)
def update_figures(self):
self.figure_scores.data, self.figure_scores.data_dic = self.data, self.data_dic
self.figure_scores.class_masks, self.figure_scores.class_names = self.class_masks, self.class_names
self.figure_scores.object_opacity = self.object_opacity
self.figure_scores.set_plot_type("Scores")
self.figure_loadings.data, self.figure_loadings.data_dic = self.data, self.data_dic
self.figure_loadings.class_masks, self.figure_loadings.class_names = self.class_masks, self.class_names
self.figure_loadings.object_opacity = self.object_opacity
self.figure_loadings.features_dic = self.features_dic
self.figure_loadings.set_plot_type("Loadings")
if __name__ == "__main__":
app = wx.PySimpleApp()
    logging.basicConfig(level=logging.INFO)
global p
p = Properties.getInstance()
global db
db = DBConnect.getInstance()
try:
propsFile = sys.argv[1]
p.LoadFile(propsFile)
except:
if not p.show_load_dialog():
raise Exception("DimensRedux.py needs a CPAnalyst properties file passed as args. Exiting...")
sys.exit()
pca_main = PlotMain(None)
pca_main.Show()
app.MainLoop()
|
gpl-2.0
|
datapythonista/pandas
|
pandas/tests/io/formats/test_console.py
|
9
|
2460
|
import locale
import pytest
from pandas._config import detect_console_encoding
class MockEncoding: # TODO(py27): replace with mock
"""
Used to add a side effect when accessing the 'encoding' property. If the
side effect is a str in nature, the value will be returned. Otherwise, the
side effect should be an exception that will be raised.
"""
def __init__(self, encoding):
super().__init__()
self.val = encoding
@property
def encoding(self):
return self.raise_or_return(self.val)
@staticmethod
def raise_or_return(val):
if isinstance(val, str):
return val
else:
raise val
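# A minimal usage sketch (illustrative only, not part of the test suite):
# MockEncoding("utf-8").encoding returns "utf-8", while
# MockEncoding(AttributeError).encoding raises AttributeError.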
@pytest.mark.parametrize("empty,filled", [["stdin", "stdout"], ["stdout", "stdin"]])
def test_detect_console_encoding_from_stdout_stdin(monkeypatch, empty, filled):
    # Ensures that sys.stdout.encoding or sys.stdin.encoding is used when
    # they have values filled in.
# GH 21552
with monkeypatch.context() as context:
context.setattr(f"sys.{empty}", MockEncoding(""))
context.setattr(f"sys.{filled}", MockEncoding(filled))
assert detect_console_encoding() == filled
@pytest.mark.parametrize("encoding", [AttributeError, IOError, "ascii"])
def test_detect_console_encoding_fallback_to_locale(monkeypatch, encoding):
# GH 21552
with monkeypatch.context() as context:
context.setattr("locale.getpreferredencoding", lambda: "foo")
context.setattr("sys.stdout", MockEncoding(encoding))
assert detect_console_encoding() == "foo"
@pytest.mark.parametrize(
"std,locale",
[
["ascii", "ascii"],
["ascii", locale.Error],
[AttributeError, "ascii"],
[AttributeError, locale.Error],
[IOError, "ascii"],
[IOError, locale.Error],
],
)
def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale):
    # When both the stdout/stdin encoding and locale preferred encoding checks
    # fail (or return 'ascii'), we should default to the sys default encoding.
# GH 21552
with monkeypatch.context() as context:
context.setattr(
"locale.getpreferredencoding", lambda: MockEncoding.raise_or_return(locale)
)
context.setattr("sys.stdout", MockEncoding(std))
context.setattr("sys.getdefaultencoding", lambda: "sysDefaultEncoding")
assert detect_console_encoding() == "sysDefaultEncoding"
|
bsd-3-clause
|
victor-prado/broker-manager
|
environment/lib/python3.5/site-packages/pandas/tests/test_compat.py
|
9
|
2455
|
# -*- coding: utf-8 -*-
"""
Testing that functions from compat work as expected
"""
from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap,
lfilter, builtins, iterkeys, itervalues, iteritems,
next)
import pandas.util.testing as tm
class TestBuiltinIterators(tm.TestCase):
def check_result(self, actual, expected, lengths):
for (iter_res, list_res), exp, length in zip(actual, expected,
lengths):
self.assertNotIsInstance(iter_res, list)
tm.assertIsInstance(list_res, list)
iter_res = list(iter_res)
self.assertEqual(len(list_res), length)
self.assertEqual(len(iter_res), length)
self.assertEqual(iter_res, exp)
self.assertEqual(list_res, exp)
def test_range(self):
actual1 = range(10)
actual2 = lrange(10)
actual = [actual1, actual2],
expected = list(builtins.range(10)),
lengths = 10,
actual1 = range(1, 10, 2)
actual2 = lrange(1, 10, 2)
actual += [actual1, actual2],
lengths += 5,
expected += list(builtins.range(1, 10, 2)),
self.check_result(actual, expected, lengths)
def test_map(self):
func = lambda x, y, z: x + y + z
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual1 = map(func, *lst)
actual2 = lmap(func, *lst)
actual = [actual1, actual2],
expected = list(builtins.map(func, *lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_filter(self):
func = lambda x: x
lst = list(builtins.range(10))
actual1 = filter(func, lst)
actual2 = lfilter(func, lst)
actual = [actual1, actual2],
lengths = 9,
expected = list(builtins.filter(func, lst)),
self.check_result(actual, expected, lengths)
def test_zip(self):
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual = [zip(*lst), lzip(*lst)],
expected = list(builtins.zip(*lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_dict_iterators(self):
self.assertEqual(next(itervalues({1: 2})), 2)
self.assertEqual(next(iterkeys({1: 2})), 1)
self.assertEqual(next(iteritems({1: 2})), (1, 2))
|
mit
|
potash/scikit-learn
|
sklearn/tests/test_multiclass.py
|
8
|
25524
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils import shuffle
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
    # Test that check_classification_targets returns the correct type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_partial_fit():
    # Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(y)))
assert_greater(np.mean(y == pred), 0.65)
# Test when mini batches doesn't have all classes
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovr.partial_fit(iris.data[60:], iris.target[60:])
pred = ovr.predict(iris.data)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_partial_fit_predict():
X, y = shuffle(iris.data, iris.target)
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert_equal(len(ovo1.estimators_), n_classes * (n_classes - 1) / 2)
assert_greater(np.mean(y == pred1), 0.65)
assert_almost_equal(pred1, pred2)
# Test when mini-batches don't have all target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovo1.partial_fit(iris.data[60:], iris.target[60:])
pred1 = ovo1.predict(iris.data)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred1, pred2)
assert_equal(len(ovo1.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred1), 0.65)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs and thus 3 distinct
        # binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_pairwise_indices():
clf_precomputed = svm.SVC(kernel='precomputed')
X, y = iris.data, iris.target
ovr_false = OneVsOneClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
ovr_false.fit(linear_kernel, y)
n_estimators = len(ovr_false.estimators_)
precomputed_indices = ovr_false.pairwise_indices_
for idx in precomputed_indices:
assert_equal(idx.shape[0] * n_estimators / (n_estimators - 1),
linear_kernel.shape[0])
def test_pairwise_attribute():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert_false(ovr_false._pairwise)
ovr_true = MultiClassClassifier(clf_precomputed)
assert_true(ovr_true._pairwise)
def test_pairwise_cross_val_score():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC(kernel='linear')
X, y = iris.data, iris.target
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
ovr_true = MultiClassClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(ovr_true, linear_kernel, y)
score_linear = cross_val_score(ovr_false, X, y)
assert_array_equal(score_precomputed, score_linear)
|
bsd-3-clause
|
ashhher3/pylearn2
|
pylearn2/cross_validation/tests/test_subset_iterators.py
|
49
|
2411
|
"""
Test subset iterators.
"""
import numpy as np
from pylearn2.testing.skip import skip_if_no_sklearn
def test_validation_k_fold():
"""Test ValidationKFold."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import ValidationKFold
n = 30
# test with indices
cv = ValidationKFold(n)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n / cv.n_folds
assert test.size == n / cv.n_folds
def test_stratified_validation_k_fold():
"""Test StratifiedValidationKFold."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
StratifiedValidationKFold)
n = 30
y = np.concatenate((np.zeros(n / 2, dtype=int), np.ones(n / 2, dtype=int)))
# test with indices
cv = StratifiedValidationKFold(y)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n / cv.n_folds
assert test.size == n / cv.n_folds
assert np.count_nonzero(y[valid]) == (n / 2) * (1. / cv.n_folds)
assert np.count_nonzero(y[test]) == (n / 2) * (1. / cv.n_folds)
def test_validation_shuffle_split():
"""Test ValidationShuffleSplit."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
ValidationShuffleSplit)
n = 30
# test with indices
cv = ValidationShuffleSplit(n)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n * cv.test_size
assert test.size == n * cv.test_size
def test_stratified_validation_shuffle_split():
"""Test StratifiedValidationShuffleSplit."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
StratifiedValidationShuffleSplit)
n = 60
y = np.concatenate((np.zeros(n / 2, dtype=int), np.ones(n / 2, dtype=int)))
# test with indices
cv = StratifiedValidationShuffleSplit(y)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n * cv.test_size
assert test.size == n * cv.test_size
assert np.count_nonzero(y[valid]) == (n / 2) * cv.test_size
assert np.count_nonzero(y[test]) == (n / 2) * cv.test_size
|
bsd-3-clause
|
khkaminska/scikit-learn
|
examples/linear_model/plot_multi_task_lasso_support.py
|
249
|
2211
|
#!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes
that features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
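# coef_lasso_ fits an independent Lasso per task (one per column of Y), while
# MultiTaskLasso fits all tasks jointly with a mixed L1/L2 penalty that
# enforces a shared sparsity pattern across tasks.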
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
|
bsd-3-clause
|
pazagra/catkin_ws
|
src/IL-pipeline/src/coord.py
|
1
|
1965
|
# import cv2
# import os
# import math
# import matplotlib.pyplot as plt
# import numpy as np
# from NMS import *
# import SVM
# import timeit
# path = "/media/iglu/Data/DatasetIglu"
# save_path_r ="/home/iglu/catkin_ws/src/IL-pipeline/src/Hands/RGB/"
# save_path_d ="/home/iglu/catkin_ws/src/IL-pipeline/src/Hands/Depth/"
# method = cv2.TM_CCOEFF
# for i in os.listdir(save_path_r):
# template = cv2.imread(save_path_r+i)
# s = i.split('_')
# if s[3]== '0':
# s[3] = '10'
# img= cv2.imread(path+"/user"+s[3]+"/"+s[4]+"_"+s[5]+ "/k1" + "/RGB" +"/"+s[6]+"_"+s[7]+"_"+s[8]+".jpg")
# print s
# print path+"/user"+s[3]+"/"+s[4]+"_"+s[5]+ "/k1" + "/RGB" +"/"+s[6]+"_"+s[7]+"_"+s[8]+".jpg"
# print path+"/user"+s[3]+"/"+s[4]+"_"+s[5]+ "/k1" + "/Depth" +"/"+s[6]+"_"+s[7]+"_Depth.npy"
# print s[9]+","+s[10][:-4]
# d = np.load(path+"/user"+s[3]+"/"+s[4]+"_"+s[5]+ "/k1" + "/Depth" +"/"+s[6]+"_"+s[7]+"_Depth.npy")
# x = int(s[9])
# y = int(s[10][:-4])
# d1 = d[y:y+100,x:x+100]
# if s[3]== '10':
# s[3] = '0'
# np.save(save_path_d+s[0]+"_"+s[1]+"_"+s[2]+"_"+s[3]+"_"+s[4]+"_"+s[5]+"_"+s[6]+"_"+s[7]+"_"+s[8]+"_"+x.__str__()+"_"+y.__str__()+".npy",d1)
#
#
# # np.load(path+"/user"+s[3]+"/"+s[4]+"_"+s[5]+ "/k1" + "/RGB" +"/"+s[6]+"_"+s[7]+"_"+s[8])
# # os.rename(save_path_r+i, save_path_r+i[:-4]+"_"+x.__str__()+"_"+y.__str__()+".jpg")
C = {'point':0,'show':1}
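# 2x2 confusion matrix: rows index the ground-truth label, columns the
# predicted label, using the indices defined in C above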
Matrix = [[0,0],[0,0]]
f = open("todoCM", 'r')
for line in f:
user = line
GT = next(f).rstrip('\n')
output = next(f).rstrip('\n')
    GT = GT.split('_')[0]
output = output.lower()
print GT
print output
Matrix[C[GT]][C[output]] +=1
print Matrix
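# Normalize each row by its total so the entries become per-class rates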
S0 = sum(Matrix[0])
S1 = sum(Matrix[1])
Matrix[0][0] = Matrix[0][0]*1.0/S0
Matrix[0][1] = Matrix[0][1]*1.0/S0
Matrix[1][0] = Matrix[1][0]*1.0/S1
Matrix[1][1] = Matrix[1][1]*1.0/S1
print Matrix
St = S0+S1
hit = Matrix[0][0]*100+Matrix[1][1]*100
print hit*1.0/St
|
gpl-3.0
|
Clyde-fare/scikit-learn
|
examples/linear_model/plot_robust_fit.py
|
238
|
2414
|
"""
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
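# Each estimator is wrapped below in a pipeline with degree-3 polynomial
# features, turning the robust linear fits into robust polynomial fits.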
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
title='Error: mean absolute deviation\n to non corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
|
bsd-3-clause
|
ky822/scikit-learn
|
examples/decomposition/plot_ica_vs_pca.py
|
306
|
3329
|
"""
==========================
FastICA on 2D point clouds
==========================
This example visually illustrates, in the feature space, a comparison of
the results of two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: 2 Student's t variables with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
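# ICA recovers the sources only up to scale and sign, so the estimated
# sources are rescaled to unit variance before plotting.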
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
|
bsd-3-clause
|
macks22/scikit-learn
|
sklearn/setup.py
|
225
|
2856
|
import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
HIPS/optofit
|
examples/gp_demo.py
|
1
|
7235
|
import numpy as np
seed = np.random.randint(2**16)
#seed = 25982
print "Seed: ", seed
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from optofit.cneuron.compartment import Compartment
from optofit.cneuron.channels import LeakChannel
from optofit.cneuron.simulate import forward_euler
from optofit.cneuron.gpchannel import GPChannel, sigma
from hips.inference.particle_mcmc import *
from optofit.cinference.pmcmc import *
# Set the random seed for reproducibility
np.random.seed(seed)
# Make a simple compartment
hypers = {
'C' : 1.0,
'V0' : -60.0,
'g_leak' : 0.3,
'E_leak' : -65.0,
}
gp_hypers = {
'D' : 1,
'g_gp' : 1.0,
'E_gp' : 0.0,
'alpha_0': 1.0,
'beta_0' : 2.0,
'sigma_kernel': 1.0
}
def create_model():
# Add a few channels
body = Compartment(name='body', hypers=hypers)
leak = LeakChannel(name='leak', hypers=hypers)
gp = GPChannel(name='gp', hypers=gp_hypers)
body.add_child(leak)
body.add_child(gp)
# Initialize the model
D, I = body.initialize_offsets()
return body, gp, D, I
def sample_model( ):
body, gp, D, I = create_model()
# Set the recording duration
t_start = 0
t_stop = 100.
dt = 1.0
t = np.arange(t_start, t_stop, dt)
T = len(t)
# Make input with an injected current from 500-600ms
inpt = np.zeros((T, I))
inpt[50/dt:60/dt,:] = 7.
inpt += np.random.randn(T, I)
# Set the initial distribution to be Gaussian around the steady state
z0 = np.zeros(D)
body.steady_state(z0)
init = GaussianInitialDistribution(z0, 0.1**2 * np.eye(D))
# Set the proposal distribution using Hodgkin Huxley dynamics
sigmas = 0.0001*np.ones(D)
# Set the voltage transition dynamics to be a bit noisier
sigmas[body.x_offset] = 0.25
prop = HodgkinHuxleyProposal(T, 1, D, body, sigmas, t, inpt)
# Set the observation model to observe only the voltage
etas = np.ones(1)
observed_dims = np.array([body.x_offset]).astype(np.int32)
lkhd = PartialGaussianLikelihood(observed_dims, etas)
# Initialize the latent state matrix to sample N=1 particle
z = np.zeros((T,1,D))
z[0,0,:] = init.sample()
# Initialize the output matrix
x = np.zeros((T,D))
# Sample the latent state sequence
for i in np.arange(0,T-1):
# The interface kinda sucks. We have to tell it that
# the first particle is always its ancestor
prop.sample_next(z, i, np.array([0], dtype=np.int32))
# Sample observations
for i in np.arange(0,T):
lkhd.sample(z,x,i,0)
# Extract the GP current
I_gp = gp.current(z, z[:,0,0], np.arange(T), 0)
# Extract the first (and in this case only) particle
z = z[:,0,:].copy(order='C')
# Plot the GP channel dynamics
gp_fig = plt.figure()
gp_ax1 = gp_fig.add_subplot(121)
gp.plot(ax=gp_ax1)
gp_ax2 = gp_fig.add_subplot(122)
# Plot the first particle trajectory
st_axs, _ = body.plot(t, z, color='k')
# Plot the observed voltage
st_axs[0].plot(t, x[:,0], 'r')
plt.ion()
plt.show()
plt.pause(0.01)
return t, z, x, inpt, st_axs, gp_ax2
# Now run the pMCMC inference
def sample_z_given_x(t, x, inpt,
z0=None,
N_particles=100,
axs=None, gp_ax=None):
T,O = x.shape
# Make a model
body, gp, D, I = create_model()
# Set the initial distribution to be Gaussian around the steady state
ss = np.zeros(D)
body.steady_state(ss)
init = GaussianInitialDistribution(ss, 0.1**2 * np.eye(D))
# Set the proposal distribution using Hodgkin Huxley dynamics
sigmas = 0.0001*np.ones(D)
# Set the voltage transition dynamics to be a bit noisier
sigmas[body.x_offset] = 0.25
prop = HodgkinHuxleyProposal(T, N_particles, D, body, sigmas, t, inpt)
# Set the observation model to observe only the voltage
etas = np.ones(1)
observed_dims = np.array([body.x_offset]).astype(np.int32)
lkhd = PartialGaussianLikelihood(observed_dims, etas)
# Initialize the latent state matrix to sample N=1 particle
z = np.ones((T,N_particles,D)) * ss[None, None, :] + np.random.randn(T,N_particles,D) * sigmas[None, None, :]
if z0 is not None:
z[:,0,:] = z0
# Prepare the particle Gibbs sampler with the first particle
pf = ParticleGibbsAncestorSampling(T, N_particles, D)
pf.initialize(init, prop, lkhd, x, z[:,0,:].copy('C'))
# Plot the initial state
gp_ax, im, l_gp = gp.plot(ax=gp_ax, data=z[:,0,:])
axs, lines = body.plot(t, z[:,0,:], color='b', axs=axs)
# Update figures
plt.figure(1)
plt.pause(0.001)
plt.figure(2)
plt.pause(0.001)
# Initialize sample outputs
S = 100
z_smpls = np.zeros((S,T,D))
z_smpls[0,:,:] = z[:,0,:]
for s in range(1,S):
print "Iteration %d" % s
# Reinitialize with the previous particle
pf.initialize(init, prop, lkhd, x, z_smpls[s-1,:,:])
# Sample a new trajectory given the updated kinetics and the previous sample
z_smpls[s,:,:] = pf.sample()
sigmasq = resample_transition_noise(body, z_smpls[s,:,:], inpt, t)
print "Sigmasq: ", sigmasq
prop.set_sigmasq(sigmasq)
gp.set_sigmas(sigmasq)
# Plot the sample
body.plot(t, z_smpls[s,:,:], lines=lines)
# Update the latent state figure
plt.figure(2)
plt.pause(0.001)
# Resample the GP
gp.resample(z_smpls[s,:,:])
gp.plot(im=im, l=l_gp, data=z_smpls[s,:,:])
# Update the gp transition figure
plt.figure(1)
plt.pause(0.001)
z_mean = z_smpls.mean(axis=0)
z_std = z_smpls.std(axis=0)
z_env = np.zeros((T*2,2))
z_env[:,0] = np.concatenate((t, t[::-1]))
z_env[:,1] = np.concatenate((z_mean[:,0] + z_std[:,0], z_mean[::-1,0] - z_std[::-1,0]))
plt.ioff()
plt.show()
return z_smpls
def resample_transition_noise(body, data, inpt, t,
alpha0=0.1, beta0=0.1):
"""
Resample sigma, the transition noise variance, under an inverse gamma prior
"""
T = data.shape[0]
D = data.shape[1]
dxdt = np.zeros((T,1,D))
x = np.zeros((T,1,D))
x[:,0,:] = data
# Compute kinetics of the voltage
body.kinetics(dxdt, x, inpt, np.arange(T-1).astype(np.int32))
dt = np.diff(t)
# TODO: Loop over data
dX_pred = dxdt[:-1, 0, :]
dX_data = (data[1:, :] - data[:-1, :]) / dt[:,None]
X_diffs = dX_pred - dX_data
# Resample transition noise.
X_diffs = np.array(X_diffs)
n = X_diffs.shape[0]
sigmasq = np.zeros(D)
for d in range(D):
alpha = alpha0 + n / 2.0
beta = beta0 + np.sum(X_diffs[:,d] ** 2) / 2.0
# self.sigmas[d] = beta / alpha
sigmasq[d] = 1.0 / np.random.gamma(alpha, 1.0/beta)
# print "Sigma V: %.3f" % (sigmas[d])
return sigmasq
t, z, x, inpt, axs, gp_ax = sample_model()
raw_input("Press enter to being sampling...\n")
sample_z_given_x(t, x, inpt, axs=axs, gp_ax=gp_ax, z0=None)
|
gpl-2.0
|
quheng/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
244
|
1593
|
import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
|
bsd-3-clause
|
quheng/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
176
|
2169
|
from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
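# Usage sketch (an illustrative comment, not one of the tests above):
# FunctionTransformer simply delegates to the wrapped callable, so a
# column-centering transformer could look like
#
#   from sklearn.preprocessing import FunctionTransformer
#   import numpy as np
#   X = np.arange(10).reshape((5, 2))
#   X_centered = FunctionTransformer(lambda X: X - X.mean(axis=0)).transform(X)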
|
bsd-3-clause
|
Titan-C/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
17
|
34869
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances was deprecated in version 0.18 "
"and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X : array_like
An array with shape (n_samples, n_features)
Returns
-------
D : array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij : arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
@deprecated("GaussianProcess was deprecated in version 0.18 and will be "
"removed in 0.20. Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
.. deprecated:: 0.18
This class will be removed in 0.20.
Use the :class:`GaussianProcessRegressor` instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state : int, RandomState instance or None, optional (default=None)
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If int, random_state is the seed used by the
random number generator; If RandomState instance, random_state is the
random number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/stable/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
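# Minimal usage sketch (an illustrative comment, not part of the module),
# following the toy data from the class docstring above:
#
#   import numpy as np
#   from sklearn.gaussian_process import GaussianProcess
#   X = np.array([[1., 3., 5., 6., 7., 8.]]).T
#   y = (X * np.sin(X)).ravel()
#   gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.).fit(X, y)
#   x_new = np.atleast_2d(np.linspace(0., 10., 50)).T
#   y_pred, mse = gp.predict(x_new, eval_MSE=True)  # BLUP and its MSE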
|
bsd-3-clause
|
flag0010/pop_gen_cnn
|
data_prep_tricks/generic_tajD.py
|
2
|
3383
|
from common import *
from itertools import izip
def tajD(N,S,k):
'''N is the number of indv
S is the number of seg. sites
k is the average number of pairwise nucleotide diffs, i.e. the total number of pairwise diffs divided by choose(N,2)
so:
i1 AT|T|GGCG|A|CAG|T
i2 AT|G|GGCG|C|CAG|A
i3 AT|G|GGCG|A|CAG|A
i4 AT|G|GGCG|C|CAG|T
N = 4, S = 3, k = 11/6 = 1.83333
D = 1.08976
'''
a1, a2 = 0.0, 0.0
for b in range(1,N):
a1 += 1. / b
a2 += 1. / (b * b)
b1 = (N + 1.) / (3. * (N - 1.))
b2 = (2. * N * N + 2. * N + 6.) / (9. * N * (N - 1.))
c1 = b1 - 1. / a1
c2 = b2 - (N + 2.) / (a1 * N) + a2 / (a1 * a1)
e1 = c1 / a1
e2 = c2 / (a1 * a1 + a2)
num = k - S / a1
den = (e1 * S + e2 * S * (S - 1))**0.5
D = num / den
return D
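# Illustrative check (added comment, not in the original script): plugging the
# docstring example into tajD reproduces the quoted value, e.g.
# tajD(N=4, S=3, k=11/6.) -> a1 ~ 1.833, numerator k - S/a1 ~ 0.197,
# denominator sqrt(e1*S + e2*S*(S-1)) ~ 0.181, so D ~ 1.08976.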
def calc_S_and_k_from_seqs(list_of_seqs):
###first a function of taking the "sequence configuration" and converting it to a tally of pairwise diffs
def pairwise_diffs2(x):
if len(x) == 1:
return 0
else:
q = 0.0
for i,j in pairwise(x):
q += i*j
return q
#now loop through sites and tally S and k
S, p = 0.0, 0.0
for n in izip(*list_of_seqs):
config = count_all(n).values()
diffs = pairwise_diffs2(config)
if diffs: ## all the non-seg sites return zero, which is False
p += diffs
S += 1
k = p / choose(len(list_of_seqs), 2)
return S, k
def seq_boot(seqs):
n = zip(*seqs)
idxs = range(len(n))
while 1:
k = defaultdict(list)
idx_list = sampler(idxs, len(n), replacement=True)
for idx in idx_list:
for pos, val in enumerate(n[idx]):
k[pos].append(val)
yield map(lambda s: ''.join(s), k.values())
def permute(seqs, reps = 1000):
n = []
repnum=0
boot = seq_boot(seqs)
while repnum < reps:
rep = boot.next()
N = len(rep)
S, k = calc_S_and_k_from_seqs(rep)
try:
val = tajD(N,S,k)
except:
val = 'NA'
if val != 'NA':
n.append(val)
repnum+=1
#print repnum
n.sort()
#print len(n)
lower_idx, upper_idx = int(len(n) * 0.025)-1, len(n) - int(len(n) * 0.025)
mean = lambda s: sum(s) / float(len(s))
return mean(n), n[lower_idx], n[upper_idx], len(n)
if __name__ == '__main__':
seqs = ['ATTGGCGACAGT', 'ATGGGCGCCAGA', 'ATGGGCGACAGA', 'ATGGGCGCCAGT']
##works with non DNA seq data too
#seqs = ['00001001000101'*100,
# '01101001000101'*100,
# '00001000010111'*100,
# '01101001010101'*100]
N = len(seqs)
S, k = calc_S_and_k_from_seqs(seqs)
print N, S, k
real = tajD(N,S,k)
mean_perm, lower_ci, upper_ci, perms = permute(seqs)
print "real Tajima's D", real
print 'mean of bootstraps', mean_perm
print 'lower 0.025 CI', lower_ci
print 'upper 0.975 CI', upper_ci
print 'num. replicates', perms
if lower_ci <= 0 <= upper_ci: print 'not sig. different from zero'
else: print 'sig. different from zero'
#from matplotlib import pyplot as plt
#plt.hist(n)
#plt.show()
#idx = 0
#for i in seqs:
# print '>'+str(idx)
# print i
# idx+=1
|
gpl-3.0
|
spallavolu/scikit-learn
|
examples/ensemble/plot_voting_probas.py
|
316
|
2824
|
"""
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three examplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
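# Illustrative check (an addition to the original example): with soft voting,
# the VotingClassifier probabilities are the weighted average of the
# individual classifiers' probabilities, using the weights [1, 1, 5] above.
manual_avg = np.average(np.array([pr[0] for pr in probas[:-1]]), axis=0, weights=[1, 1, 5])
print('Weighted average of individual probas for sample 1: ' + str(manual_avg))
print('VotingClassifier probas for sample 1: ' + str(probas[-1][0]))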
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
|
bsd-3-clause
|
Lab603/PicEncyclopedias
|
jni-build/jni-build/jni/include/tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py
|
30
|
2249
|
# encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
class CategoricalTest(tf.test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=0,
share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]
])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
tf.test.main()
|
mit
|
sanja7s/SR_Twitter
|
src_graph/analyze_SR.py
|
1
|
3725
|
import codecs
from scipy.stats.stats import pearsonr, spearmanr
from collections import defaultdict, OrderedDict
import numpy as np
from scipy.interpolate import UnivariateSpline
import matplotlib.mlab as mlab
from matplotlib import pyplot as plt
f_in = "graph_data_with_SR.tab"
f_out = "mention_graph_with_SR_undirected.tab"
f_undirected_in = "graph_data_with_SR_undirected.tab"
def read_in_graph_with_SR(f_in):
graph_with_SR = defaultdict(list)
with codecs.open(f_in,'r', encoding='utf8') as input_file:
for line in input_file:
line = line.split()
usr1 = line[0]
usr2 = line[1]
weight = int(line[2])
SR = line[3]
if SR == 'None' or SR == '-1':
continue
SR = float(SR)
graph_with_SR[(usr1, usr2)] = (weight, SR)
return graph_with_SR
def read_in_undirected_graph_with_SR():
graph_with_SR = defaultdict(list)
with codecs.open(f_undirected_in,'r', encoding='utf8') as input_file:
for line in input_file:
line = line.split()
usr1 = line[0]
usr2 = line[1]
weight = int(line[2])
SR = line[3]
if SR == 'None' or SR == '-1':
continue
SR = float(SR)
graph_with_SR[(usr1, usr2)] = (weight, SR)
return graph_with_SR
def save_undirected_graph_with_SR():
directed_graph_with_SR = read_in_graph_with_SR(f_in)
undirected_graph_with_SR = defaultdict(list)
with codecs.open(f_out,'w', encoding='utf8') as output_file:
for edge in directed_graph_with_SR.keys():
w = directed_graph_with_SR[edge][0]
sr = directed_graph_with_SR[edge][1]
key2 = tuple(sorted((edge[1],edge[0])))
if key2 not in undirected_graph_with_SR.iterkeys():
undirected_graph_with_SR[key2] = (w, sr)
else:
undirected_graph_with_SR[key2] = (w + undirected_graph_with_SR[key2][0], undirected_graph_with_SR[key2][1])
output_file.write(str(edge[0]) + '\t' + str(edge[1]) + '\t' + str(undirected_graph_with_SR[key2][0]) + '\t' + str(undirected_graph_with_SR[key2][1]) + '\n')
return undirected_graph_with_SR
def SR_vs_weight(threshold=50):
graph_with_SR = read_in_undirected_graph_with_SR()
SR = []
weights = []
cnt = 0
for key in graph_with_SR.iterkeys():
sr = graph_with_SR[key][1]
w = graph_with_SR[key][0]
cnt += 1
if cnt % 10000 == 0:
print "Read ", cnt, " edges."
if w > threshold and sr > 0:
SR.append(sr)
weights.append(w)
print "Pearson cor (undirected) with threshold ", threshold,
print ('is: {0:.3f}'.format(float(pearsonr(SR, weights))))
print "Spearman cor (undirected) with threshold ", threshold,
print ('is: {0:.3f}'.format(float(spearmanr(SR, weights))))
SR_vs_weight()
def SR_pdf():
graph_with_SR = read_in_graph_with_SR(f_in)
SR = []
weights = []
for key in graph_with_SR.iterkeys():
sr = float(graph_with_SR[key][1])
# print sr
if sr >= 0 and sr < 1:
SR.append(sr)
weights.append(graph_with_SR[key][0])
# took this code from StackOverflow and edited to plot pdf and add a function to smooth the data
N = len(SR)
n = N/10
print N
s = np.asarray(SR) # generate your data sample with N elements
print len(s)
'''
p, x = np.histogram(s, bins=n) # bin it into n = N/10 bins
x = x[:-1] + (x[1] - x[0])/2 # convert bin edges to centers
f = UnivariateSpline(x, p, s=n)
plt.plot(x, f(x))
plt.show()
'''
mu, sigma = 100, 15
# the histogram of the data
n, bins, patches = plt.hist(s, 100, normed=0, facecolor='green', alpha=0.55)
# add a 'best fit' line
#y = mlab.normpdf( bins, mu, sigma)
#l = plt.plot(bins, y, 'r--', linewidth=1)
plt.xlabel('SR value')
plt.ylabel('Number of user pairs')
plt.title('Histogram of SR')
plt.axis([0, 1, 0, 115])
plt.grid(True)
plt.show()
#SR_pdf()
save_undirected_graph_with_SR()
|
mit
|
wdm0006/pyculiarity
|
pyculiarity/detect_ts.py
|
1
|
10466
|
# -*- coding: utf-8 -*-
from collections import namedtuple
import datetime
import copy
from past.builtins import basestring
from pandas import DataFrame, to_datetime
from pandas.lib import Timestamp
import numpy as np
from pyculiarity.date_utils import get_gran
from pyculiarity.detect_anoms import detect_anoms
Direction = namedtuple('Direction', ['one_tail', 'upper_tail'])
def detect_ts(df, max_anoms=0.10, direction='pos', alpha=0.05, threshold=None, e_value=False, longterm=False,
piecewise_median_period_weeks=2, granularity='day', verbose=False, inplace=True):
"""
Anomaly Detection Using Seasonal Hybrid ESD Test
A technique for detecting anomalies in seasonal univariate time series where the input is a
series of <timestamp, value> pairs.
Args:
df: Time series as a two column data frame where the first column consists of the integer UTC Unix
timestamps and the second column consists of the observations.
max_anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the
data.
direction: Directionality of the anomalies to be detected. Options are: ('pos' | 'neg' | 'both').
alpha: The level of statistical significance with which to accept or reject anomalies.
only_last: Find and report anomalies only within the last day or hr in the time series. Options: (None | 'day' | 'hr')
threshold: Only report positive going anoms above the threshold specified. Options are: (None | 'med_max' | 'p95' | 'p99')
e_value: Add an additional column to the anoms output containing the expected value.
longterm: Increase anom detection efficacy for time series that are greater than a month.
See Details below.
piecewise_median_period_weeks: The piecewise median time window as described in Vallis, Hochenbaum, and Kejariwal
(2014). Defaults to 2.
Details
'longterm' This option should be set when the input time series is longer than a month.
The option enables the approach described in Vallis, Hochenbaum, and Kejariwal (2014).
'threshold' Filter all negative anomalies and those anomalies whose magnitude is smaller
than one of the specified thresholds which include: the median
of the daily max values (med_max), the 95th percentile of the daily max values (p95), and the
99th percentile of the daily max values (p99).
The returned value is a dictionary with the following components:
anoms: Data frame containing timestamps, values, and optionally expected values.
plot: A graphical object if plotting was requested by the user. The plot contains
the estimated anomalies annotated on the input time series
"""
if not isinstance(df, DataFrame):
raise ValueError("data must be a single data frame.")
else:
if len(df.columns) != 2 or not df.iloc[:, 1].map(np.isreal).all():
raise ValueError('''data must be a 2 column data.frame, with the first column being a set of timestamps, and
the second column being numeric values.''')
if not (df.dtypes[0].type is np.float64) and not (df.dtypes[0].type is np.int64):
raise ValueError('''The input timestamp column must be a float or integer of the unix timestamp, not date
time columns, date strings or pd.TimeStamp columns.''')
if not inplace:
df = copy.deepcopy(df)
# change the column names in place, rather than copying the entire dataset, but save the headers to replace them.
orig_header = df.columns.values
df.rename(columns={df.columns.values[0]: "timestamp", df.columns.values[1]: "value"}, inplace=True)
# Sanity check all input parameters
if max_anoms > 0.49:
length = len(df.value)
raise ValueError("max_anoms must be less than 50%% of the data points (max_anoms =%f data_points =%s)." % (round(max_anoms * length, 0), length))
if direction not in ['pos', 'neg', 'both']:
raise ValueError("direction options are: pos | neg | both.")
if not (0.01 <= alpha <= 0.1):
if verbose:
import warnings
warnings.warn("alpha is the statistical significance, and is usually between 0.01 and 0.1")
if threshold not in [None, 'med_max', 'p95', 'p99']:
raise ValueError("threshold options are: None | med_max | p95 | p99")
if not isinstance(e_value, bool):
raise ValueError("e_value must be a boolean")
if not isinstance(longterm, bool):
raise ValueError("longterm must be a boolean")
if piecewise_median_period_weeks < 2:
raise ValueError(
"piecewise_median_period_weeks must be at greater than 2 weeks")
# if the data is daily, then we need to bump the period to weekly to get multiple examples
gran = granularity
gran_period = {
'ms': 60000,
'sec': 3600,
'min': 1440,
'hr': 24,
'day': 7
}
period = gran_period.get(gran)
if not period:
raise ValueError('%s granularity detected. This is currently not supported.' % (gran, ))
# now convert the timestamp column into a proper timestamp
df['timestamp'] = df['timestamp'].map(lambda x: datetime.datetime.utcfromtimestamp(x))
num_obs = len(df.value)
clamp = (1 / float(num_obs))
if max_anoms < clamp:
max_anoms = clamp
if longterm:
if gran == "day":
num_obs_in_period = period * piecewise_median_period_weeks + 1
num_days_in_period = 7 * piecewise_median_period_weeks + 1
else:
num_obs_in_period = period * 7 * piecewise_median_period_weeks
num_days_in_period = 7 * piecewise_median_period_weeks
last_date = df.timestamp.iget(-1)
all_data = []
for j in range(0, len(df.timestamp), num_obs_in_period):
start_date = df.timestamp.iget(j)
end_date = min(start_date + datetime.timedelta(days=num_days_in_period), df.timestamp.iget(-1))
# if there is at least 14 days left, subset it, otherwise subset last_date - 14days
if (end_date - start_date).days == num_days_in_period:
sub_df = df[(df.timestamp >= start_date) & (df.timestamp < end_date)]
else:
sub_df = df[(df.timestamp > (last_date - datetime.timedelta(days=num_days_in_period))) & (df.timestamp <= last_date)]
all_data.append(sub_df)
else:
all_data = [df]
all_anoms = DataFrame(columns=['timestamp', 'value'])
seasonal_plus_trend = DataFrame(columns=['timestamp', 'value'])
# Detect anomalies on all data (either entire data in one-pass, or in 2 week blocks if longterm=TRUE)
for i in range(len(all_data)):
directions = {
'pos': Direction(True, True),
'neg': Direction(True, False),
'both': Direction(False, True)
}
anomaly_direction = directions[direction]
# detect_anoms actually performs the anomaly detection and returns the result in a list containing the anomalies
# as well as the decomposed components of the time series for further analysis.
s_h_esd_timestamps = detect_anoms(all_data[i],
k=max_anoms,
alpha=alpha,
num_obs_per_period=period,
use_decomp=True,
one_tail=anomaly_direction.one_tail,
upper_tail=anomaly_direction.upper_tail,
verbose=verbose)
if s_h_esd_timestamps is None:
return {
'anoms': DataFrame(columns=["timestamp", "anoms"])
}
# store decomposed comps in local variable and overwrite s_h_esd_timestamps to contain only the anom timestamps
data_decomp = s_h_esd_timestamps['stl']
s_h_esd_timestamps = s_h_esd_timestamps['anoms']
# -- Step 3: Use detected anomaly timestamps to extract the actual anomalies (timestamp and value) from the data
if s_h_esd_timestamps:
anoms = all_data[i][all_data[i].timestamp.isin(s_h_esd_timestamps)]
else:
anoms = DataFrame(columns=['timestamp', 'value'])
# Filter the anomalies using one of the thresholding functions if applicable
if threshold:
# Calculate daily max values
periodic_maxes = df.groupby(df.timestamp.map(Timestamp.date)).aggregate(np.max).value
# Calculate the threshold set by the user
thresh = 0.5
if threshold == 'med_max':
thresh = periodic_maxes.median()
elif threshold == 'p95':
thresh = periodic_maxes.quantile(.95)
elif threshold == 'p99':
thresh = periodic_maxes.quantile(.99)
# Remove any anoms below the threshold
anoms = anoms[anoms.value >= thresh]
all_anoms = all_anoms.append(anoms)
seasonal_plus_trend = seasonal_plus_trend.append(data_decomp)
# Cleanup potential duplicates
try:
all_anoms.drop_duplicates(subset=['timestamp'])
seasonal_plus_trend.drop_duplicates(subset=['timestamp'])
except TypeError:
all_anoms.drop_duplicates(cols=['timestamp'])
seasonal_plus_trend.drop_duplicates(cols=['timestamp'])
# Calculate number of anomalies as a percentage
anom_pct = (len(all_anoms) / float(num_obs)) * 100
# name the columns back
df.rename(columns={"timestamp": orig_header[0], "value": orig_header[1]}, inplace=True)
if anom_pct == 0:
return {"anoms": None}
all_anoms.index = all_anoms.timestamp
if e_value:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value,
'expected_value': seasonal_plus_trend[
seasonal_plus_trend.timestamp.isin(
all_anoms.timestamp)].value
}
else:
d = {
'timestamp': all_anoms.timestamp,
'anoms': all_anoms.value
}
anoms = DataFrame(d, index=d['timestamp'].index)
# convert timestamps back to unix time
anoms['timestamp'] = anoms['timestamp'].astype(np.int64)
anoms['timestamp'] = anoms['timestamp'].map(lambda x: x * 10e-10)
return {'anoms': anoms}
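# Minimal usage sketch (an illustrative comment, not part of the module; the
# file name and column names are hypothetical):
#
#   import pandas as pd
#   from pyculiarity import detect_ts
#   df = pd.read_csv('data.csv', usecols=['timestamp', 'count'])
#   results = detect_ts(df, max_anoms=0.05, direction='both', granularity='hr')
#   print(results['anoms'])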
|
gpl-3.0
|
jjs0sbw/CSPLN
|
apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/backends/backend_template.py
|
3
|
8679
|
"""
This is a fully functional do nothing backend to provide a template to
backend writers. It is fully functional in that you can select it as
a backend with
import matplotlib
matplotlib.use('Template')
and your matplotlib scripts will (should!) run without error, though
no output is produced. This provides a nice starting point for
backend writers because you can selectively implement methods
(draw_rectangle, draw_lines, etc...) and slowly see your figure come
to life w/o having to have a full blown implementation before getting
any results.
Copy this to backend_xxx.py and replace all instances of 'template'
with 'xxx'. Then implement the class methods and functions below, and
add 'xxx' to the switchyard in matplotlib/backends/__init__.py and
'xxx' to the backends list in the validate_backend method in
matplotlib/__init__.py and you're off. You can use your backend with::
import matplotlib
matplotlib.use('xxx')
from pylab import *
plot([1,2,3])
show()
matplotlib also supports external backends, so you can use any module
in your PYTHONPATH with the syntax::
import matplotlib
matplotlib.use('module://my_backend')
where my_backend.py is your module name. This syntax is also
recognized in the rc file and in the -d argument in pylab, eg::
python simple_plot.py -dmodule://my_backend
The files that are most relevant to backend_writers are
matplotlib/backends/backend_your_backend.py
matplotlib/backend_bases.py
matplotlib/backends/__init__.py
matplotlib/__init__.py
matplotlib/_pylab_helpers.py
Naming Conventions
* classes Upper or MixedUpperCase
* variables lower or lowerUpper
* functions lower or underscore_separated
"""
from __future__ import division
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
class RendererTemplate(RendererBase):
"""
The renderer handles drawing/rendering operations.
This is a minimal do-nothing class that can be used to get started when
writing a new backend. Refer to backend_bases.RendererBase for
documentation of the class's methods.
"""
def __init__(self, dpi):
self.dpi = dpi
def draw_path(self, gc, path, transform, rgbFace=None):
pass
# draw_markers is optional, and we get more correct relative
# timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
# pass
# draw_path_collection is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_path_collection(self, gc, master_transform, paths,
# all_transforms, offsets, offsetTrans, facecolors,
# edgecolors, linewidths, linestyles,
# antialiaseds):
# pass
# draw_quad_mesh is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
# coordinates, offsets, offsetTrans, facecolors,
# antialiased, showedges):
# pass
def draw_image(self, gc, x, y, im):
pass
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
pass
def flipy(self):
return True
def get_canvas_width_height(self):
return 100, 100
def get_text_width_height_descent(self, s, prop, ismath):
return 1, 1, 1
def new_gc(self):
return GraphicsContextTemplate()
def points_to_pixels(self, points):
# if backend doesn't have dpi, eg, postscript or svg
return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
#return points/72.0 * self.dpi.get()
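# Illustrative sketch (not part of the original template): the dpi-aware
# branch sketched in the comments above, written out for a raster backend
# that stores its resolution as a plain float.
def _points_to_pixels_raster(points, dpi):
    """Convert points (1/72 inch) into device pixels at the given dpi."""
    return points / 72.0 * dpi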
class GraphicsContextTemplate(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
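# Illustrative sketch (not part of the original template): one way a backend
# could map the base class's unit-interval RGB tuples to its own colour
# representation -- here an assumed '#rrggbb' string, as a GTK-like backend
# might use.
def _unit_rgb_to_hex(rgb):
    """Map an (r, g, b) tuple with components in [0, 1] to '#rrggbb'."""
    return '#%02x%02x%02x' % tuple(int(round(255 * c)) for c in rgb)
# e.g. _unit_rgb_to_hex((0.5, 0.0, 1.0)) == '#8000ff'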
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
For GUI backends - this should be overriden if drawing should be done in
interactive python mode
"""
pass
def show():
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
# main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasTemplate(thisFig)
manager = FigureManagerTemplate(canvas, num)
return manager
class FigureCanvasTemplate(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Note GUI templates will want to connect events for button presses,
mouse movements and key presses to functions that call the base
class methods button_press_event, button_release_event,
motion_notify_event, key_press_event, and key_release_event. See,
eg backend_gtk.py, backend_wx.py and backend_tkagg.py
"""
def draw(self):
"""
Draw the figure using the renderer
"""
renderer = RendererTemplate(self.figure.dpi)
self.figure.draw(renderer)
# You should provide a print_xxx function for every file format
# you can write.
# If the file type is not in the base set of filetypes,
# you should add it to the class-scope filetypes dictionary as follows:
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['foo'] = 'My magic Foo format'
def print_foo(self, filename, *args, **kwargs):
"""
Write out format foo. The dpi, facecolor and edgecolor are restored
to their original values after this call, so you don't need to
save and restore them.
"""
pass
def get_default_filetype(self):
return 'foo'
class FigureManagerTemplate(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerTemplate
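# Minimal usage sketch (not part of the original file). It assumes 'Template'
# is registered as a backend name, as the shipped template backend is. Saving
# to the 'foo' filetype dispatches to FigureCanvasTemplate.print_foo, which is
# a no-op here, so the script runs without producing output.
if __name__ == '__main__':
    import matplotlib
    matplotlib.use('Template')
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3])
    fig.savefig('figure.foo')  # handled by print_foo via the filetypes dict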
|
gpl-3.0
|
robin-lai/scikit-learn
|
benchmarks/bench_plot_omp_lars.py
|
266
|
4447
|
"""Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat, infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
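# Interpretation sketch (not part of the original benchmark): each results
# matrix holds the ratio time(LARS) / time(OMP), so entries above 1 mean LARS
# was slower than OMP for that (n_samples, n_features) pair.
def _lars_slower_fraction(ratio_matrix):
    """Fraction of grid points on which LARS took longer than OMP."""
    return np.mean(ratio_matrix > 1.0)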
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
        ax = fig.add_subplot(1, 2, i + 1)  # subplot indices are 1-based
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
|
bsd-3-clause
|
dchabot/bluesky
|
bluesky/broker_callbacks.py
|
1
|
2716
|
from dataportal import DataBroker as db, get_events
import filestore
import filestore.api as fsapi
from metadatastore.commands import run_start_given_uid, descriptors_by_start
import matplotlib.pyplot as plt
from xray_vision.backend.mpl.cross_section_2d import CrossSection
from .callbacks import CallbackBase
class LiveImage(CallbackBase):
"""
Stream 2D images in a cross-section viewer.
Parameters
----------
field : string
name of data field in an Event
Note
----
Requires a matplotlib fix that is not released as of this writing. The
relevant commit is a951b7.
"""
def __init__(self, field):
super().__init__()
self.field = field
fig = plt.figure()
self.cs = CrossSection(fig)
self.cs._fig.show()
def event(self, doc):
uid = doc['data'][self.field]
data = fsapi.retrieve(uid)
self.cs.update_image(data)
self.cs._fig.canvas.draw()
self.cs._fig.canvas.flush_events()
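# Usage sketch (not part of the original module): subscribing a LiveImage
# follows the same subscription-dict pattern shown in the post_run docstring
# below. The field name used here is hypothetical -- substitute the data key
# your detector emits in each Event.
def subscribe_live_image(field='det_image'):
    """Return a subscription dict that streams Events' `field` to a LiveImage.

    For example: RE(plan, subscribe_live_image('det_image'))
    """
    return {'event': LiveImage(field)}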
def post_run(callback):
"""
Trigger a callback to process all the Documents from a run at the end.
This function does not receive the Document stream during collection.
It retrieves the complete set of Documents from the DataBroker after
collection is complete.
Parameters
----------
callback : callable
a function that accepts all four Documents
Returns
-------
func : function
a function that accepts a RunStop Document
Examples
--------
Print a table with full (lossless) result set at the end of a run.
>>> s = Ascan(motor, [det1], [1,2,3])
>>> table = LiveTable(['det1', 'motor'])
>>> RE(s, {'stop': post_run(table)})
+------------+-------------------+----------------+----------------+
| seq_num | time | det1 | motor |
+------------+-------------------+----------------+----------------+
| 3 | 14:02:32.218348 | 5.00 | 3.00 |
| 2 | 14:02:32.158503 | 5.00 | 2.00 |
| 1 | 14:02:32.099807 | 5.00 | 1.00 |
+------------+-------------------+----------------+----------------+
"""
def f(name, stop_doc):
uid = stop_doc['run_start']
start = run_start_given_uid(uid)
descriptors = descriptors_by_start(uid)
# For convenience, I'll rely on the broker to get Events.
header = db[uid]
events = get_events(header)
callback.start(start)
for d in descriptors:
callback.descriptor(d)
for e in events:
callback.event(e)
callback.stop(stop_doc)
return f
|
bsd-3-clause
|
equialgo/scikit-learn
|
sklearn/tests/test_grid_search.py
|
27
|
29492
|
"""
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import Ridge
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
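# Worked example (illustrative, not one of the original tests): ParameterGrid
# expands a dict of value lists into the full cross-product of settings, which
# is the behaviour the assertions above rely on.
def _parameter_grid_example():
    grid = ParameterGrid({"foo": [1, 2, 3]})
    assert list(grid) == [{"foo": 1}, {"foo": 2}, {"foo": 3}]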
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass n-dimensional X and y arrays to GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check GridSearchCV doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kinds of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
def test_classes__property():
# Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
|
bsd-3-clause
|
yandex/rep
|
tests/test_factory_reg.py
|
1
|
3000
|
from __future__ import division, print_function, absolute_import
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.metrics import mean_squared_error
import numpy
from rep.data import LabeledDataStorage
from rep.metaml import RegressorsFactory
from six.moves import cPickle
from rep.report import RegressionReport
from rep.test.test_estimators import generate_classification_data
__author__ = 'Tatiana Likhomanenko'
def test_factory():
factory = RegressorsFactory()
try:
from rep.estimators.tmva import TMVARegressor
factory.add_regressor('tmva', TMVARegressor(factory_options="Silent=True:V=False:DrawProgressBar=False"))
except ImportError:
pass
factory.add_regressor('rf', RandomForestRegressor(n_estimators=10))
factory.add_regressor('ada', AdaBoostRegressor(n_estimators=20))
X, y, sample_weight = generate_classification_data()
assert factory == factory.fit(X, y, sample_weight=sample_weight, features=list(X.columns))
values = factory.predict(X)
for cl in factory.values():
assert list(cl.features) == list(X.columns)
for key, val in values.items():
score = mean_squared_error(y, val)
print(score)
assert score < 0.2
for key, iterator in factory.staged_predict(X).items():
assert key != 'tmva', 'tmva does not support staged pp'
for p in iterator:
assert p.shape == (len(X),)
# checking that the final staged prediction coincides with factory.predict
assert numpy.all(p == values[key])
# testing picklability
dump_string = cPickle.dumps(factory)
clf_loaded = cPickle.loads(dump_string)
assert type(factory) == type(clf_loaded)
probs1 = factory.predict(X)
probs2 = clf_loaded.predict(X)
for key, val in probs1.items():
assert numpy.all(val == probs2[key]), 'something strange was loaded'
report = RegressionReport({'rf': factory['rf']}, LabeledDataStorage(X, y, sample_weight))
report.feature_importance_shuffling(mean_squared_mod).plot(new_plot=True, figsize=(18, 3))
report = factory.test_on_lds(LabeledDataStorage(X, y, sample_weight))
report = factory.test_on(X, y, sample_weight=sample_weight)
report.feature_importance()
report.features_correlation_matrix()
report.predictions_scatter()
val = numpy.mean(X['column0'])
report_mask(report, "column0 > %f" % val, X)
report_mask(report, lambda x: numpy.array(x['column0']) < val, X)
report_mask(report, None, X)
def mean_squared_mod(y_true, values, sample_weight=None):
return mean_squared_error(y_true, values, sample_weight=sample_weight)
def report_mask(report, mask, X):
report.features_correlation_matrix(mask=mask).plot()
report.feature_importance().plot()
report.scatter([(X.columns[0], X.columns[2])], mask=mask).plot()
report.predictions_scatter([X.columns[0], X.columns[2]], mask=mask).plot()
report.learning_curve(mean_squared_error, mask=mask).plot()
|
apache-2.0
|
cavestruz/L500analysis
|
plotting/profiles/K_evolution/plot_K_r200m.py
|
1
|
2587
|
from L500analysis.data_io.get_cluster_data import GetClusterData
from L500analysis.utils.utils import aexp2redshift
from L500analysis.plotting.tools.figure_formatting import *
from L500analysis.plotting.profiles.tools.profiles_percentile \
import *
from L500analysis.utils.constants import rbins
from derived_field_functions import *
color = matplotlib.cm.afmhot_r
matplotlib.rcParams['legend.handlelength'] = 0
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.fontsize'] = 12
aexps = [1.0,0.9,0.8,0.7,0.6,0.5,0.45,0.4,0.35]
db_name = 'L500_NR_0'
db_dir = '/home/babyostrich/Documents/Repos/L500analysis/'
profiles_list = ['S_mw', 'r_mid',
'S_mw/S200m',
'R/R200m']
halo_properties_list=['r200m','M_total_200m','nu_200m']
Kratio=r"$\tilde{K}=K(R)/K_{200m}$"
fKz1=r"$\tilde{K}/\tilde{K}(z=1)$"
pa = PlotAxes(figname='Kmw_r200m',
axes=[[0.15,0.4,0.80,0.55],[0.15,0.15,0.80,0.24]],
axes_labels=[Kratio,fKz1],
ylog=[True,False],
xlabel=r"$R/R_{200m}$",
xlim=(0.2,2),
ylims=[(0.1,11),(0.6,1.4)])
Tmw={}
Tplots = [Tmw]
clkeys = ['S_mw/S200m']
linestyles = ['-']
for aexp in aexps :
cldata = GetClusterData(aexp=aexp,db_name=db_name,
db_dir=db_dir,
profiles_list=profiles_list,
halo_properties_list=halo_properties_list)
for Tplot, key in zip(Tplots,clkeys) :
Tplot[aexp] = calculate_profiles_mean_variance(cldata[key])
pa.axes[Kratio].plot( rbins, Tmw[aexp]['mean'],color=color(aexp),ls='-',
label="$z=%3.1f$" % aexp2redshift(aexp))
# pa.axes[Kratio].fill_between(rbins, Tmw[0.5]['down'], Tmw[0.5]['up'],
# color=color(0.5), zorder=0)
for aexp in aexps :
for T,ls in zip(Tplots,linestyles) :
fractional_evolution = get_profiles_division_mean_variance(
mean_profile1=T[aexp]['mean'],
var_profile1=T[aexp]['var'],
mean_profile2=T[0.5]['mean'],
var_profile2=T[0.5]['var'],
)
pa.axes[fKz1].plot( rbins, fractional_evolution['mean'],
color=color(aexp),ls=ls)
pa.axes[Kratio].tick_params(labelsize=12)
pa.axes[Kratio].tick_params(labelsize=12)
pa.axes[fKz1].set_yticks(arange(0.6,1.4,0.2))
pa.set_legend(axes_label=Kratio,ncol=3,loc='best', frameon=False)
pa.color_legend_texts(axes_label=Kratio)
pa.savefig()
|
mit
|
atgreen/RTEMS
|
testsuites/tmtests/tmcontext01/plot.py
|
14
|
1341
|
#
# Copyright (c) 2014 embedded brains GmbH. All rights reserved.
#
# The license and distribution terms for this file may be
# found in the file LICENSE in this distribution or at
# http://www.rtems.org/license/LICENSE.
#
import libxml2
from libxml2 import xmlNode
import matplotlib.pyplot as plt
doc = libxml2.parseFile("tmcontext01.scn")
ctx = doc.xpathNewContext()
colors = ['k', 'r', 'b', 'g', 'y', 'm']
def plot(y, color, label, first):
n=len(y)
x=range(0, n)
if first:
plt.plot(x, y, color=color, label=label)
else:
plt.plot(x, y, color=color)
plt.title("context switch timing test")
plt.xlabel('function nest level')
plt.ylabel('context switch time [ns]')
c = 0
for e in ["normal", "dirty"]:
first = True
for i in ["Min", "Q1", "Q2", "Q3", "Max"]:
y=map(xmlNode.getContent, ctx.xpathEval("/Test/ContextSwitchTest[@environment='" + e + "' and not(@load)]/Sample/" + i))
plot(y, colors[c], e, first)
first = False
c = c + 1
load = 1
while load > 0:
first = True
for i in ["Min", "Q1", "Q2", "Q3", "Max"]:
y=map(xmlNode.getContent, ctx.xpathEval("/Test/ContextSwitchTest[@environment='dirty' and @load='" + str(load) + "']/Sample/" + i))
if len(y) > 0:
plot(y, colors[c], "load " + str(load), first)
first = False
else:
load = 0
if load > 0:
load = load + 1
c = c + 1
plt.legend()
plt.show()
|
gpl-2.0
|
networks-lab/metaknowledge
|
metaknowledge/tests/test_recordcollection.py
|
2
|
26484
|
#Written by Reid McIlroy-Young for Dr. John McLevey, University of Waterloo 2015
import unittest
import metaknowledge
import metaknowledge.WOS
import os
import filecmp
import networkx as nx
disableJournChecking = True
class TestRecordCollection(unittest.TestCase):
@classmethod
def setUpClass(cls):
metaknowledge.VERBOSE_MODE = False
cls.RCmain = metaknowledge.RecordCollection("metaknowledge/tests/testFile.isi")
cls.RCbadmain = metaknowledge.RecordCollection("metaknowledge/tests/badFile.isi")
def setUp(self):
self.RC = self.RCmain.copy()
self.RCbad = self.RCbadmain.copy()
def test_isCollection(self):
self.assertIsInstance(self.RC, metaknowledge.RecordCollection)
self.assertEqual(str(metaknowledge.RecordCollection()), "RecordCollection(Empty)")
self.assertTrue(self.RC == self.RC)
def test_fullRead(self):
RC = metaknowledge.RecordCollection("metaknowledge/tests/")
self.assertEqual(len(RC), 1032)
def test_caching(self):
RC = metaknowledge.RecordCollection("metaknowledge/tests/", cached = True, name = 'testingCache', extension = 'testFile.isi')
self.assertTrue(os.path.isfile("metaknowledge/tests/tests.[testFile.isi].mkRecordDirCache"))
accessTime = os.stat("metaknowledge/tests/testFile.isi").st_atime
RC2 = metaknowledge.RecordCollection("metaknowledge/tests/", cached = True, name = 'testingCache', extension = 'testFile.isi')
self.assertEqual(accessTime, os.stat("metaknowledge/tests/testFile.isi").st_atime)
RC.dropBadEntries()
RC2.dropBadEntries()
self.assertEqual(RC, RC2)
os.remove("metaknowledge/tests/tests.[testFile.isi].mkRecordDirCache")
def test_bad(self):
self.assertTrue(metaknowledge.RecordCollection('metaknowledge/tests/badFile.isi').bad)
with self.assertRaises(metaknowledge.mkExceptions.RCTypeError):
metaknowledge.RecordCollection('metaknowledge/tests/testFile.isi', extension = '.txt')
self.assertEqual(self.RCbad | self.RC, self.RCbad | self.RC )
self.assertEqual(len(self.RCbad | self.RCbad), 32)
self.assertFalse(self.RCbad == self.RC)
self.assertEqual('/Users/Reid/Documents/Work/NetworksLab/metaknowledge/metaknowledge/tests/badFile.isi', self.RCbad.errors.keys().__iter__().__next__())
def test_badEntries(self):
badRecs = self.RC.badEntries()
self.assertTrue(badRecs <= self.RC)
self.assertTrue(badRecs.pop().bad)
self.RC.dropBadEntries()
def test_dropJourn(self):
RCcopy = self.RC.copy()
self.RC.dropNonJournals()
self.assertEqual(len(self.RC), len(RCcopy) - 2)
self.RC.dropNonJournals(invert = True)
self.assertEqual(len(self.RC), 0)
RCcopy.dropNonJournals(ptVal = 'B')
self.assertEqual(len(RCcopy), 1)
def test_repr(self):
self.assertEqual(repr(self.RC), "<metaknowledge.RecordCollection object testFile>")
def test_hash(self):
self.assertNotEqual(hash(self.RC), hash(self.RCbad))
R = self.RC.pop()
RC = metaknowledge.RecordCollection([R])
self.assertEqual(hash(RC), hash(hash(R)))
def test_contains(self):
R = self.RC.peek()
self.assertTrue(R in self.RC)
R = self.RC.pop()
self.assertFalse(R in self.RC)
def test_conID(self):
R = self.RC.peek()
self.assertTrue(self.RC.containsID(R.id))
self.assertFalse(self.RC.containsID('234567654'))
def test_discard(self):
R = self.RC.peek()
l = len(self.RC)
self.RC.discard(R)
l2 = len(self.RC)
self.assertEqual(l, l2 + 1)
self.RC.discard(R)
self.assertEqual(l2, len(self.RC))
def test_pop(self):
R = self.RC.pop()
self.assertFalse(R in self.RC)
self.RC.clear()
with self.assertRaises(KeyError):
R = self.RC.pop()
def test_peek(self):
R = self.RC.peek()
self.assertTrue(R in self.RC)
self.RC.clear()
R = self.RC.peek()
self.assertTrue(R is None)
def test_clear(self):
R = self.RCbad.peek()
self.assertTrue(self.RCbad.bad)
self.RCbad.clear()
self.assertFalse(self.RCbad.bad)
self.assertFalse(R in self.RCbad)
def test_remove(self):
R = self.RC.peek()
l = len(self.RC)
self.RC.remove(R)
self.assertEqual(l, len(self.RC) + 1)
with self.assertRaises(KeyError):
self.RC.remove(R)
def test_equOps(self):
l = len(self.RC)
for i in range(10):
self.RCbad.pop()
lb = len(self.RCbad)
RC = metaknowledge.RecordCollection([])
RC.bad = True
RC |= self.RC
self.assertEqual(self.RC, RC)
RC -= self.RC
self.assertNotEqual(self.RC, RC)
RC ^= self.RC
self.assertEqual(self.RC, RC)
RC &= self.RCbad
self.assertNotEqual(self.RC, RC)
def test_newOps(self):
l = len(self.RC)
for i in range(10):
self.RCbad.pop()
lb = len(self.RCbad)
RC = metaknowledge.RecordCollection([])
RC.bad = True
RC3 = self.RC | RC
self.assertEqual(self.RC, RC3)
RC4 = RC3 - self.RC
self.assertNotEqual(self.RC, RC4)
RC5 = RC4 ^ self.RC
self.assertEqual(self.RC, RC5)
RC6 = RC5 & self.RCbad
self.assertNotEqual(self.RC, RC6)
def test_opErrors(self):
with self.assertRaises(TypeError):
self.RC <= 1
with self.assertRaises(TypeError):
self.RC >= 1
self.assertTrue(self.RC != 1)
with self.assertRaises(TypeError):
self.RC >= 1
with self.assertRaises(TypeError):
self.RC |= 1
with self.assertRaises(TypeError):
self.RC ^= 1
with self.assertRaises(TypeError):
self.RC &= 1
with self.assertRaises(TypeError):
self.RC -= 1
with self.assertRaises(TypeError):
self.RC | 1
with self.assertRaises(TypeError):
self.RC ^ 1
with self.assertRaises(TypeError):
self.RC & 1
with self.assertRaises(TypeError):
self.RC - 1
def test_addRec(self):
l = len(self.RC)
R = self.RC.pop()
self.assertEqual(len(self.RC), l - 1)
self.RC.add(R)
self.assertEqual(len(self.RC), l)
RC2 = metaknowledge.RecordCollection("metaknowledge/tests/TwoPaper.isi")
self.RC |= RC2
self.assertEqual(len(self.RC), l + 2)
with self.assertRaises(metaknowledge.CollectionTypeError):
self.RC.add(1)
def test_bytes(self):
with self.assertRaises(metaknowledge.BadRecord):
self.assertIsInstance(bytes(self.RC), bytes)
self.RC.dropBadEntries()
self.assertIsInstance(bytes(self.RC), bytes)
def test_WOS(self):
self.RC.dropBadEntries()
R = self.RC.peek()
l = len(self.RC)
self.assertTrue(R, self.RC.getID(R.id))
self.assertEqual(len(self.RC), l)
self.RC.removeID(R.id)
self.assertEqual(len(self.RC), l - 1)
self.RC.getID(self.RC.peek().id)
self.assertEqual(len(self.RC), l - 1)
self.assertFalse(self.RC.getID(self.RC.pop().id))
self.RC.discardID('sdfghjkjhgfdfghj')
self.RC.discardID('WOS:A1979GV55600001')
with self.assertRaises(KeyError):
self.RC.removeID('ghjkljhgfdfghjmh')
def test_directoryRead(self):
self.assertEqual(len(metaknowledge.RecordCollection('.')), 0)
self.assertTrue(metaknowledge.RecordCollection('metaknowledge/tests/') >= self.RC)
self.assertTrue(metaknowledge.RecordCollection('metaknowledge/tests/', extension= '.txt') <= self.RC)
def test_contentType(self):
RC = metaknowledge.RecordCollection('metaknowledge/tests/')
self.assertEqual(RC._collectedTypes, {'MedlineRecord', 'WOSRecord', 'ProQuestRecord', 'ScopusRecord'})
self.assertEqual(self.RC._collectedTypes, {'WOSRecord'})
def test_write(self):
fileName = 'OnePaper2.isi'
RC = metaknowledge.RecordCollection('metaknowledge/tests/' + fileName)
RC.writeFile(fileName + '.tmp')
RC.writeFile()
self.assertTrue(filecmp.cmp('metaknowledge/tests/' + fileName, fileName + '.tmp'))
self.assertTrue(filecmp.cmp('metaknowledge/tests/' + fileName, RC.name + '.txt'))
os.remove(fileName + '.tmp')
os.remove(RC.name + '.txt')
def test_writeCSV(self):
filename = "test_writeCSV_temporaryFile.csv"
if os.path.isfile(filename):
os.remove(filename)
self.RC.writeCSV(filename, onlyTheseTags=['UT', 'PT', 'TI', 'AF','J9' ,'CR', 'pubMedID'], firstTags = ['CR', 'UT', 'J9', 'citations'], csvDelimiter = '∂', csvQuote='≠', listDelimiter= '«', longNames=True, numAuthors = False)
self.assertTrue(os.path.isfile(filename))
self.assertEqual(os.path.getsize(filename), 107396)
os.remove(filename)
self.RC.writeCSV(filename)
self.assertTrue(os.path.isfile(filename))
self.assertEqual(os.path.getsize(filename), 89272)
os.remove(filename)
self.RC.writeCSV(splitByTag = 'PY', onlyTheseTags = ['id', 'title', 'authorsFull', 'citations', 'keywords', 'DOI'])
yearsSt = set()
for R in self.RC:
yearsSt.add(str(R.get('PY', 2012)))
for year in yearsSt:
f = open("{}-testFile.csv".format(year))
self.assertEqual(f.readline(), '"id","TI","AF","CR","ID","DI","num-Authors","num-Male","num-Female","num-Unknown"\n')
self.assertGreater(len(f.readline()), 1)
f.close()
os.remove("{}-testFile.csv".format(year))
def test_writeBib(self):
filename = 'testFile.bib'
if os.path.isfile(filename):
os.remove(filename)
self.RC.dropBadEntries()
self.RC.writeBib(maxStringLength = 100)
self.assertEqual(os.path.getsize(filename), 100418)
os.remove(filename)
self.RC.writeBib(fname = filename, wosMode = True, reducedOutput = True, niceIDs = False)
self.assertEqual(os.path.getsize(filename), 78163)
os.remove(filename)
def test_rpys(self):
d = self.RC.rpys()
self.assertIn(17, d['count'])
d = self.RC.rpys(1990, 2000)
self.assertEqual(len(d['year']), 11)
for v in d.values():
for i in v:
self.assertIsInstance(i, int)
def test_CopyrightFinder(self):
l = self.RC.findProbableCopyright()
self.assertEqual(len(l), 7)
l = self.RC.findProbableCopyright()
self.assertTrue(' (C) 2002 Optical Society of America.' in l)
def test_NLP(self):
filename = 'NLP_test.csv'
full = self.RC.forNLP(filename, removeCopyright = True, extraColumns = ['ID'])
self.assertEqual(len(full), 7)
self.assertEqual(len(full['id']), 33)
self.assertEqual(full['keywords'][0], full['ID'][0])
self.assertTrue(' (C) 2002 Optical Society of America.' in full['copyright'])
self.assertEqual(os.path.getsize(filename), 14445)
os.remove(filename)
dropping = self.RC.forNLP(filename,removeNumbers = False, dropList = ['a', 'and', 'the', 'is'], stemmer = lambda x: x.title())
self.assertEqual(len(dropping), 5)
self.assertEqual(len(dropping['id']), 33)
self.assertEqual(os.path.getsize(filename), 12901)
os.remove(filename)
def test_forBurst(self):
filename = 'Burst_test.csv'
full = self.RC.forBurst('keywords', outputFile = filename)
self.assertEqual(len(full), 2)
self.assertEqual(len(full['year']), 75)
self.assertIn('guides', full['word'])
os.remove(filename)
def test_genderStats(self):
stats = self.RC.genderStats()
self.assertEqual(stats, {'Unknown': 65, 'Male': 6, 'Female': 1})
stats = self.RC.genderStats(asFractions = True)
self.assertEqual(stats['Male'], 0.08333333333333333)
def test_getCitations(self):
cites = self.RC.getCitations()
self.assertIn('LAUE MV, 1920, RELATIVITATSTHEORIE, V1, P227', cites['citeString'])
def test_makeDict(self):
d = self.RC.makeDict(onlyTheseTags = list(metaknowledge.WOS.tagsAndNameSet), longNames = True)
self.assertEqual(len(d), 65)
self.assertEqual(len(d['wosString']), len(self.RC))
if d['eISSN'][0] == '2155-3165':
self.assertEqual(d['eISSN'][1], None)
else:
self.assertEqual(d['eISSN'][0], None)
self.assertIsInstance(d['citations'], list)
d = self.RC.makeDict(longNames = False, raw = True, numAuthors = False)
self.assertEqual(len(d), 45)
self.assertEqual(len(d['UT']), len(self.RC))
self.assertIsInstance(d['CR'], list)
def test_coCite(self):
Gdefault = self.RC.networkCoCitation(fullInfo = True)
Gauths = self.RC.networkCoCitation(nodeType = "author", dropAnon = False, detailedCore = True)
GauthsNoExtra = self.RC.networkCoCitation(nodeType = "author", nodeInfo = False)
Gunwei = self.RC.networkCoCitation(nodeType = 'original', weighted = False)
if not disableJournChecking:
Gjour = self.RC.networkCoCitation(nodeType = "journal", dropNonJournals = True)
Gyear = self.RC.networkCoCitation(nodeType = "year", fullInfo = True, count = False)
Gcore = self.RC.networkCoCitation(detailedCore = ['AF','AU', 'DE', 'ID', 'PY'], coreOnly = True)
Gexplode = self.RC.networkCoCitation(expandedCore = True, keyWords = 'a')
Gcr = self.RC.networkCoCitation(addCR = True, coreOnly = True)
self.assertIsInstance(Gdefault, nx.classes.graph.Graph)
self.assertLessEqual(len(Gdefault.edges()), len(Gunwei.edges()))
self.assertLessEqual(len(Gdefault.nodes()), len(Gunwei.nodes()))
self.assertEqual(len(GauthsNoExtra.edges()), len(Gauths.edges()))
self.assertEqual(len(GauthsNoExtra.nodes()), len(Gauths.nodes()) - 1 )
self.assertTrue('weight' in list(Gdefault.edges(data = True))[0][2])
self.assertTrue('info' in list(Gdefault.nodes(data = True))[0][1])
self.assertTrue('fullCite' in list(Gdefault.nodes(data = True))[0][1])
self.assertFalse('weight' in list(Gunwei.edges(data = True))[0][2])
self.assertEqual(metaknowledge.graphStats(Gdefault, sentenceString = True), "The graph has 493 nodes, 13000 edges, 0 isolates, 22 self loops, a density of 0.107282 and a transitivity of 0.611431")
self.assertEqual(metaknowledge.graphStats(Gauths, sentenceString = True), "The graph has 321 nodes, 6699 edges, 1 isolates, 68 self loops, a density of 0.131094 and a transitivity of 0.598575")
self.assertEqual(metaknowledge.graphStats(Gyear, sentenceString = True), "The graph has 91 nodes, 1898 edges, 0 isolates, 55 self loops, a density of 0.47033 and a transitivity of 0.702332")
if not disableJournChecking:
self.assertEqual(len(Gjour.nodes()), 85)
self.assertEqual(len(Gjour.edges()), 1195)
self.assertTrue('info' in Gjour.nodes(data=True)[0][1])
self.assertTrue('info' in list(Gyear.nodes(data=True))[0][1])
self.assertTrue('fullCite' in list(Gyear.nodes(data = True))[0][1])
self.assertEqual(Gcore.node['Costadebeauregard O, 1975, CAN J PHYS']['info'], 'COSTADEBEAUREGARD O, COSTADEBEAUREGARD O')
self.assertEqual(metaknowledge.graphStats(Gexplode, sentenceString = True), "The graph has 73 nodes, 366 edges, 0 isolates, 5 self loops, a density of 0.140411 and a transitivity of 0.523179")
self.assertIn('AUDOIN C, 1976, J PHYS E SCI INSTRUM', Gcr.node['Huard S, 1979, CAN J PHYS']['citations'])
def test_coAuth(self):
Gdefault = self.RC.networkCoAuthor()
if not disableJournChecking:
Gdetailed = self.RC.networkCoAuthor(count = False, weighted = False, detailedInfo = True, dropNonJournals = True)
self.assertIsInstance(Gdefault, nx.classes.graph.Graph)
self.assertEqual(len(Gdefault.nodes()), 45)
self.assertEqual(len(Gdefault.edges()), 46)
if not disableJournChecking:
self.assertEqual(metaknowledge.graphStats(Gdetailed, sentenceString = True), 'The graph has 45 nodes, 46 edges, 9 isolates, 0 self loops, a density of 0.0464646 and a transitivity of 0.822581')
def test_cite(self):
Gdefault = self.RC.networkCitation(fullInfo = True, count = False, dropAnon = True)
Ganon = self.RC.networkCitation(dropAnon = False)
Gauths = self.RC.networkCitation(nodeType = "author", detailedCore = True, dropAnon = True)
GauthsNoExtra = self.RC.networkCitation(nodeType = "author", nodeInfo = False, dropAnon = True)
Gunwei = self.RC.networkCitation(nodeType = 'original', weighted = False)
if not disableJournChecking:
Gjour = self.RC.networkCitation(nodeType = "author", dropNonJournals = True, nodeInfo = True, count = False)
Gyear = self.RC.networkCitation(nodeType = "year", nodeInfo = True)
Gcore = self.RC.networkCitation(detailedCore = True, coreOnly = False)
Gexplode = self.RC.networkCitation(expandedCore = True, keyWords = ['b', 'c'])
self.assertIsInstance(Gdefault, nx.classes.digraph.DiGraph)
self.assertLessEqual(len(Gdefault.edges()), len(Gunwei.edges()))
self.assertLessEqual(len(Gdefault.nodes()), len(Gunwei.nodes()))
self.assertEqual(len(GauthsNoExtra.edges()), len(Gauths.edges()))
self.assertEqual(len(GauthsNoExtra.nodes()), len(Gauths.nodes()))
self.assertTrue('weight' in list(Gdefault.edges(data = True))[0][2])
self.assertTrue('info' in list(Gdefault.nodes(data = True))[0][1])
self.assertFalse('weight' in list(Gunwei.edges(data = True))[0][2])
self.assertEqual(metaknowledge.graphStats(Gdefault, sentenceString = True), "The graph has 510 nodes, 816 edges, 1 isolates, 0 self loops, a density of 0.00314342 and a transitivity of 0.00600437")
self.assertEqual(metaknowledge.graphStats(Ganon, sentenceString = True), "The graph has 511 nodes, 817 edges, 0 isolates, 0 self loops, a density of 0.00313495 and a transitivity of 0.00600437")
self.assertEqual(metaknowledge.graphStats(Gauths, sentenceString = True), "The graph has 324 nodes, 568 edges, 1 isolates, 15 self loops, a density of 0.00542751 and a transitivity of 0.0210315")
if not disableJournChecking:
self.assertEqual(len(Gjour.edges()), 432)
self.assertTrue('info' in list(Gjour.nodes(data=True))[0][1])
self.assertTrue('info' in list(Gyear.nodes(data=True))[0][1])
self.assertEqual(Gcore.node['Gilles H, 2002, OPT LETT']['info'], 'WOS:000177484300017, Gilles H, Simple technique for measuring the Goos-Hanchen effect with polarization modulation and a position-sensitive detector, OPTICS LETTERS, 27, 1421')
self.assertEqual(metaknowledge.graphStats(Gexplode, sentenceString = True), "The graph has 19 nodes, 29 edges, 0 isolates, 3 self loops, a density of 0.0847953 and a transitivity of 0.132075")
def test_networkBibCoupling(self):
G = self.RC.networkBibCoupling()
self.assertEqual(metaknowledge.graphStats(G, sentenceString = True), 'The graph has 32 nodes, 304 edges, 1 isolates, 0 self loops, a density of 0.612903 and a transitivity of 0.836511')
def test_coOccurnce(self):
self.assertEqual(sum(self.RC.cooccurrenceCounts('TI', *tuple(self.RC.tags()))['Longitudinal and transverse effects of nonspecular reflection'].values()), 104)
def test_nLevel(self):
G = self.RC.networkMultiLevel(*tuple(self.RC.tags()))
self.assertEqual(metaknowledge.graphStats(G, sentenceString = True), 'The graph has 1187 nodes, 58731 edges, 0 isolates, 59 self loops, a density of 0.0834803 and a transitivity of 0.493814')
def test_oneMode(self):
Gcr = self.RC.networkOneMode('CR')
Gcite = self.RC.networkOneMode('citations', nodeCount = False, edgeWeight = False)
GcoCit = self.RC.networkCoCitation()
Gtit = self.RC.networkOneMode('title')
stemFunc = lambda x: x[:-1]
Gstem = self.RC.networkOneMode('keywords', stemmer = stemFunc)
self.assertEqual(len(Gcite.edges()), len(Gcr.edges()))
self.assertEqual(len(Gcite.nodes()), len(Gcr.nodes()))
self.assertAlmostEqual(len(Gcite.nodes()), len(GcoCit.nodes()), delta = 50)
self.assertEqual(len(self.RC.networkOneMode('D2').nodes()), 0)
self.assertEqual(len(Gtit.nodes()), 31)
self.assertEqual(len(Gtit.edges()), 0)
self.assertEqual(len(self.RC.networkOneMode('email').edges()), 3)
self.assertEqual(len(self.RC.networkOneMode('UT').nodes()), len(self.RC) - 1)
self.assertEqual(metaknowledge.graphStats(Gstem, sentenceString = True), 'The graph has 41 nodes, 142 edges, 2 isolates, 0 self loops, a density of 0.173171 and a transitivity of 0.854015')
self.assertIsInstance(list(Gstem.nodes())[0], str)
with self.assertRaises(TypeError):
G = self.RC.networkOneMode(b'Not a Tag')
del G
def test_twoMode(self):
self.RC.dropBadEntries()
Gutti = self.RC.networkTwoMode('UT', 'title', directed = True, recordType = False)
Gafwc = self.RC.networkTwoMode('AF', 'WC', nodeCount = False, edgeWeight = False)
Gd2em = self.RC.networkTwoMode('D2', 'email')
Gemd2 = self.RC.networkTwoMode('email', 'D2')
Gstemm = self.RC.networkTwoMode('title', 'title', stemmerTag1 = lambda x: x[:-1], stemmerTag2 = lambda x: x + 's')
self.assertIsInstance(Gutti, nx.classes.digraph.DiGraph)
self.assertIsInstance(Gafwc, nx.classes.graph.Graph)
self.assertEqual(list(Gutti.edges('WOS:A1979GV55600001'))[0][1][:31], "EXPERIMENTS IN PHENOMENOLOGICAL")
self.assertEqual(len(Gutti.nodes()), 2 * len(self.RC) - 1)
with self.assertRaises(metaknowledge.TagError):
G = self.RC.networkTwoMode('TI', b'not a tag')
del G
with self.assertRaises(metaknowledge.TagError):
G = self.RC.networkTwoMode(b'Not a Tag', 'TI')
del G
self.assertTrue(nx.is_isomorphic(Gd2em, Gemd2))
self.assertEqual(metaknowledge.graphStats(Gstemm, sentenceString = True), 'The graph has 62 nodes, 31 edges, 0 isolates, 0 self loops, a density of 0.0163934 and a transitivity of 0')
self.assertTrue('Optical properties of nanostructured thin filmss' in Gstemm)
def test_nMode(self):
G = self.RC.networkMultiMode(metaknowledge.WOS.tagToFullDict.keys())
Gstem = self.RC.networkMultiMode(metaknowledge.WOS.tagToFullDict.keys(), stemmer = lambda x : x[0])
self.assertEqual(metaknowledge.graphStats(G, sentenceString = True), 'The graph has 1186 nodes, 38564 edges, 0 isolates, 56 self loops, a density of 0.0549192 and a transitivity of 0.295384')
self.assertEqual(metaknowledge.graphStats(Gstem, sentenceString = True), 'The graph has 50 nodes, 997 edges, 0 isolates, 35 self loops, a density of 0.828571 and a transitivity of 0.855834')
def test_localCiteStats(self):
d = self.RC.localCiteStats()
dPan = self.RC.localCiteStats(pandasFriendly = True)
dYear = self.RC.localCiteStats(keyType = 'year')
self.assertEqual(d[metaknowledge.Citation("Azzam R. M. A., 1977, ELLIPSOMETRY POLARIZ")], 1)
self.assertEqual(len(dPan['Citations']),len(d))
self.assertTrue(dPan['Citations'][0] in d)
self.assertEqual(dYear[2009], 2)
def test_localCitesOf(self):
C = metaknowledge.Citation("COSTADEB.O, 1974, LETT NUOVO CIMENTO, V10, P852")
self.assertEqual("WOS:A1976CW02200002", self.RC.localCitesOf(C).peek().id)
self.assertEqual(self.RC.localCitesOf(self.RC.peek().id),
self.RC.localCitesOf(self.RC.peek().createCitation()))
def test_citeFilter(self):
RCmin = self.RC.citeFilter('', reverse = True)
RCmax = self.RC.citeFilter('')
RCanon = self.RC.citeFilter('', 'anonymous')
RC1970 = self.RC.citeFilter(1970, 'year')
RCno1970 = self.RC.citeFilter(1970, 'year', reverse = True)
RCMELLER = self.RC.citeFilter('meller', 'author')
self.assertEqual(len(RCmin), 0)
self.assertEqual(len(RCmax), len(self.RC))
self.assertEqual(len(RCanon), 1)
self.assertEqual(len(RC1970), 15)
self.assertEqual(len(RC1970) + len(RCno1970), len(self.RC))
self.assertEqual(len(RCMELLER), 1)
RCnocite = metaknowledge.RecordCollection('metaknowledge/tests/OnePaperNoCites.isi')
self.assertEqual(len(RCnocite.citeFilter('')), 0)
def test_yearDiff(self):
Gdefault = self.RC.networkCitation()
Gfull = self.RC.networkCitation(nodeType="full")
Goriginal = self.RC.networkCitation(nodeType="original")
# Is yearDiff included as an attribute
self.assertTrue('yearDiff' in list(Gdefault.edges(data=True))[0][2])
self.assertTrue('yearDiff' in list(Gfull.edges(data=True))[0][2])
self.assertTrue('yearDiff' in list(Goriginal.edges(data=True))[0][2])
# Is yearDiff being calculated correctly?
self.assertEqual(Gdefault["Costadebo, 1974, CR ACAD SCI A MATH"]["Gordon Jp, 1973, PHYS REV A"]["yearDiff"], 1)
self.assertEqual(Gfull["Costadebo, 1974, CR ACAD SCI A MATH"]["Gordon Jp, 1973, PHYS REV A"]["yearDiff"], 1)
self.assertEqual(Goriginal["COWAN JJ, 1977, J OPT SOC AM, V67, P1307, DOI 10.1364/JOSA.67.001307"]["GOOS F, 1947, ANN PHYS-BERLIN, V1, P333"]['yearDiff'], 30)
def test_glimpse(self):
#These tests do depend on terminal size
gBasic = self.RC.glimpse()
gCompact = self.RC.glimpse(compact = True)
gEmpty = self.RC.glimpse('AF', 'qwertyhujk')
self.assertIn('RecordCollection glimpse made at:', gBasic)
self.assertIn('Top Authors\n', gBasic)
self.assertIn('1 Gilles, H\n', gBasic)
self.assertIn('|1 JOURNAL OF THE OPTICA', gCompact)
self.assertIn('|Columns are ranked by num. of occurrences and are independent of one another++', gCompact)
self.assertIn('qwertyhujk', gEmpty)
|
gpl-2.0
|
nesterione/scikit-learn
|
examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py
|
227
|
5170
|
"""
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates how the accuracy of nearest neighbor queries with
Locality Sensitive Hashing Forest varies as the number of candidates and the
number of estimators (trees) change.
In the first plot, accuracy is plotted against the number of candidates. Here,
"number of candidates" refers to the maximum number of distinct points
retrieved from each tree when calculating distances. Nearest neighbors are
selected from this pool of candidates. The number of estimators is held at
three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50 and the number of
trees is varied, with accuracy plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# the expected accuracy and its standard deviation, displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
|
bsd-3-clause
|
Ernestyj/PyStudy
|
finance/TradingAlgo.py
|
1
|
5732
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.finance as finance
import zipline as zp
import math
from datetime import datetime
from zipline import TradingAlgorithm
class MyTradingAlgo(TradingAlgorithm):
def __init__(self):
# call the base TradingAlgorithm initializer so that run() works
super(MyTradingAlgo, self).__init__()
def initialize(self):
pass
def handle_data(self, data):
pass
def analyze(self, perf):
pass
df = pd.DataFrame({code: wsdClose['2015']})  # NOTE: `code` and `wsdClose` come from other notebook cells
perf = MyTradingAlgo().run(df)
import zipline
from zipline import TradingAlgorithm
from zipline.api import sid, order, order_target, record, symbol, history, add_history
from zipline.api import *
from zipline.pipeline import Pipeline
code='000001.SH'
df = pd.DataFrame({code:wsdClose['2015']})
shortWin=20
longWin=40
def initialize(context):
context.day = -1
context.code = symbol(code)
context.maDF = MA(df[code], shortWin=shortWin, longWin=longWin)
context.maShort = context.maDF[str(shortWin)+'MA']
context.maLong = context.maDF[str(longWin)+'MA']
context.invested = False
set_slippage(slippage.VolumeShareSlippage(volume_limit=1.0, price_impact=0.0))
set_commission(commission.PerDollar(cost=0.003))
pass
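# handle_data below implements a plain dual moving-average crossover: go long
# when the short MA crosses above the long MA while flat, and close the
# position when it crosses back below.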
def handle_data(context, data):
#print context.portfolio.cash
context.day += 1
i = context.day
s = context.maShort[i]
l = context.maLong[i]
pres = s
prel = l
if i!=0:
pres = context.maShort[i-1]
prel = context.maLong[i-1]
if i>=longWin-1:
if s>l and pres<=prel and not context.invested:
order_percent(symbol(code), 1.0)
context.invested = True
elif s<l and context.invested:
order_percent(symbol(code), -1.0)
context.invested = False
record(maShort=s, maLong=l)
pass
def analyze(context, perf):
perf_trans = perf.ix[[t!=[] for t in perf.transactions]]
buys = perf_trans.ix[[t[0]['amount'] > 0 for t in perf_trans.transactions]]
sells = perf_trans.ix[[t[0]['amount'] < 0 for t in perf_trans.transactions]]
fig = plt.figure(figsize=(20,15))
ax1 = fig.add_subplot(311)
#data['AAPL'].plot(ax=ax1, color='r', lw=2.)
perf[['maShort', 'maLong']].plot(ax=ax1, lw=2.)
ax1.plot(buys.index, perf.maShort.ix[buys.index], '^', markersize=10, color='m')
ax1.plot(sells.index, perf.maLong.ix[sells.index], 'v', markersize=10, color='k')
ax2 = fig.add_subplot(312)
portfolio_ratio = perf.portfolio_value/100000.0
portfolio_ratio.plot(ax=ax2, lw=2.)
ax2.plot(buys.index, portfolio_ratio.ix[buys.index], '^', markersize=10, color='m')
ax2.plot(sells.index, portfolio_ratio.ix[sells.index], 'v', markersize=10, color='k')
# ax3 = fig.add_subplot(313)
# perf.portfolio_value.plot(ax=ax3, lw=2.)
# ax3.plot(buys.index, perf.portfolio_value.ix[buys.index], '^', markersize=10, color='m')
# ax3.plot(sells.index, perf.portfolio_value.ix[sells.index], 'v', markersize=10, color='k')
pass
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
algo._analyze = analyze
perf = algo.run(df)
perf_trans = perf.ix[[t!=[] for t in perf.transactions]]
buys = perf_trans.ix[[t[0]['amount'] > 0 for t in perf_trans.transactions]]
sells = perf_trans.ix[[t[0]['amount'] < 0 for t in perf_trans.transactions]]
investDays = validInvestDays(buys, sells, perf)
print investDays
cashes = perf.portfolio_value.ix[sells.index]
returnRatArr = returnRatioArr(cashes.values)
final_return_ratio = returnRatio(perf.portfolio_value[-1])
print 'Total return ratio:', final_return_ratio
print 'Annualized return ratio:', annualizedReturnRatio([final_return_ratio], T=investDays, D=250.0)
from zipline.api import order_target, record, symbol, history, add_history
import numpy as np
def initialize(context):
# Register 2 histories that track daily prices,
# one with a 100 window and one with a 300 day window
add_history(100, '1d', 'price')
add_history(300, '1d', 'price')
context.i = 0
def handle_data(context, data):
# Skip first 300 days to get full windows
context.i += 1
if context.i < 300:
return
# Compute averages
# history() has to be called with the same params
# from above and returns a pandas dataframe.
short_mavg = history(100, '1d', 'price').mean()
long_mavg = history(300, '1d', 'price').mean()
# price_history = data.history(assets=symbol('TEST'), fields="price", bar_count=5, frequency="1d")
# Trading logic
if short_mavg[0] > long_mavg[0]:
# order_target orders as many shares as needed to
# achieve the desired number of shares.
order_target(symbol('AAPL'), 100)
elif short_mavg[0] < long_mavg[0]:
order_target(symbol('AAPL'), 0)
# Save values for later inspection
record(AAPL=data[symbol('AAPL')].price,
short_mavg=short_mavg[0],
long_mavg=long_mavg[0])
def analyze(context, perf):
fig = plt.figure()
ax1 = fig.add_subplot(211)
perf.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('portfolio value in $')
ax2 = fig.add_subplot(212)
perf['AAPL'].plot(ax=ax2)
perf[['short_mavg', 'long_mavg']].plot(ax=ax2)
perf_trans = perf.ix[[t != [] for t in perf.transactions]]
buys = perf_trans.ix[[t[0]['amount'] > 0 for t in perf_trans.transactions]]
sells = perf_trans.ix[
[t[0]['amount'] < 0 for t in perf_trans.transactions]]
ax2.plot(buys.index, perf.short_mavg.ix[buys.index],
'^', markersize=10, color='m')
ax2.plot(sells.index, perf.short_mavg.ix[sells.index],
'v', markersize=10, color='k')
ax2.set_ylabel('price in $')
plt.legend(loc=0)
plt.show()
|
apache-2.0
|
CompPhysics/ComputationalPhysics2
|
doc/BookChapters/figures/plotEnergies.py
|
10
|
1487
|
import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
try:
dataFileName = sys.argv[1]
except IndexError:
print("USAGE: python plotEnergies.py 'filename'")
sys.exit(0)
HFEnergy3 = 3.161921401722216
HFEnergy6 = 20.71924844033019
numParticles = \
int(dataFileName[dataFileName.find('N')+1:dataFileName.find('E')-1])
hfenergyFound = False
if (numParticles == 2):
HFEnergy = 3.161921401722216
hfenergyFound = True
elif (numParticles == 6):
HFEnergy = 20.71924844033019
hfenergyFound = True
else:
hfenergyFound = False
data = np.loadtxt(dataFileName, dtype=np.float64)
data[:,1] = np.sqrt(data[:,1])
n = len(data[:,0])
x = np.arange(0,n)
fig = plt.figure()
if (hfenergyFound):
yline = np.zeros(n)
yline.fill(HFEnergy)
plt.plot(x, yline, 'r--', label="HF Energy")
msize = 1.0
ax = fig.add_subplot(111)
plt.errorbar(x, data[:,0], yerr=data[:,1], fmt='bo', markersize=msize, label="VMC Energy")
plt.fill_between(x, data[:,0]-data[:,1], data[:,0]+data[:,1])
plt.xlim(0,n)
plt.xlabel('Iteration')
plt.ylabel('$E_0[a.u]$')
plt.legend(loc='best')
minSub = 80
maxSub = 120
inset_axes(ax, width="50%", height=1.0, loc='right')
plt.errorbar(x[minSub:maxSub], data[minSub:maxSub,0],
yerr=data[minSub:maxSub,1], fmt='bo', markersize=msize, label="VMC "
"Energy")
if (hfenergyFound):
# yline only exists when a reference HF energy was found for numParticles
plt.plot(x[minSub:maxSub], yline[minSub:maxSub], 'r--', label="HF Energy")
plt.show()
|
cc0-1.0
|
michalkurka/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_pubdev_6999_glm_interaction_NA.py
|
2
|
7181
|
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
import pandas as pd
import numpy as np
# test missing_value handling for interactions_GLM_Binomial. This test is derived from Brian Scannell's code. Thank you.
# I have tested all three kinds of interactions with a validation frame just to make sure my fix works properly:
# a. enum by enum
# b. enum by num
# c. num by num.
#
# In addition, I have thrown in interaction pairs as well just to make sure.
#
# The way to test this is to run with NAs first and get the GLM coefficients.
# Next, run again with the NA values replaced by the mode (categorical) or the
# mean (numerical). In theory this should produce the same model as before,
# since the default missing-value handling imputes exactly those values. We
# compare the two models to make sure the coefficients are the same.
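# For intuition, the replacement step described above amounts to the following
# pandas sketch (column names hypothetical):
#   df['num_col'] = df['num_col'].fillna(df['num_col'].mean())    # numeric -> mean
#   df['cat_col'] = df['cat_col'].fillna(df['cat_col'].mode()[0]) # categorical -> mode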
def interactions_GLM_Binomial():
# test multiple interaction pairs (enum by enum, enum by num and num by num), all with NA terms
print("******* Test interaction pairs")
pd_df_NA = pd.DataFrame(np.array([[1,0,1,0,1,0], [1,2,4.2/2.2,4,3,1], [2,3,float('NaN'),1,2,3],
["a","a","a","b","a","b"], ['Foo','UNKNOWN','Foo','Foo','Foo','Bar']]).T,
columns=['label','numerical_feat','numerical_feat2','categorical_feat',
'categorical_feat2'])
h2o_df_NA = h2o.H2OFrame(pd_df_NA, na_strings=["UNKNOWN"])
pd_df = pd.DataFrame(np.array([[1,0,1,0,1,0], [1,2,4.2/2.2,4,3,1], [2,3,2.2,1,2,3],
["a","a","a","b","a","b"], ['Foo','Foo','Foo','Foo','Foo','Bar']]).T,
columns=['label','numerical_feat','numerical_feat2','categorical_feat',
'categorical_feat2'])
h2o_df = h2o.H2OFrame(pd_df, na_strings=["UNKNOWN"])
interaction_pairs = [("numerical_feat", "numerical_feat2"),("numerical_feat", "categorical_feat2"),
("categorical_feat", "categorical_feat2")]
xcols = ['numerical_feat','numerical_feat2','categorical_feat','categorical_feat2']
# build model with and without NA in Frame
modelNA = H2OGeneralizedLinearEstimator(family = "Binomial", alpha=0, lambda_search=False,
interaction_pairs=interaction_pairs, standardize=False)
modelNA.train(x=xcols, y='label', training_frame=h2o_df_NA)
# build model with and without NA in Frame
model = H2OGeneralizedLinearEstimator(family = "Binomial", alpha=0, lambda_search=False,
interaction_pairs=interaction_pairs, standardize=False)
model.train(x=xcols, y='label', training_frame=h2o_df)
assert_arrays_equal_NA(modelNA._model_json['output']['coefficients_table'].cell_values,
model._model_json['output']['coefficients_table'].cell_values)
# test interaction of num and num columns
print("******* Test interaction with num by num")
pd_df_num_num_NA = pd.DataFrame(np.array([[1,0,1,0], [1,2,2,4], [2, 3, float('NaN'), 1]]).T,
columns=['label', 'numerical_feat', 'numerical_feat2'])
pd_df_num_num = pd.DataFrame(np.array([[1,0,1,0], [1,2,2,4], [2, 3, 2, 1]]).T,
columns=['label', 'numerical_feat', 'numerical_feat2'])
performOneTest(pd_df_num_num_NA, pd_df_num_num, interactionColumn= ['numerical_feat', 'numerical_feat2'],
xcols=['numerical_feat', 'numerical_feat2'], standard=False)
# test interaction of enum and enum columns
print("******* Test interaction with enum by enum")
pd_df_cat_cat_NA = pd.DataFrame(np.array([[1,0,1,0], ["a", "a", "b", "b"], ['Foo', 'UNKNOWN', 'Foo', 'Bar']]).T,
columns=['label', 'categorical_feat', 'categorical_feat2'])
pd_df_cat_cat = pd.DataFrame(np.array([[1,0,1,0], ["a", "a", "b", "b"], ['Foo', 'Foo', 'Foo', 'Bar']]).T,
columns=['label', 'categorical_feat', 'categorical_feat2'])
performOneTest(pd_df_cat_cat_NA, pd_df_cat_cat, interactionColumn= ['categorical_feat', 'categorical_feat2'],
xcols=['categorical_feat', 'categorical_feat2'])
# test interaction of enum and num columns
print("******* Test interaction with enum by num")
pd_df_cat_num_NA = pd.DataFrame(np.array([[1,0,1,0], [1,2,3,4], ['Foo', 'UNKNOWN', 'Foo', 'Bar']]).T,
columns=['label', 'numerical_feat', 'categorical_feat'])
pd_df_cat_num = pd.DataFrame(np.array([[1,0,1,0], [1,2,3,4], ['Foo', 'Foo', 'Foo', 'Bar']]).T,
columns=['label', 'numerical_feat', 'categorical_feat'])
performOneTest(pd_df_cat_num_NA, pd_df_cat_num, interactionColumn= ['numerical_feat', 'categorical_feat'],
xcols=['numerical_feat', 'categorical_feat'])
def performOneTest(frameWithNA, frameWithoutNA, interactionColumn, xcols, standard=True):
# default missing value handling = meanImputation
h2o_df_NA = h2o.H2OFrame(frameWithNA, na_strings=["UNKNOWN"])
h2o_df_NA_Valid = h2o.H2OFrame(frameWithNA, na_strings=["UNKNOWN"])
h2o_df = h2o.H2OFrame(frameWithoutNA, na_strings=["UNKNOWN"])
h2o_df_valid = h2o.H2OFrame(frameWithoutNA, na_strings=["UNKNOWN"])
# build model with and without NA in Frame
modelNA = H2OGeneralizedLinearEstimator(family = "Binomial", alpha=0, lambda_search=False,
interactions=interactionColumn, standardize=standard)
modelNA.train(x=xcols, y='label', training_frame=h2o_df_NA, validation_frame=h2o_df_NA_Valid)
model = H2OGeneralizedLinearEstimator(family = "Binomial", alpha=0, lambda_search=False,
interactions=interactionColumn, standardize=standard)
model.train(x=xcols, y='label', training_frame=h2o_df, validation_frame=h2o_df_valid)
# extract GLM coefficients
coef_m_NA = modelNA._model_json['output']['coefficients_table']
coef_m = model._model_json['output']['coefficients_table']
if not (len(coef_m_NA.cell_values)==len(coef_m.cell_values)): # deal with 0 coeff for NA
assert_arrays_equal_NA(coef_m_NA.cell_values, coef_m.cell_values)
else:
pyunit_utils.equal_2d_tables(coef_m_NA.cell_values, coef_m.cell_values)
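# assert_arrays_equal_NA walks both coefficient tables in step, skipping rows
# that appear only in the NA model (levels whose 0.0 coefficient has no
# counterpart in the non-NA model), and checks the rest agree to within 1e-6.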
def assert_arrays_equal_NA(coef_m_NA_dict, coef_m):
coefNAIndex = 0
for index in range(len(coef_m)):
if not (coef_m_NA_dict[coefNAIndex][0]==coef_m[index][0]): # skip over coefficients with NA that is 0.0
coefNAIndex = coefNAIndex+1
assert abs(coef_m_NA_dict[coefNAIndex][1]-coef_m[index][1])<1e-6, \
"Expected: {0}, Actual: {1}".format(coef_m_NA_dict[coefNAIndex][1], coef_m[index][1])
coefNAIndex=coefNAIndex+1
if __name__ == "__main__":
pyunit_utils.standalone_test(interactions_GLM_Binomial)
else:
interactions_GLM_Binomial()
|
apache-2.0
|
kdwink/intellij-community
|
python/helpers/pydev/pydev_ipython/inputhook.py
|
52
|
18411
|
# coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
# We don't (in the context of PyDev console) actually set PyOS_InputHook, but rather
# while waiting for input on xmlrpc we run this code
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
If None, clear all app references. If one of ('wx', 'qt4'), clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the ``PyOS_InputHook`` for wxPython, which allows
wxPython to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the PyOS_InputHook for PyQt4, which allows
PyQt4 to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for PyGTK, which allows
PyGTK to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
except:
# Python 3
import tkinter as _TK
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for GLUT, which allows GLUT to
integrate with terminal based applications like IPython. Due to GLUT
limitations, it is currently not possible to start the event loop
without first creating a window. You should thus not create another
window but use instead the created one. See 'gui-glut.py' in the
docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
This sets PyOS_InputHook to NULL and set the display function to a
dummy one and set the timer to a dummy timer that will be triggered
very far in the future.
"""
import OpenGL.GLUT as glut
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the ``PyOS_InputHook`` for pyglet, which allows
pyglet to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for Gtk3, which allows
Gtk3 to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK3
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
We call pyplot.pause, which updates and displays the active
figure during the pause. It is not MacOSX-specific, but it lets us
avoid input hooks in the native MacOSX backend.
We also should not import pyplot until the user does, because the
backend can only be chosen before pyplot is imported for the first
time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt4, # qt3 not supported
GUI_QT4: enable_qt4,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
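# Usage sketch: a return-control callback must be registered before any GUI
# can be enabled (the lambda is an illustrative placeholder):
#   set_return_control_callback(lambda: True)
#   enable_gui('tk')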
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt4",
"disable_qt4",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
|
apache-2.0
|
pianomania/scikit-learn
|
examples/classification/plot_lda.py
|
142
|
2419
|
"""
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='navy')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='gold')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
|
bsd-3-clause
|
AlexRobson/nilmtk
|
nilmtk/elecmeter.py
|
5
|
30305
|
from __future__ import print_function, division
from warnings import warn
from collections import namedtuple
from copy import deepcopy
from itertools import izip
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
from .preprocessing import Clip
from .stats import TotalEnergy, GoodSections, DropoutRate
from .stats.totalenergyresults import TotalEnergyResults
from .hashable import Hashable
from .appliance import Appliance
from .datastore import Key
from .measurement import (select_best_ac_type, AC_TYPES, PHYSICAL_QUANTITIES,
PHYSICAL_QUANTITIES_WITH_AC_TYPES,
check_ac_type, check_physical_quantity)
from .node import Node
from .electric import Electric
from .timeframe import TimeFrame, list_of_timeframe_dicts
from nilmtk.exceptions import MeasurementError
from .utils import flatten_2d_list, capitalise_first_letter
from nilmtk.timeframegroup import TimeFrameGroup
import nilmtk
ElecMeterID = namedtuple('ElecMeterID', ['instance', 'building', 'dataset'])
class ElecMeter(Hashable, Electric):
"""Represents a physical electricity meter.
Attributes
----------
appliances : list of Appliance objects connected immediately downstream
of this meter. Will be [] if no appliances are connected directly
to this meter.
store : nilmtk.DataStore
key : string
key into nilmtk.DataStore to access data.
metadata : dict.
See http://nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#elecmeter
STATIC ATTRIBUTES
-----------------
meter_devices : dict, static class attribute
See http://nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#meterdevice
"""
meter_devices = {}
def __init__(self, store=None, metadata=None, meter_id=None):
# Store and check parameters
self.appliances = []
self.metadata = {} if metadata is None else metadata
assert isinstance(self.metadata, dict)
self.store = store
self.identifier = meter_id
# Insert self into nilmtk.global_meter_group
if self.identifier is not None:
assert isinstance(self.identifier, ElecMeterID)
if self not in nilmtk.global_meter_group.meters:
nilmtk.global_meter_group.meters.append(self)
@property
def key(self):
return self.metadata['data_location']
def instance(self):
return self._identifier_attr('instance')
def building(self):
return self._identifier_attr('building')
def dataset(self):
return self._identifier_attr('dataset')
@property
def name(self):
return self.metadata.get('name')
@name.setter
def name(self, value):
self.metadata['name'] = value
def _identifier_attr(self, attr):
if self.identifier is None:
return
else:
return getattr(self.identifier, attr)
def get_timeframe(self):
self._check_store()
return self.store.get_timeframe(key=self.key)
def _check_store(self):
if self.store is None:
raise RuntimeError("ElecMeter needs `store` attribute set to an"
" instance of a `nilmtk.DataStore` subclass")
def upstream_meter(self, raise_warning=True):
"""
Returns
-------
ElecMeterID of upstream meter or None if is site meter.
"""
if self.is_site_meter():
if raise_warning:
warn("There is no meter upstream of this meter '{}' because"
" it is a site meter.".format(self.identifier))
return
submeter_of = self.metadata.get('submeter_of')
# Sanity checks
if submeter_of is None:
raise ValueError(
"This meter has no 'submeter_of' metadata attribute.")
if submeter_of < 0:
raise ValueError("'submeter_of' must be >= 0.")
upstream_meter_in_building = self.metadata.get(
'upstream_meter_in_building')
if (upstream_meter_in_building is not None and
upstream_meter_in_building != self.identifier.building):
raise NotImplementedError(
"'upstream_meter_in_building' not implemented yet.")
id_of_upstream = ElecMeterID(instance=submeter_of,
building=self.identifier.building,
dataset=self.identifier.dataset)
upstream_meter = nilmtk.global_meter_group[id_of_upstream]
if upstream_meter is None:
warn("No upstream meter found for '{}'.".format(self.identifier))
return upstream_meter
@classmethod
def load_meter_devices(cls, store):
dataset_metadata = store.load_metadata('/')
ElecMeter.meter_devices.update(
dataset_metadata.get('meter_devices', {}))
def save(self, destination, key):
"""
Convert all relevant attributes to a dict to be
saved as metadata in destination at location specified
by key
"""
# destination.write_metadata(key, self.metadata)
# then save data
raise NotImplementedError
@property
def device(self):
"""
Returns
-------
dict describing the MeterDevice for this meter (sample period etc).
"""
device_model = self.metadata.get('device_model')
if device_model:
return deepcopy(ElecMeter.meter_devices[device_model])
else:
return {}
def sample_period(self):
device = self.device
if device:
return device['sample_period']
def is_site_meter(self):
return self.metadata.get('site_meter', False)
def dominant_appliance(self):
"""Tries to find the most dominant appliance on this meter,
and then returns that appliance object. Will return None
if there are no appliances on this meter.
"""
n_appliances = len(self.appliances)
if n_appliances == 0:
return
elif n_appliances == 1:
return self.appliances[0]
else:
for app in self.appliances:
if app.metadata.get('dominant_appliance'):
return app
warn('Multiple appliances are associated with meter {}'
' but none are marked as the dominant appliance. Hence'
' returning the first appliance in the list.'.format(self.identifier),
RuntimeWarning)
return self.appliances[0]
def label(self, pretty=True):
"""Returns a string describing this meter.
Parameters
----------
pretty : boolean
If True then just return the type name of the dominant appliance
(without the instance number) or metadata['name'], with the
first letter capitalised.
Returns
-------
string : A label listing all the appliance types.
"""
if pretty:
return self._pretty_label()
meter_names = []
if self.is_site_meter():
meter_names.append('SITE METER')
elif "name" in self.metadata:
meter_names.append(self.metadata["name"])
else:
for appliance in self.appliances:
appliance_name = appliance.label()
if appliance.metadata.get('dominant_appliance'):
appliance_name = appliance_name.upper()
meter_names.append(appliance_name)
label = ", ".join(meter_names)
return label
def _pretty_label(self):
name = self.metadata.get("name")
if name:
label = name
elif self.is_site_meter():
label = 'Site meter'
elif self.dominant_appliance() is not None:
label = self.dominant_appliance().identifier.type
else:
meter_names = []
for appliance in self.appliances:
appliance_name = appliance.label()
if appliance.metadata.get('dominant_appliance'):
appliance_name = appliance_name.upper()
meter_names.append(appliance_name)
label = ", ".join(meter_names)
return label
label = capitalise_first_letter(label)
return label
def available_ac_types(self, physical_quantity):
"""Finds available alternating current types for a specific physical quantity.
Parameters
----------
physical_quantity : str or list of strings
Returns
-------
list of strings e.g. ['apparent', 'active']
"""
if isinstance(physical_quantity, list):
ac_types = [self.available_ac_types(pq) for pq in physical_quantity]
return list(set(flatten_2d_list(ac_types)))
if physical_quantity not in PHYSICAL_QUANTITIES:
raise ValueError("`physical_quantity` must by one of '{}', not '{}'"
.format(PHYSICAL_QUANTITIES, physical_quantity))
measurements = self.device['measurements']
return [m['type'] for m in measurements
if m['physical_quantity'] == physical_quantity
and 'type' in m]
def available_physical_quantities(self):
"""
Returns
-------
list of strings e.g. ['power', 'energy']
"""
measurements = self.device['measurements']
return list(set([m['physical_quantity'] for m in measurements]))
def available_columns(self):
"""
Returns
-------
list of 2-tuples of strings e.g. [('power', 'active')]
"""
measurements = self.device['measurements']
return list(set([(m['physical_quantity'], m.get('type', ''))
for m in measurements]))
def __repr__(self):
string = super(ElecMeter, self).__repr__()
# Now add list of appliances...
string = string[:-1] # remove last bracket
# Site meter
if self.metadata.get('site_meter'):
string += ', site_meter'
# Appliances
string += ', appliances={}'.format(self.appliances)
# METER ROOM
room = self.metadata.get('room')
if room:
string += ', room={}'.format(room)
string += ')'
return string
def matches(self, key):
"""
Parameters
----------
key : dict
Returns
-------
Bool
"""
if not key:
return True
if not isinstance(key, dict):
raise TypeError()
match = True
for k, v in key.iteritems():
if hasattr(self.identifier, k):
if getattr(self.identifier, k) != v:
match = False
elif k in self.metadata:
if self.metadata[k] != v:
match = False
elif k in self.device:
metadata_value = self.device[k]
if (isinstance(metadata_value, list) and
not isinstance(v, list)):
if v not in metadata_value:
match = False
elif metadata_value != v:
match = False
else:
raise KeyError("'{}' not a valid key.".format(k))
return match
def load(self, **kwargs):
"""Returns a generator of DataFrames loaded from the DataStore.
By default, `load` will load all available columns from the DataStore.
Specific columns can be selected in one of two mutually exclusive ways:
1. specify a list of column names using the `cols` parameter.
2. specify a `physical_quantity` and/or an `ac_type` parameter to ask
`load` to automatically select columns.
If 'resample' is set to True then the default behaviour is that
gaps shorter than max_sample_period are forward filled.
Parameters
---------------
physical_quantity : string or list of strings
e.g. 'power' or 'voltage' or 'energy' or ['power', 'energy'].
If a single string then load columns only for that physical quantity.
If a list of strings then load columns for all those physical
quantities.
ac_type : string or list of strings, defaults to None
Where 'ac_type' is short for 'alternating current type'. e.g.
'reactive' or 'active' or 'apparent'.
If set to None then will load all AC types per physical quantity.
If set to 'best' then load the single best AC type per
physical quantity.
If set to a single AC type then load just that AC type for each
physical quantity, raising an Exception if it is not available.
If set to a list of AC type strings then will load all those
AC types and will raise an Exception if any cannot be found.
cols : list of tuples, using NILMTK's vocabulary for measurements.
e.g. [('power', 'active'), ('voltage', ''), ('energy', 'reactive')]
`cols` can't be used if `ac_type` and/or `physical_quantity` are set.
sample_period : int, defaults to None
Number of seconds to use as the new sample period for resampling.
If None then will use self.sample_period()
resample : boolean, defaults to False
If True then will resample data using `sample_period`.
Defaults to True if `sample_period` is not None.
resample_kwargs : dict of key word arguments (other than 'rule') to
pass to `pd.DataFrame.resample()`. Defaults to setting 'limit' to
`max_sample_period / sample_period` and 'fill_method' to 'ffill'.
preprocessing : list of Node subclass instances
e.g. [Clip()].
**kwargs : any other key word arguments to pass to `self.store.load()`
Returns
-------
Always return a generator of DataFrames (even if it only has a single
column).
Raises
------
nilmtk.exceptions.MeasurementError if a measurement is specified
which is not available.
"""
verbose = kwargs.get('verbose')
if verbose:
print()
print("ElecMeter.load")
print(self)
if 'sample_period' in kwargs:
kwargs['resample'] = True
if kwargs.get('resample'):
# Set default key word arguments for resampling.
resample_kwargs = kwargs.setdefault('resample_kwargs', {})
resample_kwargs.setdefault('fill_method', 'ffill')
if 'limit' not in resample_kwargs:
sample_period = kwargs.get('sample_period', self.sample_period())
max_number_of_rows_to_ffill = int(
np.ceil(self.device['max_sample_period'] / sample_period))
resample_kwargs.update({'limit': max_number_of_rows_to_ffill})
if verbose:
print("kwargs after setting resample setting:")
print(kwargs)
kwargs = self._prep_kwargs_for_sample_period_and_resample(**kwargs)
if verbose:
print("kwargs after processing")
print(kwargs)
# Get source node
preprocessing = kwargs.pop('preprocessing', [])
last_node = self.get_source_node(**kwargs)
generator = last_node.generator
# Connect together all preprocessing nodes
for node in preprocessing:
node.upstream = last_node
last_node = node
generator = last_node.process()
return generator
def _ac_type_to_columns(self, ac_type):
if ac_type is None:
return []
if isinstance(ac_type, list):
cols2d = [self._ac_type_to_columns(a_t) for a_t in ac_type]
return list(set(flatten_2d_list(cols2d)))
check_ac_type(ac_type)
cols_matching = [col for col in self.available_columns()
if col[1] == ac_type]
return cols_matching
def _physical_quantity_to_columns(self, physical_quantity):
if physical_quantity is None:
return []
if isinstance(physical_quantity, list):
cols2d = [self._physical_quantity_to_columns(p_q)
for p_q in physical_quantity]
return list(set(flatten_2d_list(cols2d)))
check_physical_quantity(physical_quantity)
cols_matching = [col for col in self.available_columns()
if col[0] == physical_quantity]
return cols_matching
def _get_columns_with_best_ac_type(self, physical_quantity=None):
if physical_quantity is None:
physical_quantity = self.available_physical_quantities()
if isinstance(physical_quantity, list):
columns = set()
for pq in physical_quantity:
best = self._get_columns_with_best_ac_type(pq)
if best:
columns.update(best)
return list(columns)
check_physical_quantity(physical_quantity)
available_pqs = self.available_physical_quantities()
if physical_quantity not in available_pqs:
return []
ac_types = self.available_ac_types(physical_quantity)
try:
best_ac_type = select_best_ac_type(ac_types)
except KeyError:
return []
else:
return [(physical_quantity, best_ac_type)]
def _convert_physical_quantity_and_ac_type_to_cols(
self, physical_quantity=None, ac_type=None, cols=None,
**kwargs):
"""Returns kwargs dict with physical_quantity and ac_type removed
and cols populated appropriately."""
if cols:
if (ac_type or physical_quantity):
raise ValueError("Cannot use `ac_type` and/or `physical_quantity`"
" with `cols` parameter.")
else:
if set(cols).issubset(self.available_columns()):
kwargs['cols'] = cols
return kwargs
else:
msg = ("'{}' is not a subset of the available columns: '{}'"
.format(cols, self.available_columns()))
raise MeasurementError(msg)
msg = ""
if not (ac_type or physical_quantity):
cols = self.available_columns()
elif ac_type == 'best':
cols = self._get_columns_with_best_ac_type(physical_quantity)
if not cols:
msg += "No AC types for physical quantity {}".format(physical_quantity)
else:
if ac_type:
cols = self._ac_type_to_columns(ac_type)
if not cols:
msg += "AC type '{}' not available. ".format(ac_type)
if physical_quantity:
cols_matching_pq = self._physical_quantity_to_columns(physical_quantity)
if not cols_matching_pq:
msg += ("Physical quantity '{}' not available. "
.format(physical_quantity))
if cols:
cols = list(set(cols).intersection(cols_matching_pq))
if not cols:
msg += ("No measurement matching ({}, {}). "
.format(physical_quantity, ac_type))
else:
cols = cols_matching_pq
if msg:
msg += "Available columns = {}. ".format(self.available_columns())
raise MeasurementError(msg)
kwargs['cols'] = cols
return kwargs
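    # Sketch of the conversion this helper performs (hypothetical meter
    # whose available columns are [('power', 'active'), ('voltage', '')]):
    #
    #   _convert_physical_quantity_and_ac_type_to_cols(physical_quantity='power')
    #   -> {'cols': [('power', 'active')]}
    #
    #   _convert_physical_quantity_and_ac_type_to_cols(ac_type='reactive')
    #   -> raises MeasurementError ("AC type 'reactive' not available. ...")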
def dry_run_metadata(self):
return self.metadata
def get_metadata(self):
return self.metadata
def get_source_node(self, **loader_kwargs):
if self.store is None:
raise RuntimeError(
"Cannot get source node if meter.store is None!")
loader_kwargs = self._convert_physical_quantity_and_ac_type_to_cols(**loader_kwargs)
generator = self.store.load(key=self.key, **loader_kwargs)
self.metadata['device'] = self.device
return Node(self, generator=generator)
def total_energy(self, **loader_kwargs):
"""
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
        if `full_results` is True then return a TotalEnergyResults object,
        else return a pd.Series with a row for each AC type.
"""
nodes = [Clip, TotalEnergy]
return self._get_stat_from_cache_or_compute(
nodes, TotalEnergy.results_class(), loader_kwargs)
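    # Hedged usage sketch (assumes `meter` is a loaded ElecMeter):
    #
    #   energy = meter.total_energy()                  # pd.Series per AC type
    #   full = meter.total_energy(full_results=True)   # TotalEnergyResults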
def dropout_rate(self, ignore_gaps=True, **loader_kwargs):
"""
Parameters
----------
ignore_gaps : bool, default=True
If True then will only calculate dropout rate for good sections.
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
DropoutRateResults object if `full_results` is True,
else float
"""
nodes = [DropoutRate]
if ignore_gaps:
loader_kwargs['sections'] = self.good_sections(**loader_kwargs)
return self._get_stat_from_cache_or_compute(
nodes, DropoutRate.results_class(), loader_kwargs)
def good_sections(self, **loader_kwargs):
"""
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
if `full_results` is True then return nilmtk.stats.GoodSectionsResults
object otherwise return list of TimeFrame objects.
"""
loader_kwargs.setdefault('n_look_ahead_rows', 10)
nodes = [GoodSections]
results_obj = GoodSections.results_class(self.device['max_sample_period'])
return self._get_stat_from_cache_or_compute(
nodes, results_obj, loader_kwargs)
def _get_stat_from_cache_or_compute(self, nodes, results_obj, loader_kwargs):
"""General function for computing statistics and/or loading them from
cache.
        Cached statistics live in the DataStore at
        'building<I>/elec/cache/meter<K>/<statistic_name>' e.g.
        'building1/elec/cache/meter1/total_energy'.  We store the
        'full' statistic... i.e. we store a representation of the
        `Results._data` DataFrame.  Sometimes we need to do some conversion
        to store `Results._data` on disk.  The logic for this conversion
        lives in the `Results` class or subclass.  The cache can be
        cleared by calling `ElecMeter.clear_cache()`.
Parameters
----------
nodes : list of nilmtk.Node classes
results_obj : instance of nilmtk.Results subclass
loader_kwargs : dict
Returns
-------
if `full_results` is True then return nilmtk.Results subclass
instance otherwise return nilmtk.Results.simple().
See Also
--------
clear_cache
_compute_stat
key_for_cached_stat
get_cached_stat
"""
full_results = loader_kwargs.pop('full_results', False)
verbose = loader_kwargs.get('verbose')
if 'ac_type' in loader_kwargs or 'physical_quantity' in loader_kwargs:
loader_kwargs = self._convert_physical_quantity_and_ac_type_to_cols(**loader_kwargs)
cols = loader_kwargs.get('cols', [])
ac_types = set([m[1] for m in cols if m[1]])
results_obj_copy = deepcopy(results_obj)
# Prepare `sections` list
sections = loader_kwargs.get('sections')
if sections is None:
tf = self.get_timeframe()
tf.include_end = True
sections = [tf]
sections = TimeFrameGroup(sections)
sections = [s for s in sections if not s.empty]
# Retrieve usable stats from cache
key_for_cached_stat = self.key_for_cached_stat(results_obj.name)
if loader_kwargs.get('preprocessing') is None:
cached_stat = self.get_cached_stat(key_for_cached_stat)
results_obj.import_from_cache(cached_stat, sections)
def find_sections_to_compute():
# Get sections_to_compute
results_obj_timeframes = results_obj.timeframes()
sections_to_compute = set(sections) - set(results_obj_timeframes)
sections_to_compute = list(sections_to_compute)
sections_to_compute.sort()
return sections_to_compute
try:
ac_type_keys = results_obj.simple().keys()
except:
sections_to_compute = find_sections_to_compute()
else:
if ac_types.issubset(ac_type_keys):
sections_to_compute = find_sections_to_compute()
else:
sections_to_compute = sections
results_obj = results_obj_copy
else:
sections_to_compute = sections
if verbose and not results_obj._data.empty:
print("Using cached result.")
# If we get to here then we have to compute some stats
if sections_to_compute:
loader_kwargs['sections'] = sections_to_compute
computed_result = self._compute_stat(nodes, loader_kwargs)
# Merge cached results with newly computed
results_obj.update(computed_result.results)
# Save to disk newly computed stats
stat_for_store = computed_result.results.export_to_cache()
try:
self.store.append(key_for_cached_stat, stat_for_store)
except ValueError:
# the old table probably had different columns
self.store.remove(key_for_cached_stat)
self.store.put(key_for_cached_stat, results_obj.export_to_cache())
if full_results:
return results_obj
else:
res = results_obj.simple()
if ac_types:
try:
ac_type_keys = res.keys()
except:
return res
else:
return pd.Series(res[ac_types], index=ac_types)
else:
return res
def _compute_stat(self, nodes, loader_kwargs):
"""
Parameters
----------
nodes : list of nilmtk.Node subclass objects
loader_kwargs : dict
Returns
-------
Node subclass object
See Also
--------
clear_cache
_get_stat_from_cache_or_compute
key_for_cached_stat
get_cached_stat
"""
results = self.get_source_node(**loader_kwargs)
for node in nodes:
results = node(results)
results.run()
return results
def key_for_cached_stat(self, stat_name):
"""
Parameters
----------
stat_name : str
Returns
-------
key : str
See Also
--------
clear_cache
_compute_stat
_get_stat_from_cache_or_compute
get_cached_stat
"""
if isinstance(self.instance(), tuple):
meter_str = "_".join([str(i) for i in (self.instance())])
else:
meter_str = "{:d}".format(self.instance())
return ("building{:d}/elec/cache/meter{}/{:s}"
.format(self.building(), meter_str, stat_name))
def clear_cache(self, verbose=False):
"""
See Also
--------
_compute_stat
_get_stat_from_cache_or_compute
key_for_cached_stat
get_cached_stat
"""
if self.store is not None:
key_for_cache = self.key_for_cached_stat('')
try:
self.store.remove(key_for_cache)
except KeyError:
if verbose:
print("No existing cache for", key_for_cache)
else:
print("Removed", key_for_cache)
def get_cached_stat(self, key_for_stat):
"""
Parameters
----------
key_for_stat : str
Returns
-------
pd.DataFrame
See Also
--------
_compute_stat
_get_stat_from_cache_or_compute
key_for_cached_stat
clear_cache
"""
if self.store is None:
return pd.DataFrame()
try:
stat_from_cache = self.store[key_for_stat]
except KeyError:
return pd.DataFrame()
else:
return pd.DataFrame() if stat_from_cache is None else stat_from_cache
# def total_on_duration(self):
# """Return timedelta"""
# raise NotImplementedError
# def on_durations(self):
# raise NotImplementedError
# def activity_distribution(self, bin_size, timespan):
# raise NotImplementedError
# def on_off_events(self):
# use self.metadata.minimum_[off|on]_duration
# raise NotImplementedError
# def discrete_appliance_activations(self):
# """
# Return a Mask defining the start and end times of each appliance
# activation.
# """
# raise NotImplementedError
# def contiguous_sections(self):
# """retuns Mask object"""
# raise NotImplementedError
# def clean_and_export(self, destination_datastore):
# """Apply all cleaning configured in meter.cleaning and then export. Also identifies
# and records the locations of gaps. Also records metadata about exactly which
# cleaning steps have been executed and some summary results (e.g. the number of
# implausible values removed)"""
# raise NotImplementedError
|
apache-2.0
|
luo66/scikit-learn
|
sklearn/utils/validation.py
|
30
|
24618
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
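# Behaviour sketch for as_float_array (values assumed for illustration):
#
#   >>> as_float_array(np.array([1, 2], dtype=np.int32)).dtype
#   dtype('float32')
#   >>> as_float_array(np.array([1, 2], dtype=np.int64)).dtype
#   dtype('float64')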
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first type is only
        performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will"
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
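# Minimal sketch of check_array behaviour (inputs assumed):
#
#   check_array([[1, 2], [3, 4]])   # -> 2d numpy array, dtype preserved
#   check_array([1, 2, 3])          # 1d input: DeprecationWarning, then
#                                   # reshaped to a 2d array
#   check_array(sp.csr_matrix(np.eye(3)))          # raises TypeError
#   check_array(sp.csr_matrix(np.eye(3)), 'csr')   # accepted as CSR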
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first type is only
        performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
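# Behaviour sketch (assumed seeds):
#
#   check_random_state(None) is np.random.mtrand._rand   # -> True
#   check_random_state(42)          # -> new RandomState seeded with 42
#   rng = np.random.RandomState(0)
#   check_random_state(rng) is rng  # -> True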
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
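# Worked example (values assumed):
#
#   A = np.array([[0., 1.], [2., 0.]])
#   check_symmetric(A)
#   # warns, then returns 0.5 * (A + A.T) == array([[0. , 1.5],
#   #                                               [1.5, 0. ]])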
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
|
bsd-3-clause
|
code-for-india/sahana_shelter_worldbank
|
modules/tests/smoke/broken_links.py
|
5
|
26739
|
""" Sahana Eden Test Framework
@copyright: 2011-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from time import time
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
import sys
import socket
from tests.web2unittest import Web2UnitTest
from gluon import current
try:
from twill import get_browser
from twill import set_output
from twill.browser import *
except ImportError:
raise NameError("Twill not installed")
try:
import mechanize
#from mechanize import BrowserStateError
#from mechanize import ControlNotFoundError
except ImportError:
raise NameError("Mechanize not installed")
class BrokenLinkTest(Web2UnitTest):
""" Smoke Test, visit every link it can find and report on the outcome """
def __init__(self):
Web2UnitTest.__init__(self)
self.b = get_browser()
self.b_data = StringIO()
set_output(self.b_data)
self.clearRecord()
# This string must exist in the URL for it to be followed
# Useful to avoid going to linked sites
self.homeURL = self.url
# Link used to identify a URL to a ticket
self.url_ticket = "/admin/default/ticket/"
# Tuple of strings that if in the URL will be ignored
# Useful to avoid dynamic URLs that trigger the same functionality
self.include_ignore = ("_language=",
"logout",
"appadmin",
"admin",
"delete",
)
# tuple of strings that should be removed from the URL before storing
# Typically this will be some variables passed in via the URL
self.strip_url = ("?_next=",
)
self.reportOnly = False
self.maxDepth = 16 # sanity check
self.setThreshold(10)
self.setUser("test@example.com/eden")
self.total_visited = 0
self.broken_links_count = 0
def clearRecord(self):
# the total url links visited
self.totalLinks = 0
# The number of unique urls found at depth i, where i is the index
self.linkDepth = []
# Dictionary of the parent for each URL
self.urlParentList = {}
# dictionary of ReportData objects indexed on the url
self.results = {}
def setReportOnly(self, action):
self.reportOnly = action
def setDepth(self, depth):
self.maxDepth = depth
def setUser(self, user):
self.credentials = user.split(",")
def setThreshold(self, value):
value = float(value)
self.threshold = value
# socket.setdefaulttimeout(value*2)
def login(self, credentials):
if credentials == "UNAUTHENTICATED":
url = "%s/default/user/logout" % self.homeURL
self.b.go(url)
return True
try:
(self.user, self.password) = credentials.split("/",1)
except:
msg = "Unable to split %s into a user name and password" % user
self.reporter(msg)
return False
url = "%s/default/user/login" % self.homeURL
self.b.go(url)
forms = self.b.get_all_forms()
for form in forms:
try:
if form["_formname"] == "login":
self.b._browser.form = form
form["email"] = self.user
form["password"] = self.password
self.b.submit("Login")
# If login is successful then should be redirected to the homepage
return self.b.get_url()[len(self.homeURL):] == "/default/index"
except:
# This should be a mechanize.ControlNotFoundError, but
# for some unknown reason that isn't caught on Windows or Mac
pass
return False
def addResults2Current(self):
'''
        Store the link counts in gluon.current to be used by HTMLTestRunner for better reporting
'''
smoke_results = {}
smoke_results['working_links'] = self.total_visited - self.broken_links_count
smoke_results['broken_links_count'] = self.broken_links_count
current.data['smoke_results'] = smoke_results
def runTest(self):
"""
Test to find all exposed links and check the http code returned.
This test doesn't run any javascript so some false positives
will be found.
        The test can also display a histogram depicting the number of
        links found at each depth.
        Whether failure or success is shown in the report is determined in
        addSuccess in the TestResult class.
"""
for user in self.credentials:
self.clearRecord()
if self.login(user):
self.reporter("Smoke Test for user %s" % self.user)
self.visitLinks()
self.report()
self.addResults2Current()
else:
raise Exception("Login Failed")
def visitLinks(self):
url = self.homeURL + "/default/index"
to_visit = [url]
start = time()
self.total_visited = 0
if not self.reportOnly:
for depth in range(self.maxDepth):
if len(to_visit) == 0:
break
self.linkDepth.append(len(to_visit))
self.totalLinks += len(to_visit)
visit_start = time()
url_visited = "%d urls" % len(to_visit)
self.total_visited += len(to_visit)
to_visit = self.visit(to_visit, depth)
msg = "%.2d Visited %s in %.3f seconds, %d more urls found" % (depth, url_visited, time()-visit_start, len(to_visit))
self.reporter(msg)
if self.config.verbose >= 2:
if self.config.verbose >= 3:
print >> self.stdout
if self.stdout.isatty(): # terminal should support colour
msg = "%.2d Visited \033[1;32m%s\033[0m in %.3f seconds, \033[1;31m%d\033[0m more urls found" % (depth, url_visited, time()-visit_start, len(to_visit))
print >> self.stdout, msg
if len(to_visit) > 0:
self.linkDepth.append(len(to_visit))
finish = time()
self.reporter("Finished took %.3f seconds" % (finish - start))
def visit(self, url_list, depth):
repr_list = [".pdf", ".xls", ".rss", ".kml"]
to_visit = []
record_data = self.config.verbose > 0
for visited_url in url_list:
index_url = visited_url[len(self.homeURL):]
if record_data:
if index_url in self.results.keys():
print >> self.stdout, "Warning duplicated url: %s" % index_url
self.results[index_url] = ReportData()
current_results = self.results[index_url]
current_results.depth = depth
# Find out if the page can be visited
open_novisit = False
for repr in repr_list:
if repr in index_url:
open_novisit = True
break
try:
if open_novisit:
action = "open_novisit"
else:
action = "open"
visit_start = time()
self.b._journey(action, visited_url)
http_code = self.b.get_code()
duration = time() - visit_start
if record_data:
current_results.duration = duration
if duration > self.threshold:
if self.config.verbose >= 3:
print >> self.stdout, "%s took %.3f seconds" % (visited_url, duration)
except Exception as e:
duration = time() - visit_start
import traceback
print traceback.format_exc()
if record_data:
current_results.broken = True
current_results.exception = True
current_results.duration = duration
continue
http_code = self.b.get_code()
if http_code != 200:
if record_data:
current_results.broken = True
current_results.http_code = http_code
elif open_novisit:
continue
links = []
try:
if self.b._browser.viewing_html():
links = self.b._browser.links()
else:
continue
except Exception as e:
import traceback
print traceback.format_exc()
if record_data:
current_results.broken = True
current_results.exception = True
continue
for link in (links):
url = link.absolute_url
if url.find(self.url_ticket) != -1:
# A ticket was raised so...
# capture the details and add to brokenLinks
if record_data:
current_results.broken = True
current_results.ticket = url
break # no need to check any other links on this page
if url.find(self.homeURL) == -1:
continue
ignore_link = False
for ignore in self.include_ignore:
if url.find(ignore) != -1:
ignore_link = True
break
if ignore_link:
continue
for strip in self.strip_url:
location = url.find(strip)
if location != -1:
url = url[0:location]
short_url = url[len(self.homeURL):]
if url not in url_list and \
short_url != "" and \
short_url not in self.results.keys() and \
url not in to_visit:
self.urlParentList[short_url] = index_url
to_visit.append(url)
return to_visit
def report(self):
self.reporter("%d URLs visited" % self.totalLinks)
self.brokenReport()
self.timeReport()
if self.config.record_timings:
if not self.reportOnly:
self.record_timings()
self.scatterplot()
self.depthReport()
def record_timings(self):
import_error = ""
try:
import xlrd
except:
import_error += "ERROR: the xlrd modules is needed to record timings\n"
try:
import xlwt
except:
import_error += "ERROR: the xlwt modules is needed to record timings\n"
if import_error != "":
print >> self.stderr, import_error
return
rec_time_filename = self.config.record_timings_filename
try:
workbook = xlrd.open_workbook(filename=rec_time_filename,
formatting_info=True)
except:
workbook = None
summary = {}
if workbook:
summary = self.read_timings_sheet(workbook)
if len(summary["date"]) > 100:
# Need to rotate the file
# 1) make a summary and save this
self.report_timings_summary(summary, rec_time_filename)
# 2) archive the file
from zipfile import ZipFile
import os
zip_filename = os.path.join(self.config.path, "rec_time.zip")
archive = ZipFile(zip_filename, "a")
arc_name = "%s-%s.xls" % (rec_time_filename[len(self.config.path):-4],
current.request.now.date()
)
archive.write(rec_time_filename,arc_name)
archive.close()
# 3) clear the current file
os.unlink(rec_time_filename)
summary = {}
if "date" not in summary:
last_col = 0
summary["date"] = [current.request.now.date()]
else:
last_col = len(summary["date"])
summary["date"].append(current.request.now.date())
for (url, rd_obj) in self.results.items():
if url not in summary:
summary[url] = []
# ensure that the row is as long as the number of dates
shortage = last_col - len(summary[url])
if shortage > 0:
summary[url] = summary[url] + ['']*shortage
summary[url].append((rd_obj.get_duration(), rd_obj.is_broken()))
self.write_timings_sheet(summary, rec_time_filename)
def read_timings_sheet(self, workbook):
"""
This will extract all the details from the xls sheet
"""
sheet = workbook.sheet_by_name("Timings")
summary = {}
RED = 0x0A
num_cells = sheet.ncols
summary["date"] = []
for col in range(1, num_cells):
summary["date"].append(sheet.cell_value(0, col))
for row in range(1,sheet.nrows):
url = sheet.cell_value(row, 0)
summary[url] = []
for col in range(1, num_cells):
duration = sheet.cell_value(row, col)
xf = sheet.cell_xf_index(row, col)
bg = workbook.xf_list[xf].background
broken = (bg.pattern_colour_index == RED)
summary[url].append((duration, broken))
return summary
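    # Shape of the returned summary (illustrative values only):
    #
    #   {"date": ["2014-01-01", "2014-01-02"],
    #    "/default/index": [(0.42, False), (0.51, False)],
    #    "/admin/index": [(1.30, True), (1.28, True)]}
    #
    # i.e. one (duration, broken) tuple per recorded date for each url.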
def write_timings_sheet(self, summary, filename=None):
import xlwt
RED = 0x0A
book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet("Timings")
stylebroken = xlwt.XFStyle()
stylebroken.pattern.pattern = stylebroken.pattern.SOLID_PATTERN
stylebroken.pattern.pattern_fore_colour = RED
col = 1
for date in summary["date"]:
sheet.write(0,col,str(date))
col += 1
row = 1
for (url, results) in summary.items():
if url == "date":
continue
sheet.write(row,0,url)
col = 1
for data in results:
if len(data) == 2 and data[1]:
sheet.write(row,col,data[0],stylebroken)
elif len(data) > 0:
sheet.write(row,col,data[0])
col += 1
row += 1
if filename:
book.save(filename)
return book
def report_timings_summary(self,
summary,
summary_file_name = None,
mean_threshold = 1):
"""
This will extract the details from the sheet and optionally save
them to a summary file
summary: the summary details returned from the spreadsheet (read_timings_sheet)
summary_file_name: name of the file to record the summary details (if required)
mean_threshold: The minimum number of values required to include
the mean in the regression calculations
"""
import numpy
import datetime
good_values = []
other_values = []
total_values = []
for date in summary["date"]:
good_values.append([])
other_values.append([])
total_values.append([])
for (url,results) in summary.items():
if url == "date":
continue
else:
cnt = 0
for (duration, broken) in results:
if duration != "":
total_values[cnt].append(duration)
if broken:
other_values[cnt].append(duration)
else:
good_values[cnt].append(duration)
cnt += 1
        # get the number of days each entry is after the first date
        # and calculate the average; if the average is NaN then ignore
        # both the date and the value
date_summary = []
gv_mean = []
gv_std = []
gv_date = []
cnt = 0
start = datetime.datetime.strptime(summary["date"][0],"%Y-%m-%d")
        for values in good_values:
            if len(values) > mean_threshold:
                mean = numpy.mean(values)
                std = numpy.std(values)
if not numpy.isnan(mean):
this_date = datetime.datetime.strptime(summary["date"][cnt],"%Y-%m-%d")
date_summary.append((this_date - start).days)
gv_mean.append(mean)
gv_std.append(std)
gv_date.append(summary["date"][cnt])
cnt += 1
# calculate the regression line
if len(gv_mean) > 2:
(m,b) = numpy.polyfit(date_summary, gv_mean, 1)
else:
m = b = 0
if summary_file_name != None:
book = self.write_timings_sheet(summary)
sheet = book.add_sheet("summary")
row = 0
for date in gv_date:
sheet.write(row,0,str(date))
sheet.write(row,1,gv_mean[row])
row += 1
sheet.write(row,0,"Trend")
sheet.write(row,1,m)
# Save the details to the summary file
book.save(summary_file_name)
return (date_summary, gv_mean, gv_std, m, b)
def report_model_url(self):
print "Report breakdown by module"
for (model, value) in self.model_url.items():
print model
for ud in value:
url = ud[0]
depth = ud[1]
parent = ud[2]
tabs = "\t" * depth
print "%s %s-%s (parent url - %s)" % (tabs, depth, url, parent)
def brokenReport(self):
self.reporter("Broken Links")
as_html = current.test_config.html
self.broken_links_count = 0
for (url, rd_obj) in self.results.items():
if as_html:
print_url = "<a href=%s%s target=\"_blank\">%s</a>" % (self.homeURL, url, url)
else:
print_url = url
if rd_obj.is_broken():
if rd_obj.threw_exception():
msg = "(Exception) %s" % print_url
else:
http_code = rd_obj.return_http_code()
ticket = rd_obj.the_ticket(as_html)
try:
parent = self.urlParentList[url]
if as_html:
parent = "<a href=%s%s target=\"_blank\">Parent</a>" % (self.homeURL, parent)
except:
parent = "unknown"
msg = "%3d. (%s - %s) %s called from %s" % (self.broken_links_count + 1,
http_code,
ticket,
print_url,
parent
)
self.reporter(msg)
self.broken_links_count += 1
def timeReport(self):
from operator import itemgetter
import numpy
thresholdLink = {}
linktimes = []
for (url, rd_obj) in self.results.items():
duration = rd_obj.get_duration()
linktimes.append(duration)
if duration > self.threshold:
thresholdLink[url] = duration
self.reporter("Time Analysis - Links beyond threshold")
for (visited_url, duration) in sorted(thresholdLink.iteritems(),
key=itemgetter(1),
reverse=True):
self.reporter( "%s took %.3f seconds" % (visited_url, duration))
self.reporter("Time Analysis - summary")
total = len(linktimes)
average = numpy.mean(linktimes)
std = numpy.std(linktimes)
msg = "%s links visited with an average time of %.3f and standard deviation of %.3f" % (total, average, std)
self.reporter(msg)
def scatterplot(self):
"""
Method to draw a scatterplot of the average time to download links
against time. Add a regression line to show the trend over time.
"""
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
import numpy
except ImportError:
return
        try:
            import xlrd
        except:
            print >> self.stderr, "ERROR: the xlrd module is needed to record timings"
            return
rec_time_filename = self.config.record_timings_filename
try:
workbook = xlrd.open_workbook(filename=rec_time_filename,
formatting_info=True)
except:
return
import numpy
# Only include the mean in the regression values if there are at least 10 URL timings
summary = self.read_timings_sheet(workbook)
(date_summary, gv_mean, gv_std, m, b) = self.report_timings_summary(summary, mean_threshold=10)
if len(gv_mean) <= 2:
return
fig = Figure(figsize=(5, 2.5))
canvas = self.FigureCanvas(fig)
ax = fig.add_subplot(111)
linear = numpy.poly1d([m,b])
        denom = numpy.max(gv_std) / 50
        size = numpy.array(gv_std) / denom
ax.scatter(date_summary, gv_mean, marker="d", s=size)
ax.plot(date_summary, linear(date_summary), '--r')
chart = StringIO()
canvas.print_figure(chart)
image = chart.getvalue()
import base64
base64Img = base64.b64encode(image)
image = "<img src=\"data:image/png;base64,%s\">" % base64Img
self.reporter("Scatterplot of average link times per successful run")
self.reporter(image)
self.reporter("The trend line has a current slope of %s" % m)
self.reporter("The y-intercept is %s seconds" % b)
def depthReport(self):
"""
Method to draw a histogram of the number of new links
discovered at each depth.
        (i.e. how many levels of links must be followed to reach a page)
"""
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
from numpy import arange
except ImportError:
return
self.reporter("Analysis of link depth")
fig = Figure(figsize=(4, 2.5))
# Draw a histogram
width = 0.9
rect = [0.12, 0.08, 0.9, 0.85]
ax = fig.add_axes(rect)
left = arange(len(self.linkDepth))
plot = ax.bar(left, self.linkDepth, width=width)
# Add the x axis labels
ax.set_xticks(left+(width*0.5))
ax.set_xticklabels(left)
chart = StringIO()
canvas = self.FigureCanvas(fig)
canvas.print_figure(chart)
image = chart.getvalue()
import base64
base64Img = base64.b64encode(image)
image = "<img src=\"data:image/png;base64,%s\">" % base64Img
self.reporter(image)
class ReportData():
"""
Class to hold the data collected from the smoke test ready for reporting
Instances of this class will be held in the dictionary results which will
be keyed on the url. This way, in an attempt to minimise the memory used,
the url doesn't need to be stored in this class.
The class will have the following properties
broken: boolean
exception: boolean
http_code: integer
ticket: URL of any ticket linked with this url
parent: the parent URL of this url
depth: how deep is this url
duration: how long did it take to get the url
"""
def is_broken(self):
if hasattr(self, "broken"):
return self.broken
return False
def threw_exception(self):
if hasattr(self, "exception"):
return self.exception
return False
def return_http_code(self):
if hasattr(self, "http_code"):
return self.http_code
return "-"
def the_ticket(self, html):
"""
Should only have a ticket if it is broken,
but won't always have a ticket to display.
"""
if hasattr(self, "ticket"):
if html:
return "<a href=%s target=\"_blank\">Ticket</a>" % (self.ticket)
else:
return "Ticket: %s" % (self.ticket)
return "no ticket"
def get_parent(self):
if hasattr(self, "parent"):
return self.parent
return ""
def get_depth(self):
if hasattr(self, "depth"):
return self.depth
return 0
def get_duration(self):
if hasattr(self, "duration"):
return self.duration
return 0
|
mit
|
sanketloke/scikit-learn
|
sklearn/linear_model/sag.py
|
29
|
11291
|
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
    The step size is set to 1 / (alpha_scaled + L + fit_intercept) for the
    squared loss and to 4 / (L + fit_intercept + 4 * alpha_scaled) for the
    log loss, where L is the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way
    with a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
    max_iter : int, optional
        The max number of passes over the training data if the stopping
        criterion is not reached. Defaults to 1000.
    tol : double, optional
        The stopping criterion for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol. Defaults to .001
    verbose : integer, optional
        The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
    warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
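# Hedged usage sketch of sag_solver itself (inputs assumed; normally it is
# called indirectly via Ridge or LogisticRegression with solver='sag'):
#
#   rng = np.random.RandomState(0)
#   X = rng.randn(10, 5)
#   y = rng.randn(10)
#   coef, n_iter_, warm_start_mem = sag_solver(X, y, loss='squared',
#                                              alpha=1., tol=0.001)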
|
bsd-3-clause
|